Bug #275047 - Split _emerge/__init__.py into smaller pieces (part 5).
author Zac Medico <zmedico@gentoo.org>
Tue, 23 Jun 2009 18:46:38 +0000 (18:46 -0000)
committer Zac Medico <zmedico@gentoo.org>
Tue, 23 Jun 2009 18:46:38 +0000 (18:46 -0000)
Thanks to Sebastian Mingramm (few) <s.mingramm@gmx.de> for this patch.

svn path=/main/trunk/; revision=13672

pym/_emerge/Scheduler.py [new file with mode: 0644]
pym/_emerge/__init__.py
pym/_emerge/_find_deep_system_runtime_deps.py [new file with mode: 0644]
pym/_emerge/_flush_elog_mod_echo.py [new file with mode: 0644]
pym/_emerge/clear_caches.py [new file with mode: 0644]
pym/_emerge/create_depgraph_params.py [new file with mode: 0644]
pym/_emerge/create_world_atom.py [new file with mode: 0644]
pym/_emerge/depgraph.py [new file with mode: 0644]
pym/_emerge/is_valid_package_atom.py [new file with mode: 0644]

diff --git a/pym/_emerge/Scheduler.py b/pym/_emerge/Scheduler.py
new file mode 100644 (file)
index 0000000..7cf7134
--- /dev/null
@@ -0,0 +1,1641 @@
+import logging
+import os
+import sys
+import textwrap
+import time
+import weakref
+from itertools import izip
+
+try:
+       import portage
+except ImportError:
+       from os import path as osp
+       sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+       import portage
+
+from portage.cache.mappings import slot_dict_class
+from portage.elog.messages import eerror
+from portage.output import colorize, create_color_func, darkgreen, red
+bad = create_color_func("BAD")
+from portage.sets.base import InternalPackageSet
+from portage.util import writemsg, writemsg_level
+
+from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
+from _emerge.Blocker import Blocker
+from _emerge.BlockerDB import BlockerDB
+from _emerge.clear_caches import clear_caches
+from _emerge.create_depgraph_params import create_depgraph_params
+from _emerge.create_world_atom import create_world_atom
+from _emerge.DepPriority import DepPriority
+from _emerge.EbuildFetcher import EbuildFetcher
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.emergelog import emergelog, _emerge_log_dir
+from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
+from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
+from _emerge.JobStatusDisplay import JobStatusDisplay
+from _emerge.MergeListItem import MergeListItem
+from _emerge.Package import Package
+from _emerge.PackageMerge import PackageMerge
+from _emerge.PollScheduler import PollScheduler
+from _emerge.RootConfig import RootConfig
+from _emerge.SlotObject import SlotObject
+from _emerge.SequentialTaskQueue import SequentialTaskQueue
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+
+import portage.proxy.lazyimport
+import portage.proxy as proxy
+proxy.lazyimport.lazyimport(globals(),
+       '_emerge.depgraph:depgraph',
+)
+
+class Scheduler(PollScheduler):
+
+       _opts_ignore_blockers = \
+               frozenset(["--buildpkgonly",
+               "--fetchonly", "--fetch-all-uri",
+               "--nodeps", "--pretend"])
+
+       _opts_no_background = \
+               frozenset(["--pretend",
+               "--fetchonly", "--fetch-all-uri"])
+
+       _opts_no_restart = frozenset(["--buildpkgonly",
+               "--fetchonly", "--fetch-all-uri", "--pretend"])
+
+       _bad_resume_opts = set(["--ask", "--changelog",
+               "--resume", "--skipfirst"])
+
+       _fetch_log = os.path.join(_emerge_log_dir, 'emerge-fetch.log')
+
+       class _iface_class(SlotObject):
+               __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
+                       "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
+                       "scheduleSetup", "scheduleUnpack", "scheduleYield",
+                       "unregister")
+
+       class _fetch_iface_class(SlotObject):
+               __slots__ = ("log_file", "schedule")
+
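+       # One queue per kind of work; each slot is populated with a
+       # SequentialTaskQueue instance in __init__.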
+       _task_queues_class = slot_dict_class(
+               ("merge", "jobs", "fetch", "unpack"), prefix="")
+
+       class _build_opts_class(SlotObject):
+               __slots__ = ("buildpkg", "buildpkgonly",
+                       "fetch_all_uri", "fetchonly", "pretend")
+
+       class _binpkg_opts_class(SlotObject):
+               __slots__ = ("fetchonly", "getbinpkg", "pretend")
+
+       class _pkg_count_class(SlotObject):
+               __slots__ = ("curval", "maxval")
+
+       class _emerge_log_class(SlotObject):
+               __slots__ = ("xterm_titles",)
+
+               def log(self, *pargs, **kwargs):
+                       if not self.xterm_titles:
+                               # Avoid interference with the scheduler's status display.
+                               kwargs.pop("short_msg", None)
+                       emergelog(self.xterm_titles, *pargs, **kwargs)
+
+       class _failed_pkg(SlotObject):
+               __slots__ = ("build_dir", "build_log", "pkg", "returncode")
+
+       class _ConfigPool(object):
+               """Interface for a task to temporarily allocate a config
+               instance from a pool. This allows a task to be constructed
+               long before the config instance actually becomes needed, like
+               when prefetchers are constructed for the whole merge list."""
+               __slots__ = ("_root", "_allocate", "_deallocate")
+               def __init__(self, root, allocate, deallocate):
+                       self._root = root
+                       self._allocate = allocate
+                       self._deallocate = deallocate
+               def allocate(self):
+                       return self._allocate(self._root)
+               def deallocate(self, settings):
+                       self._deallocate(settings)
+
+       class _unknown_internal_error(portage.exception.PortageException):
+               """
+               Used internally to terminate scheduling. The specific reason for
+               the failure should have been dumped to stderr.
+               """
+               def __init__(self, value=""):
+                       portage.exception.PortageException.__init__(self, value)
+
+       def __init__(self, settings, trees, mtimedb, myopts,
+               spinner, mergelist, favorites, digraph):
+               PollScheduler.__init__(self)
+               self.settings = settings
+               self.target_root = settings["ROOT"]
+               self.trees = trees
+               self.myopts = myopts
+               self._spinner = spinner
+               self._mtimedb = mtimedb
+               self._mergelist = mergelist
+               self._favorites = favorites
+               self._args_set = InternalPackageSet(favorites)
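+               # Each *_opts_class slot mirrors the presence of the corresponding
+               # command-line flag, e.g. fetch_all_uri is True when --fetch-all-uri
+               # appears in myopts.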
+               self._build_opts = self._build_opts_class()
+               for k in self._build_opts.__slots__:
+                       setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
+               self._binpkg_opts = self._binpkg_opts_class()
+               for k in self._binpkg_opts.__slots__:
+                       setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
+
+               self.curval = 0
+               self._logger = self._emerge_log_class()
+               self._task_queues = self._task_queues_class()
+               for k in self._task_queues.allowed_keys:
+                       setattr(self._task_queues, k,
+                               SequentialTaskQueue())
+
+               # Holds merges that will wait to be executed when no builds are
+               # executing. This is useful for system packages since dependencies
+               # on system packages are frequently unspecified.
+               self._merge_wait_queue = []
+               # Holds merges that have been transferred from the merge_wait_queue to
+               # the actual merge queue. They are removed from this list upon
+               # completion. Other packages can start building only when this list is
+               # empty.
+               self._merge_wait_scheduled = []
+
+               # Holds system packages and their deep runtime dependencies. Before
+               # being merged, these packages go to merge_wait_queue, to be merged
+               # when no other packages are building.
+               self._deep_system_deps = set()
+
+               # Holds packages to merge which will satisfy currently unsatisfied
+               # deep runtime dependencies of system packages. If this is not empty
+               # then no parallel builds will be spawned until it is empty. This
+               # minimizes the possibility that a build will fail due to the system
+               # being in a fragile state. For example, see bug #259954.
+               self._unsatisfied_system_deps = set()
+
+               self._status_display = JobStatusDisplay(
+                       xterm_titles=('notitles' not in settings.features))
+               self._max_load = myopts.get("--load-average")
+               max_jobs = myopts.get("--jobs")
+               if max_jobs is None:
+                       max_jobs = 1
+               self._set_max_jobs(max_jobs)
+
+               # The root where the currently running
+               # portage instance is installed.
+               self._running_root = trees["/"]["root_config"]
+               self.edebug = 0
+               if settings.get("PORTAGE_DEBUG", "") == "1":
+                       self.edebug = 1
+               self.pkgsettings = {}
+               self._config_pool = {}
+               self._blocker_db = {}
+               for root in trees:
+                       self._config_pool[root] = []
+                       self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
+
+               fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
+                       schedule=self._schedule_fetch)
+               self._sched_iface = self._iface_class(
+                       dblinkEbuildPhase=self._dblink_ebuild_phase,
+                       dblinkDisplayMerge=self._dblink_display_merge,
+                       dblinkElog=self._dblink_elog,
+                       dblinkEmergeLog=self._dblink_emerge_log,
+                       fetch=fetch_iface, register=self._register,
+                       schedule=self._schedule_wait,
+                       scheduleSetup=self._schedule_setup,
+                       scheduleUnpack=self._schedule_unpack,
+                       scheduleYield=self._schedule_yield,
+                       unregister=self._unregister)
+
+               self._prefetchers = weakref.WeakValueDictionary()
+               self._pkg_queue = []
+               self._completed_tasks = set()
+
+               self._failed_pkgs = []
+               self._failed_pkgs_all = []
+               self._failed_pkgs_die_msgs = []
+               self._post_mod_echo_msgs = []
+               self._parallel_fetch = False
+               merge_count = len([x for x in mergelist \
+                       if isinstance(x, Package) and x.operation == "merge"])
+               self._pkg_count = self._pkg_count_class(
+                       curval=0, maxval=merge_count)
+               self._status_display.maxval = self._pkg_count.maxval
+
+               # The load average takes some time to respond when new
+               # jobs are added, so we need to limit the rate of adding
+               # new jobs.
+               self._job_delay_max = 10
+               self._job_delay_factor = 1.0
+               self._job_delay_exp = 1.5
+               self._previous_job_start_time = None
+
+               self._set_digraph(digraph)
+
+               # This is used to memoize the _choose_pkg() result when
+               # no packages can be chosen until one of the existing
+               # jobs completes.
+               self._choose_pkg_return_early = False
+
+               features = self.settings.features
+               if "parallel-fetch" in features and \
+                       not ("--pretend" in self.myopts or \
+                       "--fetch-all-uri" in self.myopts or \
+                       "--fetchonly" in self.myopts):
+                       if "distlocks" not in features:
+                               portage.writemsg(red("!!!")+"\n", noiselevel=-1)
+                               portage.writemsg(red("!!!")+" parallel-fetching " + \
+                                       "requires the distlocks feature enabled"+"\n",
+                                       noiselevel=-1)
+                               portage.writemsg(red("!!!")+" you have it disabled, " + \
+                                       "thus parallel-fetching is being disabled"+"\n",
+                                       noiselevel=-1)
+                               portage.writemsg(red("!!!")+"\n", noiselevel=-1)
+                       elif len(mergelist) > 1:
+                               self._parallel_fetch = True
+
+               if self._parallel_fetch:
+                               # clear out existing fetch log if it exists
+                               try:
+                                       open(self._fetch_log, 'w')
+                               except EnvironmentError:
+                                       pass
+
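+               # Remember which portage package is installed in the running root,
+               # so that _is_restart_necessary() can detect when emerge replaces
+               # itself and has to restart.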
+               self._running_portage = None
+               portage_match = self._running_root.trees["vartree"].dbapi.match(
+                       portage.const.PORTAGE_PACKAGE_ATOM)
+               if portage_match:
+                       cpv = portage_match.pop()
+                       self._running_portage = self._pkg(cpv, "installed",
+                               self._running_root, installed=True)
+
+       def _poll(self, timeout=None):
+               self._schedule()
+               PollScheduler._poll(self, timeout=timeout)
+
+       def _set_max_jobs(self, max_jobs):
+               self._max_jobs = max_jobs
+               self._task_queues.jobs.max_jobs = max_jobs
+
+       def _background_mode(self):
+               """
+               Check if background mode is enabled and adjust states as necessary.
+
+               @rtype: bool
+               @returns: True if background mode is enabled, False otherwise.
+               """
+               background = (self._max_jobs is True or \
+                       self._max_jobs > 1 or "--quiet" in self.myopts) and \
+                       not bool(self._opts_no_background.intersection(self.myopts))
+
+               if background:
+                       interactive_tasks = self._get_interactive_tasks()
+                       if interactive_tasks:
+                               background = False
+                               writemsg_level(">>> Sending package output to stdio due " + \
+                                       "to interactive package(s):\n",
+                                       level=logging.INFO, noiselevel=-1)
+                               msg = [""]
+                               for pkg in interactive_tasks:
+                                       pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
+                                       if pkg.root != "/":
+                                               pkg_str += " for " + pkg.root
+                                       msg.append(pkg_str)
+                               msg.append("")
+                               writemsg_level("".join("%s\n" % (l,) for l in msg),
+                                       level=logging.INFO, noiselevel=-1)
+                               if self._max_jobs is True or self._max_jobs > 1:
+                                       self._set_max_jobs(1)
+                                       writemsg_level(">>> Setting --jobs=1 due " + \
+                                               "to the above interactive package(s)\n",
+                                               level=logging.INFO, noiselevel=-1)
+
+               self._status_display.quiet = \
+                       not background or \
+                       ("--quiet" in self.myopts and \
+                       "--verbose" not in self.myopts)
+
+               self._logger.xterm_titles = \
+                       "notitles" not in self.settings.features and \
+                       self._status_display.quiet
+
+               return background
+
+       def _get_interactive_tasks(self):
+               from portage import flatten
+               from portage.dep import use_reduce, paren_reduce
+               interactive_tasks = []
+               for task in self._mergelist:
+                       if not (isinstance(task, Package) and \
+                               task.operation == "merge"):
+                               continue
+                       try:
+                               properties = flatten(use_reduce(paren_reduce(
+                                       task.metadata["PROPERTIES"]), uselist=task.use.enabled))
+                       except portage.exception.InvalidDependString, e:
+                               show_invalid_depstring_notice(task,
+                                       task.metadata["PROPERTIES"], str(e))
+                               raise self._unknown_internal_error()
+                       if "interactive" in properties:
+                               interactive_tasks.append(task)
+               return interactive_tasks
+
+       def _set_digraph(self, digraph):
+               if "--nodeps" in self.myopts or \
+                       (self._max_jobs is not True and self._max_jobs < 2):
+                       # save some memory
+                       self._digraph = None
+                       return
+
+               self._digraph = digraph
+               self._find_system_deps()
+               self._prune_digraph()
+               self._prevent_builddir_collisions()
+
+       def _find_system_deps(self):
+               """
+               Find system packages and their deep runtime dependencies. Before being
+               merged, these packages go to merge_wait_queue, to be merged when no
+               other packages are building.
+               """
+               deep_system_deps = self._deep_system_deps
+               deep_system_deps.clear()
+               deep_system_deps.update(
+                       _find_deep_system_runtime_deps(self._digraph))
+               deep_system_deps.difference_update([pkg for pkg in \
+                       deep_system_deps if pkg.operation != "merge"])
+
+       def _prune_digraph(self):
+               """
+               Prune any root nodes that are irrelevant.
+               """
+
+               graph = self._digraph
+               completed_tasks = self._completed_tasks
+               removed_nodes = set()
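+               # Keep stripping irrelevant root nodes until a pass removes nothing,
+               # since each removal can expose new root nodes.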
+               while True:
+                       for node in graph.root_nodes():
+                               if not isinstance(node, Package) or \
+                                       (node.installed and node.operation == "nomerge") or \
+                                       node.onlydeps or \
+                                       node in completed_tasks:
+                                       removed_nodes.add(node)
+                       if removed_nodes:
+                               graph.difference_update(removed_nodes)
+                       if not removed_nodes:
+                               break
+                       removed_nodes.clear()
+
+       def _prevent_builddir_collisions(self):
+               """
+               When building stages, sometimes the same exact cpv needs to be merged
+               to both $ROOTs. Add edges to the digraph in order to avoid collisions
+               in the builddir. Currently, normal file locks would be inappropriate
+               for this purpose since emerge holds all of its build dir locks from
+               the main process.
+               """
+               cpv_map = {}
+               for pkg in self._mergelist:
+                       if not isinstance(pkg, Package):
+                               # a satisfied blocker
+                               continue
+                       if pkg.installed:
+                               continue
+                       if pkg.cpv not in cpv_map:
+                               cpv_map[pkg.cpv] = [pkg]
+                               continue
+                       for earlier_pkg in cpv_map[pkg.cpv]:
+                               self._digraph.add(earlier_pkg, pkg,
+                                       priority=DepPriority(buildtime=True))
+                       cpv_map[pkg.cpv].append(pkg)
+
+       class _pkg_failure(portage.exception.PortageException):
+               """
+               An instance of this class is raised by unmerge() when
+               an uninstallation fails.
+               """
+               status = 1
+               def __init__(self, *pargs):
+                       portage.exception.PortageException.__init__(self, pargs)
+                       if pargs:
+                               self.status = pargs[0]
+
+       def _schedule_fetch(self, fetcher):
+               """
+               Schedule a fetcher on the fetch queue, in order to
+               serialize access to the fetch log.
+               """
+               self._task_queues.fetch.addFront(fetcher)
+
+       def _schedule_setup(self, setup_phase):
+               """
+               Schedule a setup phase on the merge queue, in order to
+               serialize unsandboxed access to the live filesystem.
+               """
+               self._task_queues.merge.addFront(setup_phase)
+               self._schedule()
+
+       def _schedule_unpack(self, unpack_phase):
+               """
+               Schedule an unpack phase on the unpack queue, in order
+               to serialize $DISTDIR access for live ebuilds.
+               """
+               self._task_queues.unpack.add(unpack_phase)
+
+       def _find_blockers(self, new_pkg):
+               """
+               Returns a callable which should be called only when
+               the vdb lock has been acquired.
+               """
+               def get_blockers():
+                       return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
+               return get_blockers
+
+       def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
+               if self._opts_ignore_blockers.intersection(self.myopts):
+                       return None
+
+               # Call gc.collect() here to avoid heap overflow that
+               # triggers 'Cannot allocate memory' errors (reported
+               # with python-2.5).
+               import gc
+               gc.collect()
+
+               blocker_db = self._blocker_db[new_pkg.root]
+
+               blocker_dblinks = []
+               for blocking_pkg in blocker_db.findInstalledBlockers(
+                       new_pkg, acquire_lock=acquire_lock):
+                       if new_pkg.slot_atom == blocking_pkg.slot_atom:
+                               continue
+                       if new_pkg.cpv == blocking_pkg.cpv:
+                               continue
+                       blocker_dblinks.append(portage.dblink(
+                               blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
+                               self.pkgsettings[blocking_pkg.root], treetype="vartree",
+                               vartree=self.trees[blocking_pkg.root]["vartree"]))
+
+               gc.collect()
+
+               return blocker_dblinks
+
+       def _dblink_pkg(self, pkg_dblink):
+               cpv = pkg_dblink.mycpv
+               type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
+               root_config = self.trees[pkg_dblink.myroot]["root_config"]
+               installed = type_name == "installed"
+               return self._pkg(cpv, type_name, root_config, installed=installed)
+
+       def _append_to_log_path(self, log_path, msg):
+               f = open(log_path, 'a')
+               try:
+                       f.write(msg)
+               finally:
+                       f.close()
+
+       def _dblink_elog(self, pkg_dblink, phase, func, msgs):
+
+               log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
+               log_file = None
+               out = sys.stdout
+               background = self._background
+
+               if background and log_path is not None:
+                       log_file = open(log_path, 'a')
+                       out = log_file
+
+               try:
+                       for msg in msgs:
+                               func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
+               finally:
+                       if log_file is not None:
+                               log_file.close()
+
+       def _dblink_emerge_log(self, msg):
+               self._logger.log(msg)
+
+       def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
+               log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
+               background = self._background
+
+               if log_path is None:
+                       if not (background and level < logging.WARN):
+                               portage.util.writemsg_level(msg,
+                                       level=level, noiselevel=noiselevel)
+               else:
+                       if not background:
+                               portage.util.writemsg_level(msg,
+                                       level=level, noiselevel=noiselevel)
+                       self._append_to_log_path(log_path, msg)
+
+       def _dblink_ebuild_phase(self,
+               pkg_dblink, pkg_dbapi, ebuild_path, phase):
+               """
+               Using this callback for merge phases allows the scheduler
+               to run while these phases execute asynchronously, and allows
+               the scheduler to control output handling.
+               """
+
+               scheduler = self._sched_iface
+               settings = pkg_dblink.settings
+               pkg = self._dblink_pkg(pkg_dblink)
+               background = self._background
+               log_path = settings.get("PORTAGE_LOG_FILE")
+
+               ebuild_phase = EbuildPhase(background=background,
+                       pkg=pkg, phase=phase, scheduler=scheduler,
+                       settings=settings, tree=pkg_dblink.treetype)
+               ebuild_phase.start()
+               ebuild_phase.wait()
+
+               return ebuild_phase.returncode
+
+       def _generate_digests(self):
+               """
+               Generate digests if necessary for --digest or FEATURES=digest.
+               In order to avoid interference, this must be done before parallel
+               tasks are started.
+               """
+
+               if '--fetchonly' in self.myopts:
+                       return os.EX_OK
+
+               digest = '--digest' in self.myopts
+               if not digest:
+                       for pkgsettings in self.pkgsettings.itervalues():
+                               if 'digest' in pkgsettings.features:
+                                       digest = True
+                                       break
+
+               if not digest:
+                       return os.EX_OK
+
+               for x in self._mergelist:
+                       if not isinstance(x, Package) or \
+                               x.type_name != 'ebuild' or \
+                               x.operation != 'merge':
+                               continue
+                       pkgsettings = self.pkgsettings[x.root]
+                       if '--digest' not in self.myopts and \
+                               'digest' not in pkgsettings.features:
+                               continue
+                       portdb = x.root_config.trees['porttree'].dbapi
+                       ebuild_path = portdb.findname(x.cpv)
+                       if not ebuild_path:
+                               writemsg_level(
+                                       "!!! Could not locate ebuild for '%s'.\n" \
+                                       % x.cpv, level=logging.ERROR, noiselevel=-1)
+                               return 1
+                       pkgsettings['O'] = os.path.dirname(ebuild_path)
+                       if not portage.digestgen([], pkgsettings, myportdb=portdb):
+                               writemsg_level(
+                                       "!!! Unable to generate manifest for '%s'.\n" \
+                                       % x.cpv, level=logging.ERROR, noiselevel=-1)
+                               return 1
+
+               return os.EX_OK
+
+       def _check_manifests(self):
+               # Verify all the manifests now so that the user is notified of failure
+               # as soon as possible.
+               if "strict" not in self.settings.features or \
+                       "--fetchonly" in self.myopts or \
+                       "--fetch-all-uri" in self.myopts:
+                       return os.EX_OK
+
+               shown_verifying_msg = False
+               quiet_settings = {}
+               for myroot, pkgsettings in self.pkgsettings.iteritems():
+                       quiet_config = portage.config(clone=pkgsettings)
+                       quiet_config["PORTAGE_QUIET"] = "1"
+                       quiet_config.backup_changes("PORTAGE_QUIET")
+                       quiet_settings[myroot] = quiet_config
+                       del quiet_config
+
+               for x in self._mergelist:
+                       if not isinstance(x, Package) or \
+                               x.type_name != "ebuild":
+                               continue
+
+                       if not shown_verifying_msg:
+                               shown_verifying_msg = True
+                               self._status_msg("Verifying ebuild manifests")
+
+                       root_config = x.root_config
+                       portdb = root_config.trees["porttree"].dbapi
+                       quiet_config = quiet_settings[root_config.root]
+                       quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
+                       if not portage.digestcheck([], quiet_config, strict=True):
+                               return 1
+
+               return os.EX_OK
+
+       def _add_prefetchers(self):
+
+               if not self._parallel_fetch:
+                       return
+
+               if self._parallel_fetch:
+                       self._status_msg("Starting parallel fetch")
+
+                       prefetchers = self._prefetchers
+                       getbinpkg = "--getbinpkg" in self.myopts
+
+                       # In order to avoid "waiting for lock" messages
+                       # at the beginning, which annoy users, never
+                       # spawn a prefetcher for the first package.
+                       for pkg in self._mergelist[1:]:
+                               prefetcher = self._create_prefetcher(pkg)
+                               if prefetcher is not None:
+                                       self._task_queues.fetch.add(prefetcher)
+                                       prefetchers[pkg] = prefetcher
+
+       def _create_prefetcher(self, pkg):
+               """
+               @return: a prefetcher, or None if not applicable
+               """
+               prefetcher = None
+
+               if not isinstance(pkg, Package):
+                       pass
+
+               elif pkg.type_name == "ebuild":
+
+                       prefetcher = EbuildFetcher(background=True,
+                               config_pool=self._ConfigPool(pkg.root,
+                               self._allocate_config, self._deallocate_config),
+                               fetchonly=1, logfile=self._fetch_log,
+                               pkg=pkg, prefetch=True, scheduler=self._sched_iface)
+
+               elif pkg.type_name == "binary" and \
+                       "--getbinpkg" in self.myopts and \
+                       pkg.root_config.trees["bintree"].isremote(pkg.cpv):
+
+                       prefetcher = BinpkgPrefetcher(background=True,
+                               pkg=pkg, scheduler=self._sched_iface)
+
+               return prefetcher
+
+       def _is_restart_scheduled(self):
+               """
+               Check if the merge list contains a replacement
+               for the currently running instance, which would result
+               in a restart after the merge.
+               @rtype: bool
+               @returns: True if a restart is scheduled, False otherwise.
+               """
+               if self._opts_no_restart.intersection(self.myopts):
+                       return False
+
+               mergelist = self._mergelist
+
+               for i, pkg in enumerate(mergelist):
+                       if self._is_restart_necessary(pkg) and \
+                               i != len(mergelist) - 1:
+                               return True
+
+               return False
+
+       def _is_restart_necessary(self, pkg):
+               """
+               @return: True if merging the given package
+                       requires restart, False otherwise.
+               """
+
+               # Figure out if we need a restart.
+               if pkg.root == self._running_root.root and \
+                       portage.match_from_list(
+                       portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
+                       if self._running_portage:
+                               return pkg.cpv != self._running_portage.cpv
+                       return True
+               return False
+
+       def _restart_if_necessary(self, pkg):
+               """
+               Use execv() to restart emerge. This happens
+               if portage upgrades itself and there are
+               remaining packages in the list.
+               """
+
+               if self._opts_no_restart.intersection(self.myopts):
+                       return
+
+               if not self._is_restart_necessary(pkg):
+                       return
+
+               if pkg == self._mergelist[-1]:
+                       return
+
+               self._main_loop_cleanup()
+
+               logger = self._logger
+               pkg_count = self._pkg_count
+               mtimedb = self._mtimedb
+               bad_resume_opts = self._bad_resume_opts
+
+               logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
+                       (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
+
+               logger.log(" *** RESTARTING " + \
+                       "emerge via exec() after change of " + \
+                       "portage version.")
+
+               mtimedb["resume"]["mergelist"].remove(list(pkg))
+               mtimedb.commit()
+               portage.run_exitfuncs()
+               mynewargv = [sys.argv[0], "--resume"]
+               resume_opts = self.myopts.copy()
+               # For automatic resume, we need to prevent
+               # any of bad_resume_opts from leaking in
+               # via EMERGE_DEFAULT_OPTS.
+               resume_opts["--ignore-default-opts"] = True
+               for myopt, myarg in resume_opts.iteritems():
+                       if myopt not in bad_resume_opts:
+                               if myarg is True:
+                                       mynewargv.append(myopt)
+                               else:
+                                       mynewargv.append(myopt +"="+ str(myarg))
+               # priority only needs to be adjusted on the first run
+               os.environ["PORTAGE_NICENESS"] = "0"
+               os.execv(mynewargv[0], mynewargv)
+
+       def merge(self):
+
+               if "--resume" in self.myopts:
+                       # We're resuming.
+                       portage.writemsg_stdout(
+                               colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
+                       self._logger.log(" *** Resuming merge...")
+
+               self._save_resume_list()
+
+               try:
+                       self._background = self._background_mode()
+               except self._unknown_internal_error:
+                       return 1
+
+               for root in self.trees:
+                       root_config = self.trees[root]["root_config"]
+
+                       # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
+                       # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
+                       # for ensuring sane $PWD (bug #239560) and storing elog messages.
+                       tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
+                       if not tmpdir or not os.path.isdir(tmpdir):
+                               msg = "The directory specified in your " + \
+                                       "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
+                               "does not exist. Please create this " + \
+                               "directory or correct your PORTAGE_TMPDIR setting."
+                               msg = textwrap.wrap(msg, 70)
+                               out = portage.output.EOutput()
+                               for l in msg:
+                                       out.eerror(l)
+                               return 1
+
+                       if self._background:
+                               root_config.settings.unlock()
+                               root_config.settings["PORTAGE_BACKGROUND"] = "1"
+                               root_config.settings.backup_changes("PORTAGE_BACKGROUND")
+                               root_config.settings.lock()
+
+                       self.pkgsettings[root] = portage.config(
+                               clone=root_config.settings)
+
+               rval = self._generate_digests()
+               if rval != os.EX_OK:
+                       return rval
+
+               rval = self._check_manifests()
+               if rval != os.EX_OK:
+                       return rval
+
+               keep_going = "--keep-going" in self.myopts
+               fetchonly = self._build_opts.fetchonly
+               mtimedb = self._mtimedb
+               failed_pkgs = self._failed_pkgs
+
+               while True:
+                       rval = self._merge()
+                       if rval == os.EX_OK or fetchonly or not keep_going:
+                               break
+                       if "resume" not in mtimedb:
+                               break
+                       mergelist = self._mtimedb["resume"].get("mergelist")
+                       if not mergelist:
+                               break
+
+                       if not failed_pkgs:
+                               break
+
+                       for failed_pkg in failed_pkgs:
+                               mergelist.remove(list(failed_pkg.pkg))
+
+                       self._failed_pkgs_all.extend(failed_pkgs)
+                       del failed_pkgs[:]
+
+                       if not mergelist:
+                               break
+
+                       if not self._calc_resume_list():
+                               break
+
+                       clear_caches(self.trees)
+                       if not self._mergelist:
+                               break
+
+                       self._save_resume_list()
+                       self._pkg_count.curval = 0
+                       self._pkg_count.maxval = len([x for x in self._mergelist \
+                               if isinstance(x, Package) and x.operation == "merge"])
+                       self._status_display.maxval = self._pkg_count.maxval
+
+               self._logger.log(" *** Finished. Cleaning up...")
+
+               if failed_pkgs:
+                       self._failed_pkgs_all.extend(failed_pkgs)
+                       del failed_pkgs[:]
+
+               background = self._background
+               failure_log_shown = False
+               if background and len(self._failed_pkgs_all) == 1:
+                       # If only one package failed then just show its
+                       # whole log for easy viewing.
+                       failed_pkg = self._failed_pkgs_all[-1]
+                       build_dir = failed_pkg.build_dir
+                       log_file = None
+
+                       log_paths = [failed_pkg.build_log]
+
+                       log_path = self._locate_failure_log(failed_pkg)
+                       if log_path is not None:
+                               try:
+                                       log_file = open(log_path)
+                               except IOError:
+                                       pass
+
+                       if log_file is not None:
+                               try:
+                                       for line in log_file:
+                                               writemsg_level(line, noiselevel=-1)
+                               finally:
+                                       log_file.close()
+                               failure_log_shown = True
+
+               # Dump mod_echo output now since it tends to flood the terminal.
+               # This prevents more important output, generated later, from being
+               # swept away by the mod_echo output.
+               mod_echo_output =  _flush_elog_mod_echo()
+
+               if background and not failure_log_shown and \
+                       self._failed_pkgs_all and \
+                       self._failed_pkgs_die_msgs and \
+                       not mod_echo_output:
+
+                       printer = portage.output.EOutput()
+                       for mysettings, key, logentries in self._failed_pkgs_die_msgs:
+                               root_msg = ""
+                               if mysettings["ROOT"] != "/":
+                                       root_msg = " merged to %s" % mysettings["ROOT"]
+                               print
+                               printer.einfo("Error messages for package %s%s:" % \
+                                       (colorize("INFORM", key), root_msg))
+                               print
+                               for phase in portage.const.EBUILD_PHASES:
+                                       if phase not in logentries:
+                                               continue
+                                       for msgtype, msgcontent in logentries[phase]:
+                                               if isinstance(msgcontent, basestring):
+                                                       msgcontent = [msgcontent]
+                                               for line in msgcontent:
+                                                       printer.eerror(line.strip("\n"))
+
+               if self._post_mod_echo_msgs:
+                       for msg in self._post_mod_echo_msgs:
+                               msg()
+
+               if len(self._failed_pkgs_all) > 1 or \
+                       (self._failed_pkgs_all and "--keep-going" in self.myopts):
+                       if len(self._failed_pkgs_all) > 1:
+                               msg = "The following %d packages have " % \
+                                       len(self._failed_pkgs_all) + \
+                                       "failed to build or install:"
+                       else:
+                               msg = "The following package has " + \
+                                       "failed to build or install:"
+                       prefix = bad(" * ")
+                       writemsg(prefix + "\n", noiselevel=-1)
+                       from textwrap import wrap
+                       for line in wrap(msg, 72):
+                               writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
+                       writemsg(prefix + "\n", noiselevel=-1)
+                       for failed_pkg in self._failed_pkgs_all:
+                               writemsg("%s\t%s\n" % (prefix,
+                                       colorize("INFORM", str(failed_pkg.pkg))),
+                                       noiselevel=-1)
+                       writemsg(prefix + "\n", noiselevel=-1)
+
+               return rval
+
+       def _elog_listener(self, mysettings, key, logentries, fulltext):
+               errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
+               if errors:
+                       self._failed_pkgs_die_msgs.append(
+                               (mysettings, key, errors))
+
+       def _locate_failure_log(self, failed_pkg):
+
+               build_dir = failed_pkg.build_dir
+               log_file = None
+
+               log_paths = [failed_pkg.build_log]
+
+               for log_path in log_paths:
+                       if not log_path:
+                               continue
+
+                       try:
+                               log_size = os.stat(log_path).st_size
+                       except OSError:
+                               continue
+
+                       if log_size == 0:
+                               continue
+
+                       return log_path
+
+               return None
+
+       def _add_packages(self):
+               pkg_queue = self._pkg_queue
+               for pkg in self._mergelist:
+                       if isinstance(pkg, Package):
+                               pkg_queue.append(pkg)
+                       elif isinstance(pkg, Blocker):
+                               pass
+
+       def _system_merge_started(self, merge):
+               """
+               Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
+               """
+               graph = self._digraph
+               if graph is None:
+                       return
+               pkg = merge.merge.pkg
+
+               # Skip this if $ROOT != / since it shouldn't matter if there
+               # are unsatisfied system runtime deps in this case.
+               if pkg.root != '/':
+                       return
+
+               completed_tasks = self._completed_tasks
+               unsatisfied = self._unsatisfied_system_deps
+
+               def ignore_non_runtime_or_satisfied(priority):
+                       """
+                       Ignore non-runtime and satisfied runtime priorities.
+                       """
+                       if isinstance(priority, DepPriority) and \
+                               not priority.satisfied and \
+                               (priority.runtime or priority.runtime_post):
+                               return False
+                       return True
+
+               # When checking for unsatisfied runtime deps, only check
+               # direct deps since indirect deps are checked when the
+               # corresponding parent is merged.
+               for child in graph.child_nodes(pkg,
+                       ignore_priority=ignore_non_runtime_or_satisfied):
+                       if not isinstance(child, Package) or \
+                               child.operation == 'uninstall':
+                               continue
+                       if child is pkg:
+                               continue
+                       if child.operation == 'merge' and \
+                               child not in completed_tasks:
+                               unsatisfied.add(child)
+
+       def _merge_wait_exit_handler(self, task):
+               self._merge_wait_scheduled.remove(task)
+               self._merge_exit(task)
+
+       def _merge_exit(self, merge):
+               self._do_merge_exit(merge)
+               self._deallocate_config(merge.merge.settings)
+               if merge.returncode == os.EX_OK and \
+                       not merge.merge.pkg.installed:
+                       self._status_display.curval += 1
+               self._status_display.merges = len(self._task_queues.merge)
+               self._schedule()
+
+       def _do_merge_exit(self, merge):
+               pkg = merge.merge.pkg
+               if merge.returncode != os.EX_OK:
+                       settings = merge.merge.settings
+                       build_dir = settings.get("PORTAGE_BUILDDIR")
+                       build_log = settings.get("PORTAGE_LOG_FILE")
+
+                       self._failed_pkgs.append(self._failed_pkg(
+                               build_dir=build_dir, build_log=build_log,
+                               pkg=pkg,
+                               returncode=merge.returncode))
+                       self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
+
+                       self._status_display.failed = len(self._failed_pkgs)
+                       return
+
+               self._task_complete(pkg)
+               pkg_to_replace = merge.merge.pkg_to_replace
+               if pkg_to_replace is not None:
+                       # When a package is replaced, mark its uninstall
+                       # task complete (if any).
+                       uninst_hash_key = \
+                               ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
+                       self._task_complete(uninst_hash_key)
+
+               if pkg.installed:
+                       return
+
+               self._restart_if_necessary(pkg)
+
+               # Call mtimedb.commit() after each merge so that
+               # --resume still works after being interrupted
+               # by reboot, sigkill or similar.
+               mtimedb = self._mtimedb
+               mtimedb["resume"]["mergelist"].remove(list(pkg))
+               if not mtimedb["resume"]["mergelist"]:
+                       del mtimedb["resume"]
+               mtimedb.commit()
+
+       def _build_exit(self, build):
+               if build.returncode == os.EX_OK:
+                       self.curval += 1
+                       merge = PackageMerge(merge=build)
+                       if not build.build_opts.buildpkgonly and \
+                               build.pkg in self._deep_system_deps:
+                               # Since dependencies on system packages are frequently
+                               # unspecified, merge them only when no builds are executing.
+                               self._merge_wait_queue.append(merge)
+                               merge.addStartListener(self._system_merge_started)
+                       else:
+                               merge.addExitListener(self._merge_exit)
+                               self._task_queues.merge.add(merge)
+                               self._status_display.merges = len(self._task_queues.merge)
+               else:
+                       settings = build.settings
+                       build_dir = settings.get("PORTAGE_BUILDDIR")
+                       build_log = settings.get("PORTAGE_LOG_FILE")
+
+                       self._failed_pkgs.append(self._failed_pkg(
+                               build_dir=build_dir, build_log=build_log,
+                               pkg=build.pkg,
+                               returncode=build.returncode))
+                       self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
+
+                       self._status_display.failed = len(self._failed_pkgs)
+                       self._deallocate_config(build.settings)
+               self._jobs -= 1
+               self._status_display.running = self._jobs
+               self._schedule()
+
+       def _extract_exit(self, build):
+               self._build_exit(build)
+
+       def _task_complete(self, pkg):
+               self._completed_tasks.add(pkg)
+               self._unsatisfied_system_deps.discard(pkg)
+               self._choose_pkg_return_early = False
+
+       def _merge(self):
+
+               self._add_prefetchers()
+               self._add_packages()
+               pkg_queue = self._pkg_queue
+               failed_pkgs = self._failed_pkgs
+               portage.locks._quiet = self._background
+               portage.elog._emerge_elog_listener = self._elog_listener
+               rval = os.EX_OK
+
+               try:
+                       self._main_loop()
+               finally:
+                       self._main_loop_cleanup()
+                       portage.locks._quiet = False
+                       portage.elog._emerge_elog_listener = None
+                       if failed_pkgs:
+                               rval = failed_pkgs[-1].returncode
+
+               return rval
+
+       def _main_loop_cleanup(self):
+               del self._pkg_queue[:]
+               self._completed_tasks.clear()
+               self._deep_system_deps.clear()
+               self._unsatisfied_system_deps.clear()
+               self._choose_pkg_return_early = False
+               self._status_display.reset()
+               self._digraph = None
+               self._task_queues.fetch.clear()
+
+       def _choose_pkg(self):
+               """
+               Choose a task that has all its dependencies satisfied.
+               """
+
+               if self._choose_pkg_return_early:
+                       return None
+
+               if self._digraph is None:
+                       if (self._jobs or self._task_queues.merge) and \
+                               not ("--nodeps" in self.myopts and \
+                               (self._max_jobs is True or self._max_jobs > 1)):
+                               self._choose_pkg_return_early = True
+                               return None
+                       return self._pkg_queue.pop(0)
+
+               if not (self._jobs or self._task_queues.merge):
+                       return self._pkg_queue.pop(0)
+
+               self._prune_digraph()
+
+               chosen_pkg = None
+               later = set(self._pkg_queue)
+               for pkg in self._pkg_queue:
+                       later.remove(pkg)
+                       if not self._dependent_on_scheduled_merges(pkg, later):
+                               chosen_pkg = pkg
+                               break
+
+               if chosen_pkg is not None:
+                       self._pkg_queue.remove(chosen_pkg)
+
+               if chosen_pkg is None:
+                       # There's no point in searching for a package to
+                       # choose until at least one of the existing jobs
+                       # completes.
+                       self._choose_pkg_return_early = True
+
+               return chosen_pkg
+
+       def _dependent_on_scheduled_merges(self, pkg, later):
+               """
+               Traverse the subgraph of the given package's deep dependencies
+               to see if it contains any scheduled merges.
+               @param pkg: a package to check dependencies for
+               @type pkg: Package
+               @param later: packages for which dependence should be ignored
+                       since they will be merged later than pkg anyway and therefore
+                       delaying the merge of pkg will not result in a more optimal
+                       merge order
+               @type later: set
+               @rtype: bool
+               @returns: True if the package is dependent, False otherwise.
+               """
+
+               graph = self._digraph
+               completed_tasks = self._completed_tasks
+
+               dependent = False
+               traversed_nodes = set([pkg])
+               direct_deps = graph.child_nodes(pkg)
+               node_stack = direct_deps
+               direct_deps = frozenset(direct_deps)
+               while node_stack:
+                       node = node_stack.pop()
+                       if node in traversed_nodes:
+                               continue
+                       traversed_nodes.add(node)
+                       if not ((node.installed and node.operation == "nomerge") or \
+                               (node.operation == "uninstall" and \
+                               node not in direct_deps) or \
+                               node in completed_tasks or \
+                               node in later):
+                               dependent = True
+                               break
+                       node_stack.extend(graph.child_nodes(node))
+
+               return dependent
+
+       def _allocate_config(self, root):
+               """
+               Allocate a unique config instance for a task in order
+               to prevent interference between parallel tasks.
+               """
+               if self._config_pool[root]:
+                       temp_settings = self._config_pool[root].pop()
+               else:
+                       temp_settings = portage.config(clone=self.pkgsettings[root])
+               # For performance reasons, config.setcpv() isn't guaranteed to call
+               # config.reset(), so call it here to make sure all settings from the
+               # previous package get flushed out (such as PORTAGE_LOG_FILE).
+               temp_settings.reload()
+               temp_settings.reset()
+               return temp_settings
+
+       def _deallocate_config(self, settings):
+               self._config_pool[settings["ROOT"]].append(settings)
+
+       def _main_loop(self):
+
+               # Only allow 1 job max if a restart is scheduled
+               # due to a portage update.
+               if self._is_restart_scheduled() or \
+                       self._opts_no_background.intersection(self.myopts):
+                       self._set_max_jobs(1)
+
+               merge_queue = self._task_queues.merge
+
+               while self._schedule():
+                       if self._poll_event_handlers:
+                               self._poll_loop()
+
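+               # Once nothing new can be scheduled, keep polling until all
+               # running jobs and queued merges have finished.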
+               while True:
+                       self._schedule()
+                       if not (self._jobs or merge_queue):
+                               break
+                       if self._poll_event_handlers:
+                               self._poll_loop()
+
+       def _keep_scheduling(self):
+               return bool(self._pkg_queue and \
+                       not (self._failed_pkgs and not self._build_opts.fetchonly))
+
+       def _schedule_tasks(self):
+
+               # When the number of jobs drops to zero, process all waiting merges.
+               if not self._jobs and self._merge_wait_queue:
+                       for task in self._merge_wait_queue:
+                               task.addExitListener(self._merge_wait_exit_handler)
+                               self._task_queues.merge.add(task)
+                       self._status_display.merges = len(self._task_queues.merge)
+                       self._merge_wait_scheduled.extend(self._merge_wait_queue)
+                       del self._merge_wait_queue[:]
+
+               self._schedule_tasks_imp()
+               self._status_display.display()
+
+               state_change = 0
+               for q in self._task_queues.values():
+                       if q.schedule():
+                               state_change += 1
+
+               # Cancel prefetchers if they're the only reason
+               # the main poll loop is still running.
+               if self._failed_pkgs and not self._build_opts.fetchonly and \
+                       not (self._jobs or self._task_queues.merge) and \
+                       self._task_queues.fetch:
+                       self._task_queues.fetch.clear()
+                       state_change += 1
+
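+               # If any of the queues changed state above, there may be new
+               # scheduling opportunities, so run the scheduling pass and
+               # update the display once more before returning.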
+               if state_change:
+                       self._schedule_tasks_imp()
+                       self._status_display.display()
+
+               return self._keep_scheduling()
+
+       def _job_delay(self):
+               """
+               @rtype: bool
+               @returns: True if job scheduling should be delayed, False otherwise.
+               """
+
+               if self._jobs and self._max_load is not None:
+
+                       current_time = time.time()
+
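+                       # The minimum interval between job starts grows with
+                       # the number of running jobs. For example, with a
+                       # hypothetical _job_delay_factor of 0.1 and
+                       # _job_delay_exp of 1.5, four running jobs give
+                       # delay = 0.1 * 4 ** 1.5 = 0.8 seconds, capped at
+                       # _job_delay_max.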
+                       delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
+                       if delay > self._job_delay_max:
+                               delay = self._job_delay_max
+                       if (current_time - self._previous_job_start_time) < delay:
+                               return True
+
+               return False
+
+       def _schedule_tasks_imp(self):
+               """
+               @rtype: bool
+               @returns: True if state changed, False otherwise.
+               """
+
+               state_change = 0
+
+               while True:
+
+                       if not self._keep_scheduling():
+                               return bool(state_change)
+
+                       if self._choose_pkg_return_early or \
+                               self._merge_wait_scheduled or \
+                               (self._jobs and self._unsatisfied_system_deps) or \
+                               not self._can_add_job() or \
+                               self._job_delay():
+                               return bool(state_change)
+
+                       pkg = self._choose_pkg()
+                       if pkg is None:
+                               return bool(state_change)
+
+                       state_change += 1
+
+                       if not pkg.installed:
+                               self._pkg_count.curval += 1
+
+                       task = self._task(pkg)
+
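+                       # Installed packages go straight to the merge queue;
+                       # built packages and source builds both go to the
+                       # jobs queue, with _extract_exit or _build_exit as
+                       # their respective exit handlers.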
+                       if pkg.installed:
+                               merge = PackageMerge(merge=task)
+                               merge.addExitListener(self._merge_exit)
+                               self._task_queues.merge.add(merge)
+
+                       elif pkg.built:
+                               self._jobs += 1
+                               self._previous_job_start_time = time.time()
+                               self._status_display.running = self._jobs
+                               task.addExitListener(self._extract_exit)
+                               self._task_queues.jobs.add(task)
+
+                       else:
+                               self._jobs += 1
+                               self._previous_job_start_time = time.time()
+                               self._status_display.running = self._jobs
+                               task.addExitListener(self._build_exit)
+                               self._task_queues.jobs.add(task)
+
+               return bool(state_change)
+
+       def _task(self, pkg):
+
+               pkg_to_replace = None
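+               # For anything other than an uninstall, look up the currently
+               # installed package occupying the same slot (if any) so that
+               # the merge can treat it as the package being replaced.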
+               if pkg.operation != "uninstall":
+                       vardb = pkg.root_config.trees["vartree"].dbapi
+                       previous_cpv = vardb.match(pkg.slot_atom)
+                       if previous_cpv:
+                               previous_cpv = previous_cpv.pop()
+                               pkg_to_replace = self._pkg(previous_cpv,
+                                       "installed", pkg.root_config, installed=True)
+
+               task = MergeListItem(args_set=self._args_set,
+                       background=self._background, binpkg_opts=self._binpkg_opts,
+                       build_opts=self._build_opts,
+                       config_pool=self._ConfigPool(pkg.root,
+                       self._allocate_config, self._deallocate_config),
+                       emerge_opts=self.myopts,
+                       find_blockers=self._find_blockers(pkg), logger=self._logger,
+                       mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
+                       pkg_to_replace=pkg_to_replace,
+                       prefetcher=self._prefetchers.get(pkg),
+                       scheduler=self._sched_iface,
+                       settings=self._allocate_config(pkg.root),
+                       statusMessage=self._status_msg,
+                       world_atom=self._world_atom)
+
+               return task
+
+       def _failed_pkg_msg(self, failed_pkg, action, preposition):
+               pkg = failed_pkg.pkg
+               msg = "%s to %s %s" % \
+                       (bad("Failed"), action, colorize("INFORM", pkg.cpv))
+               if pkg.root != "/":
+                       msg += " %s %s" % (preposition, pkg.root)
+
+               log_path = self._locate_failure_log(failed_pkg)
+               if log_path is not None:
+                       msg += ", Log file:"
+               self._status_msg(msg)
+
+               if log_path is not None:
+                       self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
+
+       def _status_msg(self, msg):
+               """
+               Display a brief status message (no newlines) in the status display.
+               This is called by tasks to provide feedback to the user. It
+               delegates responsibility for generating \r and \n control
+               characters to the status display, so that lines are created
+               or erased when necessary and appropriate.
+
+               @type msg: str
+               @param msg: a brief status message (no newlines allowed)
+               """
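+               # Example call (hypothetical message text):
+               #   self._status_msg("Failed to emerge sys-apps/foo-1.0")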
+               if not self._background:
+                       writemsg_level("\n")
+               self._status_display.displayMessage(msg)
+
+       def _save_resume_list(self):
+               """
+               Do this before verifying the ebuild Manifests, since it might
+               be possible for the user to use --resume --skipfirst to get
+               past a non-essential package with a broken digest.
+               """
+               mtimedb = self._mtimedb
+               mtimedb["resume"]["mergelist"] = [list(x) \
+                       for x in self._mergelist \
+                       if isinstance(x, Package) and x.operation == "merge"]
+
+               mtimedb.commit()
+
+       def _calc_resume_list(self):
+               """
+               Use the current resume list to calculate a new one,
+               dropping any packages with unsatisfied deps.
+               @rtype: bool
+               @returns: True if successful, False otherwise.
+               """
+               print colorize("GOOD", "*** Resuming merge...")
+
+               if self._show_list():
+                       if "--tree" in self.myopts:
+                               portage.writemsg_stdout("\n" + \
+                                       darkgreen("These are the packages that " + \
+                                       "would be merged, in reverse order:\n\n"))
+
+                       else:
+                               portage.writemsg_stdout("\n" + \
+                                       darkgreen("These are the packages that " + \
+                                       "would be merged, in order:\n\n"))
+
+               show_spinner = "--quiet" not in self.myopts and \
+                       "--nodeps" not in self.myopts
+
+               if show_spinner:
+                       print "Calculating dependencies  ",
+
+               myparams = create_depgraph_params(self.myopts, None)
+               success = False
+               e = None
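+               # resume_depgraph() rebuilds the dependency graph from the
+               # saved resume mergelist; dropped_tasks holds any packages
+               # that had to be dropped because their dependencies are no
+               # longer satisfiable.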
+               try:
+                       success, mydepgraph, dropped_tasks = resume_depgraph(
+                               self.settings, self.trees, self._mtimedb, self.myopts,
+                               myparams, self._spinner)
+               except depgraph.UnsatisfiedResumeDep, exc:
+                       # rename variable to avoid python-3.0 error:
+                       # SyntaxError: can not delete variable 'e' referenced in nested
+                       #              scope
+                       e = exc
+                       mydepgraph = e.depgraph
+                       dropped_tasks = set()
+
+               if show_spinner:
+                       print "\b\b... done!"
+
+               if e is not None:
+                       def unsatisfied_resume_dep_msg():
+                               mydepgraph.display_problems()
+                               out = portage.output.EOutput()
+                               out.eerror("One or more packages are either masked or " + \
+                                       "have missing dependencies:")
+                               out.eerror("")
+                               indent = "  "
+                               show_parents = set()
+                               for dep in e.value:
+                                       if dep.parent in show_parents:
+                                               continue
+                                       show_parents.add(dep.parent)
+                                       if dep.atom is None:
+                                               out.eerror(indent + "Masked package:")
+                                               out.eerror(2 * indent + str(dep.parent))
+                                               out.eerror("")
+                                       else:
+                                               out.eerror(indent + str(dep.atom) + " pulled in by:")
+                                               out.eerror(2 * indent + str(dep.parent))
+                                               out.eerror("")
+                               msg = "The resume list contains packages " + \
+                                       "that are either masked or have " + \
+                                       "unsatisfied dependencies. " + \
+                                       "Please restart/continue " + \
+                                       "the operation manually, or use --skipfirst " + \
+                                       "to skip the first package in the list and " + \
+                                       "any other packages that may be " + \
+                                       "masked or have missing dependencies."
+                               for line in textwrap.wrap(msg, 72):
+                                       out.eerror(line)
+                       self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
+                       return False
+
+               if success and self._show_list():
+                       mylist = mydepgraph.altlist()
+                       if mylist:
+                               if "--tree" in self.myopts:
+                                       mylist.reverse()
+                               mydepgraph.display(mylist, favorites=self._favorites)
+
+               if not success:
+                       self._post_mod_echo_msgs.append(mydepgraph.display_problems)
+                       return False
+               mydepgraph.display_problems()
+
+               mylist = mydepgraph.altlist()
+               mydepgraph.break_refs(mylist)
+               mydepgraph.break_refs(dropped_tasks)
+               self._mergelist = mylist
+               self._set_digraph(mydepgraph.schedulerGraph())
+
+               msg_width = 75
+               for task in dropped_tasks:
+                       if not (isinstance(task, Package) and task.operation == "merge"):
+                               continue
+                       pkg = task
+                       msg = "emerge --keep-going:" + \
+                               " %s" % (pkg.cpv,)
+                       if pkg.root != "/":
+                               msg += " for %s" % (pkg.root,)
+                       msg += " dropped due to unsatisfied dependency."
+                       for line in textwrap.wrap(msg, msg_width):
+                               eerror(line, phase="other", key=pkg.cpv)
+                       settings = self.pkgsettings[pkg.root]
+                       # Ensure that log collection from $T is disabled inside
+                       # elog_process(), since any logs that might exist are
+                       # not valid here.
+                       settings.pop("T", None)
+                       portage.elog.elog_process(pkg.cpv, settings)
+                       self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
+
+               return True
+
+       def _show_list(self):
+               myopts = self.myopts
+               if "--quiet" not in myopts and \
+                       ("--ask" in myopts or "--tree" in myopts or \
+                       "--verbose" in myopts):
+                       return True
+               return False
+
+       def _world_atom(self, pkg):
+               """
+               Add the package to the world file, but only if
+               it's supposed to be added. Otherwise, do nothing.
+               """
+
+               if set(("--buildpkgonly", "--fetchonly",
+                       "--fetch-all-uri",
+                       "--oneshot", "--onlydeps",
+                       "--pretend")).intersection(self.myopts):
+                       return
+
+               if pkg.root != self.target_root:
+                       return
+
+               args_set = self._args_set
+               if not args_set.findAtomForPackage(pkg):
+                       return
+
+               logger = self._logger
+               pkg_count = self._pkg_count
+               root_config = pkg.root_config
+               world_set = root_config.sets["world"]
+               world_locked = False
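+               # Not every package set implementation supports locking,
+               # reloading or adding, so probe with hasattr() before calling
+               # lock(), load() and add().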
+               if hasattr(world_set, "lock"):
+                       world_set.lock()
+                       world_locked = True
+
+               try:
+                       if hasattr(world_set, "load"):
+                               world_set.load() # maybe it's changed on disk
+
+                       atom = create_world_atom(pkg, args_set, root_config)
+                       if atom:
+                               if hasattr(world_set, "add"):
+                                       self._status_msg(('Recording %s in "world" ' + \
+                                               'favorites file...') % atom)
+                                       logger.log(" === (%s of %s) Updating world file (%s)" % \
+                                               (pkg_count.curval, pkg_count.maxval, pkg.cpv))
+                                       world_set.add(atom)
+                               else:
+                                       writemsg_level('\n!!! Unable to record %s in "world"\n' % \
+                                               (atom,), level=logging.WARN, noiselevel=-1)
+               finally:
+                       if world_locked:
+                               world_set.unlock()
+
+       def _pkg(self, cpv, type_name, root_config, installed=False):
+               """
+               Get a package instance from the cache, or create a new
+               one if necessary. Raises KeyError from aux_get if it
+               fails for some reason (the package does not exist or is
+               corrupt).
+               """
+               operation = "merge"
+               if installed:
+                       operation = "nomerge"
+
+               if self._digraph is not None:
+                       # Reuse existing instance when available.
+                       pkg = self._digraph.get(
+                               (type_name, root_config.root, cpv, operation))
+                       if pkg is not None:
+                               return pkg
+
+               tree_type = depgraph.pkg_tree_map[type_name]
+               db = root_config.trees[tree_type].dbapi
+               db_keys = list(self.trees[root_config.root][
+                       tree_type].dbapi._aux_cache_keys)
+               metadata = izip(db_keys, db.aux_get(cpv, db_keys))
+               pkg = Package(cpv=cpv, metadata=metadata,
+                       root_config=root_config, installed=installed)
+               if type_name == "ebuild":
+                       settings = self.pkgsettings[root_config.root]
+                       settings.setcpv(pkg)
+                       pkg.metadata["USE"] = settings["PORTAGE_USE"]
+                       pkg.metadata['CHOST'] = settings.get('CHOST', '')
+
+               return pkg
index f0b994a4c7018859de77571f56ae1fa88c6db22b..47c20a2f3c63a90055c21ae098c32c8d03d0ba25 100644 (file)
@@ -9,8 +9,6 @@ import shlex
 import signal
 import sys
 import textwrap
-import weakref
-import gc
 import os, stat
 import platform
 
@@ -23,13 +21,11 @@ except ImportError:
 
 from portage import digraph
 from portage.const import NEWS_LIB_PATH
-from portage.cache.mappings import slot_dict_class
 
 import _emerge.help
 import portage.xpak, commands, errno, re, socket, time
-from portage.output import blue, bold, colorize, darkblue, darkgreen, green, \
-       nc_len, red, teal, turquoise, \
-       xtermTitleReset, yellow
+from portage.output import blue, bold, colorize, darkgreen, \
+       red, xtermTitleReset, yellow
 from portage.output import create_color_func
 good = create_color_func("GOOD")
 bad = create_color_func("BAD")
@@ -42,7 +38,6 @@ import portage.locks
 import portage.exception
 from portage.cache.cache_errors import CacheError
 from portage.data import secpass
-from portage.elog.messages import eerror
 from portage.util import normalize_path as normpath
 from portage.util import cmp_sort_key, writemsg, writemsg_level
 from portage.sets import load_default_config, SETPREFIX
@@ -50,6704 +45,146 @@ from portage.sets.base import InternalPackageSet
 
 from itertools import chain, izip
 
-from _emerge.SlotObject import SlotObject
-from _emerge.DepPriority import DepPriority
-from _emerge.BlockerDepPriority import BlockerDepPriority
-from _emerge.UnmergeDepPriority import UnmergeDepPriority
-from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
+from _emerge.clear_caches import clear_caches
+from _emerge.countdown import countdown
+from _emerge.create_depgraph_params import create_depgraph_params
+from _emerge.Dependency import Dependency
+from _emerge.depgraph import depgraph, resume_depgraph
 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.emergelog import emergelog
+from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
+from _emerge.is_valid_package_atom import is_valid_package_atom
+from _emerge.MetadataRegen import MetadataRegen
 from _emerge.Package import Package
-from _emerge.Blocker import Blocker
-from _emerge.BlockerDB import BlockerDB
-from _emerge.EbuildFetcher import EbuildFetcher
-from _emerge.EbuildPhase import EbuildPhase
-from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
-from _emerge.PackageMerge import PackageMerge
-from _emerge.DependencyArg import DependencyArg
-from _emerge.AtomArg import AtomArg
-from _emerge.PackageArg import PackageArg
-from _emerge.SetArg import SetArg
-from _emerge.Dependency import Dependency
-from _emerge.BlockerCache import BlockerCache
-from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
-from _emerge.RepoDisplay import RepoDisplay
-from _emerge.UseFlagDisplay import UseFlagDisplay
-from _emerge.SequentialTaskQueue import SequentialTaskQueue
 from _emerge.ProgressHandler import ProgressHandler
-from _emerge.stdout_spinner import stdout_spinner
-from _emerge.JobStatusDisplay import JobStatusDisplay
-from _emerge.PollScheduler import PollScheduler
-from _emerge.search import search
-from _emerge.visible import visible
-from _emerge.emergelog import emergelog, _emerge_log_dir
-from _emerge.userquery import userquery
-from _emerge.countdown import countdown
-from _emerge.unmerge import unmerge
-from _emerge.MergeListItem import MergeListItem
-from _emerge.MetadataRegen import MetadataRegen
 from _emerge.RootConfig import RootConfig
-from _emerge.format_size import format_size
-from _emerge.PackageCounters import PackageCounters
-from _emerge.FakeVartree import FakeVartree
+from _emerge.Scheduler import Scheduler
+from _emerge.search import search
+from _emerge.SetArg import SetArg
 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+from _emerge.stdout_spinner import stdout_spinner
+from _emerge.unmerge import unmerge
+from _emerge.UnmergeDepPriority import UnmergeDepPriority
+from _emerge.UseFlagDisplay import UseFlagDisplay
+from _emerge.userquery import userquery
 
-actions = frozenset([
-"clean", "config", "depclean",
-"info", "list-sets", "metadata",
-"prune", "regen",  "search",
-"sync",  "unmerge", "version",
-])
-options=[
-"--ask",          "--alphabetical",
-"--buildpkg",     "--buildpkgonly",
-"--changelog",    "--columns",
-"--complete-graph",
-"--debug",        "--deep",
-"--digest",
-"--emptytree",
-"--fetchonly",    "--fetch-all-uri",
-"--getbinpkg",    "--getbinpkgonly",
-"--help",         "--ignore-default-opts",
-"--keep-going",
-"--noconfmem",
-"--newuse",
-"--nodeps",       "--noreplace",
-"--nospinner",    "--oneshot",
-"--onlydeps",     "--pretend",
-"--quiet",        "--resume",
-"--searchdesc",   "--selective",
-"--skipfirst",
-"--tree",
-"--update",
-"--usepkg",       "--usepkgonly",
-"--verbose",
-]
-
-shortmapping={
-"1":"--oneshot",
-"a":"--ask",
-"b":"--buildpkg",  "B":"--buildpkgonly",
-"c":"--clean",     "C":"--unmerge",
-"d":"--debug",     "D":"--deep",
-"e":"--emptytree",
-"f":"--fetchonly", "F":"--fetch-all-uri",
-"g":"--getbinpkg", "G":"--getbinpkgonly",
-"h":"--help",
-"k":"--usepkg",    "K":"--usepkgonly",
-"l":"--changelog",
-"n":"--noreplace", "N":"--newuse",
-"o":"--onlydeps",  "O":"--nodeps",
-"p":"--pretend",   "P":"--prune",
-"q":"--quiet",
-"s":"--search",    "S":"--searchdesc",
-"t":"--tree",
-"u":"--update",
-"v":"--verbose",   "V":"--version"
-}
-
-def getgccversion(chost):
-       """
-       rtype: C{str}
-       return:  the current in-use gcc version
-       """
-
-       gcc_ver_command = 'gcc -dumpversion'
-       gcc_ver_prefix = 'gcc-'
-
-       gcc_not_found_error = red(
-       "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
-       "!!! to update the environment of this terminal and possibly\n" +
-       "!!! other terminals also.\n"
-       )
-
-       mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
-       if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
-               return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
-
-       mystatus, myoutput = commands.getstatusoutput(
-               chost + "-" + gcc_ver_command)
-       if mystatus == os.EX_OK:
-               return gcc_ver_prefix + myoutput
-
-       mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
-       if mystatus == os.EX_OK:
-               return gcc_ver_prefix + myoutput
-
-       portage.writemsg(gcc_not_found_error, noiselevel=-1)
-       return "[unavailable]"
-
-def getportageversion(portdir, target_root, profile, chost, vardb):
-       profilever = "unavailable"
-       if profile:
-               realpath = os.path.realpath(profile)
-               basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
-               if realpath.startswith(basepath):
-                       profilever = realpath[1 + len(basepath):]
-               else:
-                       try:
-                               profilever = "!" + os.readlink(profile)
-                       except (OSError):
-                               pass
-               del realpath, basepath
-
-       libcver=[]
-       libclist  = vardb.match("virtual/libc")
-       libclist += vardb.match("virtual/glibc")
-       libclist  = portage.util.unique_array(libclist)
-       for x in libclist:
-               xs=portage.catpkgsplit(x)
-               if libcver:
-                       libcver+=","+"-".join(xs[1:])
-               else:
-                       libcver="-".join(xs[1:])
-       if libcver==[]:
-               libcver="unavailable"
-
-       gccver = getgccversion(chost)
-       unameout=platform.release()+" "+platform.machine()
-
-       return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
-
-def create_depgraph_params(myopts, myaction):
-       #configure emerge engine parameters
-       #
-       # self:      include _this_ package regardless of if it is merged.
-       # selective: exclude the package if it is merged
-       # recurse:   go into the dependencies
-       # deep:      go into the dependencies of already merged packages
-       # empty:     pretend nothing is merged
-       # complete:  completely account for all known dependencies
-       # remove:    build graph for use in removing packages
-       myparams = set(["recurse"])
-
-       if myaction == "remove":
-               myparams.add("remove")
-               myparams.add("complete")
-               return myparams
-
-       if "--update" in myopts or \
-               "--newuse" in myopts or \
-               "--reinstall" in myopts or \
-               "--noreplace" in myopts:
-               myparams.add("selective")
-       if "--emptytree" in myopts:
-               myparams.add("empty")
-               myparams.discard("selective")
-       if "--nodeps" in myopts:
-               myparams.discard("recurse")
-       if "--deep" in myopts:
-               myparams.add("deep")
-       if "--complete-graph" in myopts:
-               myparams.add("complete")
-       return myparams
-
-def create_world_atom(pkg, args_set, root_config):
-       """Create a new atom for the world file if one does not exist.  If the
-       argument atom is precise enough to identify a specific slot then a slot
-       atom will be returned. Atoms that are in the system set may also be stored
-       in world since system atoms can only match one slot while world atoms can
-       be greedy with respect to slots.  Unslotted system packages will not be
-       stored in world."""
-
-       arg_atom = args_set.findAtomForPackage(pkg)
-       if not arg_atom:
-               return None
-       cp = portage.dep_getkey(arg_atom)
-       new_world_atom = cp
-       sets = root_config.sets
-       portdb = root_config.trees["porttree"].dbapi
-       vardb = root_config.trees["vartree"].dbapi
-       available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
-               for cpv in portdb.match(cp))
-       slotted = len(available_slots) > 1 or \
-               (len(available_slots) == 1 and "0" not in available_slots)
-       if not slotted:
-               # check the vdb in case this is multislot
-               available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
-                       for cpv in vardb.match(cp))
-               slotted = len(available_slots) > 1 or \
-                       (len(available_slots) == 1 and "0" not in available_slots)
-       if slotted and arg_atom != cp:
-               # If the user gave a specific atom, store it as a
-               # slot atom in the world file.
-               slot_atom = pkg.slot_atom
-
-               # For USE=multislot, there are a couple of cases to
-               # handle here:
-               #
-               # 1) SLOT="0", but the real SLOT spontaneously changed to some
-               #    unknown value, so just record an unslotted atom.
-               #
-               # 2) SLOT comes from an installed package and there is no
-               #    matching SLOT in the portage tree.
-               #
-               # Make sure that the slot atom is available in either the
-               # portdb or the vardb, since otherwise the user certainly
-               # doesn't want the SLOT atom recorded in the world file
-               # (case 1 above).  If it's only available in the vardb,
-               # the user may be trying to prevent a USE=multislot
-               # package from being removed by --depclean (case 2 above).
-
-               mydb = portdb
-               if not portdb.match(slot_atom):
-                       # SLOT seems to come from an installed multislot package
-                       mydb = vardb
-               # If there is no installed package matching the SLOT atom,
-               # it probably changed SLOT spontaneously due to USE=multislot,
-               # so just record an unslotted atom.
-               if vardb.match(slot_atom):
-                       # Now verify that the argument is precise
-                       # enough to identify a specific slot.
-                       matches = mydb.match(arg_atom)
-                       matched_slots = set()
-                       for cpv in matches:
-                               matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
-                       if len(matched_slots) == 1:
-                               new_world_atom = slot_atom
-
-       if new_world_atom == sets["world"].findAtomForPackage(pkg):
-               # Both atoms would be identical, so there's nothing to add.
-               return None
-       if not slotted:
-               # Unlike world atoms, system atoms are not greedy for slots, so they
-               # can't be safely excluded from world if they are slotted.
-               system_atom = sets["system"].findAtomForPackage(pkg)
-               if system_atom:
-                       if not portage.dep_getkey(system_atom).startswith("virtual/"):
-                               return None
-                       # System virtuals aren't safe to exclude from world since they can
-                       # match multiple old-style virtuals but only one of them will be
-                       # pulled in by update or depclean.
-                       providers = portdb.mysettings.getvirtuals().get(
-                               portage.dep_getkey(system_atom))
-                       if providers and len(providers) == 1 and providers[0] == cp:
-                               return None
-       return new_world_atom
-
-def filter_iuse_defaults(iuse):
-       for flag in iuse:
-               if flag.startswith("+") or flag.startswith("-"):
-                       yield flag[1:]
-               else:
-                       yield flag
-
-def _find_deep_system_runtime_deps(graph):
-       deep_system_deps = set()
-       node_stack = []
-       for node in graph:
-               if not isinstance(node, Package) or \
-                       node.operation == 'uninstall':
-                       continue
-               if node.root_config.sets['system'].findAtomForPackage(node):
-                       node_stack.append(node)
-
-       def ignore_priority(priority):
-               """
-               Ignore non-runtime priorities.
-               """
-               if isinstance(priority, DepPriority) and \
-                       (priority.runtime or priority.runtime_post):
-                       return False
-               return True
-
-       while node_stack:
-               node = node_stack.pop()
-               if node in deep_system_deps:
-                       continue
-               deep_system_deps.add(node)
-               for child in graph.child_nodes(node, ignore_priority=ignore_priority):
-                       if not isinstance(child, Package) or \
-                               child.operation == 'uninstall':
-                               continue
-                       node_stack.append(child)
-
-       return deep_system_deps
-
-def get_masking_status(pkg, pkgsettings, root_config):
-
-       mreasons = portage.getmaskingstatus(
-               pkg, settings=pkgsettings,
-               portdb=root_config.trees["porttree"].dbapi)
-
-       if not pkg.installed:
-               if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
-                       mreasons.append("CHOST: %s" % \
-                               pkg.metadata["CHOST"])
-
-       if not pkg.metadata["SLOT"]:
-               mreasons.append("invalid: SLOT is undefined")
-
-       return mreasons
-
-def get_mask_info(root_config, cpv, pkgsettings,
-       db, pkg_type, built, installed, db_keys):
-       eapi_masked = False
-       try:
-               metadata = dict(izip(db_keys,
-                       db.aux_get(cpv, db_keys)))
-       except KeyError:
-               metadata = None
-       if metadata and not built:
-               pkgsettings.setcpv(cpv, mydb=metadata)
-               metadata["USE"] = pkgsettings["PORTAGE_USE"]
-               metadata['CHOST'] = pkgsettings.get('CHOST', '')
-       if metadata is None:
-               mreasons = ["corruption"]
-       else:
-               eapi = metadata['EAPI']
-               if eapi[:1] == '-':
-                       eapi = eapi[1:]
-               if not portage.eapi_is_supported(eapi):
-                       mreasons = ['EAPI %s' % eapi]
-               else:
-                       pkg = Package(type_name=pkg_type, root_config=root_config,
-                               cpv=cpv, built=built, installed=installed, metadata=metadata)
-                       mreasons = get_masking_status(pkg, pkgsettings, root_config)
-       return metadata, mreasons
-
-def show_masked_packages(masked_packages):
-       shown_licenses = set()
-       shown_comments = set()
-       # Maybe there is both an ebuild and a binary. Only
-       # show one of them to avoid redundant appearance.
-       shown_cpvs = set()
-       have_eapi_mask = False
-       for (root_config, pkgsettings, cpv,
-               metadata, mreasons) in masked_packages:
-               if cpv in shown_cpvs:
-                       continue
-               shown_cpvs.add(cpv)
-               comment, filename = None, None
-               if "package.mask" in mreasons:
-                       comment, filename = \
-                               portage.getmaskingreason(
-                               cpv, metadata=metadata,
-                               settings=pkgsettings,
-                               portdb=root_config.trees["porttree"].dbapi,
-                               return_location=True)
-               missing_licenses = []
-               if metadata:
-                       if not portage.eapi_is_supported(metadata["EAPI"]):
-                               have_eapi_mask = True
-                       try:
-                               missing_licenses = \
-                                       pkgsettings._getMissingLicenses(
-                                               cpv, metadata)
-                       except portage.exception.InvalidDependString:
-                               # This will have already been reported
-                               # above via mreasons.
-                               pass
-
-               print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
-               if comment and comment not in shown_comments:
-                       print filename+":"
-                       print comment
-                       shown_comments.add(comment)
-               portdb = root_config.trees["porttree"].dbapi
-               for l in missing_licenses:
-                       l_path = portdb.findLicensePath(l)
-                       if l in shown_licenses:
-                               continue
-                       msg = ("A copy of the '%s' license" + \
-                       " is located at '%s'.") % (l, l_path)
-                       print msg
-                       print
-                       shown_licenses.add(l)
-       return have_eapi_mask
-
-class depgraph(object):
-
-       pkg_tree_map = RootConfig.pkg_tree_map
-
-       _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
-
-       def __init__(self, settings, trees, myopts, myparams, spinner):
-               self.settings = settings
-               self.target_root = settings["ROOT"]
-               self.myopts = myopts
-               self.myparams = myparams
-               self.edebug = 0
-               if settings.get("PORTAGE_DEBUG", "") == "1":
-                       self.edebug = 1
-               self.spinner = spinner
-               self._running_root = trees["/"]["root_config"]
-               self._opts_no_restart = Scheduler._opts_no_restart
-               self.pkgsettings = {}
-               # Maps slot atom to package for each Package added to the graph.
-               self._slot_pkg_map = {}
-               # Maps nodes to the reasons they were selected for reinstallation.
-               self._reinstall_nodes = {}
-               self.mydbapi = {}
-               self.trees = {}
-               self._trees_orig = trees
-               self.roots = {}
-               # Contains a filtered view of preferred packages that are selected
-               # from available repositories.
-               self._filtered_trees = {}
-               # Contains installed packages and new packages that have been added
-               # to the graph.
-               self._graph_trees = {}
-               # All Package instances
-               self._pkg_cache = {}
-               for myroot in trees:
-                       self.trees[myroot] = {}
-                       # Create a RootConfig instance that references
-                       # the FakeVartree instead of the real one.
-                       self.roots[myroot] = RootConfig(
-                               trees[myroot]["vartree"].settings,
-                               self.trees[myroot],
-                               trees[myroot]["root_config"].setconfig)
-                       for tree in ("porttree", "bintree"):
-                               self.trees[myroot][tree] = trees[myroot][tree]
-                       self.trees[myroot]["vartree"] = \
-                               FakeVartree(trees[myroot]["root_config"],
-                                       pkg_cache=self._pkg_cache)
-                       self.pkgsettings[myroot] = portage.config(
-                               clone=self.trees[myroot]["vartree"].settings)
-                       self._slot_pkg_map[myroot] = {}
-                       vardb = self.trees[myroot]["vartree"].dbapi
-                       preload_installed_pkgs = "--nodeps" not in self.myopts and \
-                               "--buildpkgonly" not in self.myopts
-                       # This fakedbapi instance will model the state that the vdb will
-                       # have after new packages have been installed.
-                       fakedb = PackageVirtualDbapi(vardb.settings)
-                       if preload_installed_pkgs:
-                               for pkg in vardb:
-                                       self.spinner.update()
-                                       # This triggers metadata updates via FakeVartree.
-                                       vardb.aux_get(pkg.cpv, [])
-                                       fakedb.cpv_inject(pkg)
-
-                       # Now that the vardb state is cached in our FakeVartree,
-                       # we won't be needing the real vartree cache for awhile.
-                       # To make some room on the heap, clear the vardbapi
-                       # caches.
-                       trees[myroot]["vartree"].dbapi._clear_cache()
-                       gc.collect()
-
-                       self.mydbapi[myroot] = fakedb
-                       def graph_tree():
-                               pass
-                       graph_tree.dbapi = fakedb
-                       self._graph_trees[myroot] = {}
-                       self._filtered_trees[myroot] = {}
-                       # Substitute the graph tree for the vartree in dep_check() since we
-                       # want atom selections to be consistent with package selections
-                       # have already been made.
-                       self._graph_trees[myroot]["porttree"]   = graph_tree
-                       self._graph_trees[myroot]["vartree"]    = graph_tree
-                       def filtered_tree():
-                               pass
-                       filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
-                       self._filtered_trees[myroot]["porttree"] = filtered_tree
-
-                       # Passing in graph_tree as the vartree here could lead to better
-                       # atom selections in some cases by causing atoms for packages that
-                       # have been added to the graph to be preferred over other choices.
-                       # However, it can trigger atom selections that result in
-                       # unresolvable direct circular dependencies. For example, this
-                       # happens with gwydion-dylan which depends on either itself or
-                       # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
-                       # gwydion-dylan-bin needs to be selected in order to avoid a
-                       # an unresolvable direct circular dependency.
-                       #
-                       # To solve the problem described above, pass in "graph_db" so that
-                       # packages that have been added to the graph are distinguishable
-                       # from other available packages and installed packages. Also, pass
-                       # the parent package into self._select_atoms() calls so that
-                       # unresolvable direct circular dependencies can be detected and
-                       # avoided when possible.
-                       self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
-                       self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
-
-                       dbs = []
-                       portdb = self.trees[myroot]["porttree"].dbapi
-                       bindb  = self.trees[myroot]["bintree"].dbapi
-                       vardb  = self.trees[myroot]["vartree"].dbapi
-                       #               (db, pkg_type, built, installed, db_keys)
-                       if "--usepkgonly" not in self.myopts:
-                               db_keys = list(portdb._aux_cache_keys)
-                               dbs.append((portdb, "ebuild", False, False, db_keys))
-                       if "--usepkg" in self.myopts:
-                               db_keys = list(bindb._aux_cache_keys)
-                               dbs.append((bindb,  "binary", True, False, db_keys))
-                       db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
-                       dbs.append((vardb, "installed", True, True, db_keys))
-                       self._filtered_trees[myroot]["dbs"] = dbs
-                       if "--usepkg" in self.myopts:
-                               self.trees[myroot]["bintree"].populate(
-                                       "--getbinpkg" in self.myopts,
-                                       "--getbinpkgonly" in self.myopts)
-               del trees
-
-               self.digraph=portage.digraph()
-               # contains all sets added to the graph
-               self._sets = {}
-               # contains atoms given as arguments
-               self._sets["args"] = InternalPackageSet()
-               # contains all atoms from all sets added to the graph, including
-               # atoms given as arguments
-               self._set_atoms = InternalPackageSet()
-               self._atom_arg_map = {}
-               # contains all nodes pulled in by self._set_atoms
-               self._set_nodes = set()
-               # Contains only Blocker -> Uninstall edges
-               self._blocker_uninstalls = digraph()
-               # Contains only Package -> Blocker edges
-               self._blocker_parents = digraph()
-               # Contains only irrelevant Package -> Blocker edges
-               self._irrelevant_blockers = digraph()
-               # Contains only unsolvable Package -> Blocker edges
-               self._unsolvable_blockers = digraph()
-               # Contains all Blocker -> Blocked Package edges
-               self._blocked_pkgs = digraph()
-               # Contains world packages that have been protected from
-               # uninstallation but may not have been added to the graph
-               # if the graph is not complete yet.
-               self._blocked_world_pkgs = {}
-               self._slot_collision_info = {}
-               # Slot collision nodes are not allowed to block other packages since
-               # blocker validation is only able to account for one package per slot.
-               self._slot_collision_nodes = set()
-               self._parent_atoms = {}
-               self._slot_conflict_parent_atoms = set()
-               self._serialized_tasks_cache = None
-               self._scheduler_graph = None
-               self._displayed_list = None
-               self._pprovided_args = []
-               self._missing_args = []
-               self._masked_installed = set()
-               self._unsatisfied_deps_for_display = []
-               self._unsatisfied_blockers_for_display = None
-               self._circular_deps_for_display = None
-               self._dep_stack = []
-               self._dep_disjunctive_stack = []
-               self._unsatisfied_deps = []
-               self._initially_unsatisfied_deps = []
-               self._ignored_deps = []
-               self._required_set_names = set(["system", "world"])
-               self._select_atoms = self._select_atoms_highest_available
-               self._select_package = self._select_pkg_highest_available
-               self._highest_pkg_cache = {}
-
-       def _show_slot_collision_notice(self):
-               """Show an informational message advising the user to mask one of the
-               the packages. In some cases it may be possible to resolve this
-               automatically, but support for backtracking (removal nodes that have
-               already been selected) will be required in order to handle all possible
-               cases.
-               """
-
-               if not self._slot_collision_info:
-                       return
-
-               self._show_merge_list()
-
-               msg = []
-               msg.append("\n!!! Multiple package instances within a single " + \
-                       "package slot have been pulled\n")
-               msg.append("!!! into the dependency graph, resulting" + \
-                       " in a slot conflict:\n\n")
-               indent = "  "
-               # Max number of parents shown, to avoid flooding the display.
-               max_parents = 3
-               explanation_columns = 70
-               explanations = 0
-               for (slot_atom, root), slot_nodes \
-                       in self._slot_collision_info.iteritems():
-                       msg.append(str(slot_atom))
-                       msg.append("\n\n")
-
-                       for node in slot_nodes:
-                               msg.append(indent)
-                               msg.append(str(node))
-                               parent_atoms = self._parent_atoms.get(node)
-                               if parent_atoms:
-                                       pruned_list = set()
-                                       # Prefer conflict atoms over others.
-                                       for parent_atom in parent_atoms:
-                                               if len(pruned_list) >= max_parents:
-                                                       break
-                                               if parent_atom in self._slot_conflict_parent_atoms:
-                                                       pruned_list.add(parent_atom)
-
-                                       # If this package was pulled in by conflict atoms then
-                                       # show those alone since those are the most interesting.
-                                       if not pruned_list:
-                                               # When generating the pruned list, prefer instances
-                                               # of DependencyArg over instances of Package.
-                                               for parent_atom in parent_atoms:
-                                                       if len(pruned_list) >= max_parents:
-                                                               break
-                                                       parent, atom = parent_atom
-                                                       if isinstance(parent, DependencyArg):
-                                                               pruned_list.add(parent_atom)
-                                               # Prefer Packages instances that themselves have been
-                                               # pulled into collision slots.
-                                               for parent_atom in parent_atoms:
-                                                       if len(pruned_list) >= max_parents:
-                                                               break
-                                                       parent, atom = parent_atom
-                                                       if isinstance(parent, Package) and \
-                                                               (parent.slot_atom, parent.root) \
-                                                               in self._slot_collision_info:
-                                                               pruned_list.add(parent_atom)
-                                               for parent_atom in parent_atoms:
-                                                       if len(pruned_list) >= max_parents:
-                                                               break
-                                                       pruned_list.add(parent_atom)
-                                       omitted_parents = len(parent_atoms) - len(pruned_list)
-                                       parent_atoms = pruned_list
-                                       msg.append(" pulled in by\n")
-                                       for parent_atom in parent_atoms:
-                                               parent, atom = parent_atom
-                                               msg.append(2*indent)
-                                               if isinstance(parent,
-                                                       (PackageArg, AtomArg)):
-                                                       # For PackageArg and AtomArg types, it's
-                                                       # redundant to display the atom attribute.
-                                                       msg.append(str(parent))
-                                               else:
-                                                       # Display the specific atom from SetArg or
-                                                       # Package types.
-                                                       msg.append("%s required by %s" % (atom, parent))
-                                               msg.append("\n")
-                                       if omitted_parents:
-                                               msg.append(2*indent)
-                                               msg.append("(and %d more)\n" % omitted_parents)
-                               else:
-                                       msg.append(" (no parents)\n")
-                               msg.append("\n")
-                       explanation = self._slot_conflict_explanation(slot_nodes)
-                       if explanation:
-                               explanations += 1
-                               msg.append(indent + "Explanation:\n\n")
-                               for line in textwrap.wrap(explanation, explanation_columns):
-                                       msg.append(2*indent + line + "\n")
-                               msg.append("\n")
-               msg.append("\n")
-               sys.stderr.write("".join(msg))
-               sys.stderr.flush()
-
-               explanations_for_all = explanations == len(self._slot_collision_info)
-
-               if explanations_for_all or "--quiet" in self.myopts:
-                       return
-
-               msg = []
-               msg.append("It may be possible to solve this problem ")
-               msg.append("by using package.mask to prevent one of ")
-               msg.append("those packages from being selected. ")
-               msg.append("However, it is also possible that conflicting ")
-               msg.append("dependencies exist such that they are impossible to ")
-               msg.append("satisfy simultaneously.  If such a conflict exists in ")
-               msg.append("the dependencies of two different packages, then those ")
-               msg.append("packages can not be installed simultaneously.")
-
-               from formatter import AbstractFormatter, DumbWriter
-               f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
-               for x in msg:
-                       f.add_flowing_data(x)
-               f.end_paragraph(1)
-
-               msg = []
-               msg.append("For more information, see MASKED PACKAGES ")
-               msg.append("section in the emerge man page or refer ")
-               msg.append("to the Gentoo Handbook.")
-               for x in msg:
-                       f.add_flowing_data(x)
-               f.end_paragraph(1)
-               f.writer.flush()
-
-       def _slot_conflict_explanation(self, slot_nodes):
-               """
-               When a slot conflict occurs due to USE deps, there are a few
-               different cases to consider:
-
-               1) New USE are correctly set but --newuse wasn't requested so an
-                  installed package with incorrect USE happened to get pulled
-                  into graph before the new one.
-
-               2) New USE are incorrectly set but an installed package has correct
-                  USE so it got pulled into the graph, and a new instance also got
-                  pulled in due to --newuse or an upgrade.
-
-               3) Multiple USE deps exist that can't be satisfied simultaneously,
-                  and multiple package instances got pulled into the same slot to
-                  satisfy the conflicting deps.
-
-               Currently, explanations and suggested courses of action are generated
-               for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
-               """
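-               # A hypothetical illustration of case 1: an installed dev-libs/foo-1.0
-               # built with USE="-ssl" was pulled into the graph even though a parent
-               # atom requires dev-libs/foo[ssl]; since the versions match, the
-               # suggestion produced below is simply --newuse or a reinstall of foo.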
-
-               if len(slot_nodes) != 2:
-                       # Suggestions are only implemented for
-                       # conflicts between two packages.
-                       return None
-
-               all_conflict_atoms = self._slot_conflict_parent_atoms
-               matched_node = None
-               matched_atoms = None
-               unmatched_node = None
-               for node in slot_nodes:
-                       parent_atoms = self._parent_atoms.get(node)
-                       if not parent_atoms:
-                               # Normally, there are always parent atoms. If there are
-                               # none then something unexpected is happening and there's
-                               # currently no suggestion for this case.
-                               return None
-                       conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
-                       for parent_atom in conflict_atoms:
-                               parent, atom = parent_atom
-                               if not atom.use:
-                                       # Suggestions are currently only implemented for cases
-                                       # in which all conflict atoms have USE deps.
-                                       return None
-                       if conflict_atoms:
-                               if matched_node is not None:
-                                       # If conflict atoms match multiple nodes
-                                       # then there's no suggestion.
-                                       return None
-                               matched_node = node
-                               matched_atoms = conflict_atoms
-                       else:
-                               if unmatched_node is not None:
-                                       # Neither node is matched by conflict atoms, and
-                                       # there is no suggestion for this case.
-                                       return None
-                               unmatched_node = node
-
-               if matched_node is None or unmatched_node is None:
-                       # This shouldn't happen.
-                       return None
-
-               if unmatched_node.installed and not matched_node.installed and \
-                       unmatched_node.cpv == matched_node.cpv:
-                       # If the conflicting packages are the same version then
-                       # --newuse should be all that's needed. If they are different
-                       # versions then there's some other problem.
-                       return "New USE are correctly set, but --newuse wasn't" + \
-                               " requested, so an installed package with incorrect USE " + \
-                               "happened to get pulled into the dependency graph. " + \
-                               "In order to solve " + \
-                               "this, either specify the --newuse option or explicitly " + \
-                               "reinstall '%s'." % matched_node.slot_atom
-
-               if matched_node.installed and not unmatched_node.installed:
-                       atoms = sorted(set(atom for parent, atom in matched_atoms))
-                       explanation = ("New USE for '%s' are incorrectly set. " + \
-                               "In order to solve this, adjust USE to satisfy '%s'") % \
-                               (matched_node.slot_atom, atoms[0])
-                       if len(atoms) > 1:
-                               for atom in atoms[1:-1]:
-                                       explanation += ", '%s'" % (atom,)
-                               if len(atoms) > 2:
-                                       explanation += ","
-                               explanation += " and '%s'" % (atoms[-1],)
-                       explanation += "."
-                       return explanation
-
-               return None
-
-       def _process_slot_conflicts(self):
-               """
-               Process slot conflict data to identify specific atoms which
-               lead to conflict. These atoms only match a subset of the
-               packages that have been pulled into a given slot.
-               """
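-               # Hypothetical illustration: if both qt-4.4 and qt-4.5 have been pulled
-               # into the x11-libs/qt:4 slot, a parent atom such as >=x11-libs/qt-4.5
-               # matches only one of them and is therefore recorded in
-               # self._slot_conflict_parent_atoms below.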
-               for (slot_atom, root), slot_nodes \
-                       in self._slot_collision_info.iteritems():
-
-                       all_parent_atoms = set()
-                       for pkg in slot_nodes:
-                               parent_atoms = self._parent_atoms.get(pkg)
-                               if not parent_atoms:
-                                       continue
-                               all_parent_atoms.update(parent_atoms)
-
-                       for pkg in slot_nodes:
-                               parent_atoms = self._parent_atoms.get(pkg)
-                               if parent_atoms is None:
-                                       parent_atoms = set()
-                                       self._parent_atoms[pkg] = parent_atoms
-                               for parent_atom in all_parent_atoms:
-                                       if parent_atom in parent_atoms:
-                                               continue
-                                       # Use package set for matching since it will match via
-                                       # PROVIDE when necessary, while match_from_list does not.
-                                       parent, atom = parent_atom
-                                       atom_set = InternalPackageSet(
-                                               initial_atoms=(atom,))
-                                       if atom_set.findAtomForPackage(pkg):
-                                               parent_atoms.add(parent_atom)
-                                       else:
-                                               self._slot_conflict_parent_atoms.add(parent_atom)
-
-       def _reinstall_for_flags(self, forced_flags,
-               orig_use, orig_iuse, cur_use, cur_iuse):
-               """Return a set of flags that trigger reinstallation, or None if there
-               are no such flags."""
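-               # A minimal sketch of the --newuse arithmetic below, with hypothetical
-               # flags: IUSE gained "ipv6" and its enabled state changed, so both
-               # set differences report it.
-               #
-               #   >>> forced_flags = set()
-               #   >>> orig_iuse, orig_use = set(["ssl"]), set(["ssl"])
-               #   >>> cur_iuse, cur_use = set(["ssl", "ipv6"]), set(["ssl", "ipv6"])
-               #   >>> sorted(orig_iuse.symmetric_difference(cur_iuse).difference(
-               #   ...     forced_flags))
-               #   ['ipv6']
-               #   >>> sorted(orig_iuse.intersection(orig_use).symmetric_difference(
-               #   ...     cur_iuse.intersection(cur_use)))
-               #   ['ipv6']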
-               if "--newuse" in self.myopts:
-                       flags = set(orig_iuse.symmetric_difference(
-                               cur_iuse).difference(forced_flags))
-                       flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
-                               cur_iuse.intersection(cur_use)))
-                       if flags:
-                               return flags
-               elif "changed-use" == self.myopts.get("--reinstall"):
-                       flags = orig_iuse.intersection(orig_use).symmetric_difference(
-                               cur_iuse.intersection(cur_use))
-                       if flags:
-                               return flags
-               return None
-
-       def _create_graph(self, allow_unsatisfied=False):
-               dep_stack = self._dep_stack
-               dep_disjunctive_stack = self._dep_disjunctive_stack
-               while dep_stack or dep_disjunctive_stack:
-                       self.spinner.update()
-                       while dep_stack:
-                               dep = dep_stack.pop()
-                               if isinstance(dep, Package):
-                                       if not self._add_pkg_deps(dep,
-                                               allow_unsatisfied=allow_unsatisfied):
-                                               return 0
-                                       continue
-                               if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
-                                       return 0
-                       if dep_disjunctive_stack:
-                               if not self._pop_disjunction(allow_unsatisfied):
-                                       return 0
-               return 1
-
-       def _add_dep(self, dep, allow_unsatisfied=False):
-               debug = "--debug" in self.myopts
-               buildpkgonly = "--buildpkgonly" in self.myopts
-               nodeps = "--nodeps" in self.myopts
-               empty = "empty" in self.myparams
-               deep = "deep" in self.myparams
-               update = "--update" in self.myopts and dep.depth <= 1
-               if dep.blocker:
-                       if not buildpkgonly and \
-                               not nodeps and \
-                               dep.parent not in self._slot_collision_nodes:
-                               if dep.parent.onlydeps:
-                                       # It's safe to ignore blockers if the
-                                       # parent is an --onlydeps node.
-                                       return 1
-                               # The blocker applies to the root where
-                               # the parent is or will be installed.
-                               blocker = Blocker(atom=dep.atom,
-                                       eapi=dep.parent.metadata["EAPI"],
-                                       root=dep.parent.root)
-                               self._blocker_parents.add(blocker, dep.parent)
-                       return 1
-               dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
-                       onlydeps=dep.onlydeps)
-               if not dep_pkg:
-                       if dep.priority.optional:
-                               # This could be an unnecessary build-time dep
-                               # pulled in by --with-bdeps=y.
-                               return 1
-                       if allow_unsatisfied:
-                               self._unsatisfied_deps.append(dep)
-                               return 1
-                       self._unsatisfied_deps_for_display.append(
-                               ((dep.root, dep.atom), {"myparent":dep.parent}))
-                       return 0
-               # In some cases, dep_check will return deps that shouldn't
-               # be processed any further, so they are identified and
-               # discarded here. Try to discard as few as possible since
-               # discarded dependencies reduce the amount of information
-               # available for optimization of merge order.
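-               # For instance: an atom that an installed package already satisfies is
-               # ignored below when no argument, existing graph node, --deep, --update
-               # or --emptytree run requires the newer candidate to be pulled in.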
-               if dep.priority.satisfied and \
-                       not dep_pkg.installed and \
-                       not (existing_node or empty or deep or update):
-                       myarg = None
-                       if dep.root == self.target_root:
-                               try:
-                                       myarg = self._iter_atoms_for_pkg(dep_pkg).next()
-                               except StopIteration:
-                                       pass
-                               except portage.exception.InvalidDependString:
-                                       if not dep_pkg.installed:
-                                               # This shouldn't happen since the package
-                                               # should have been masked.
-                                               raise
-                       if not myarg:
-                               self._ignored_deps.append(dep)
-                               return 1
-
-               if not self._add_pkg(dep_pkg, dep):
-                       return 0
-               return 1
-
-       def _add_pkg(self, pkg, dep):
-               myparent = None
-               priority = None
-               depth = 0
-               if dep is None:
-                       dep = Dependency()
-               else:
-                       myparent = dep.parent
-                       priority = dep.priority
-                       depth = dep.depth
-               if priority is None:
-                       priority = DepPriority()
-               """
-               Fills the digraph with nodes comprised of packages to merge.
-               pkg is the package to merge and dep.parent is the package
-               depending on it (or None, as for command line arguments).
-               When pkg.onlydeps is set (--onlydeps), the package is only
-               examined for its dependencies and is not merged itself.
-               #stuff to add:
-               #SLOT-aware emerge
-               #IUSE-aware emerge -> USE DEP aware depgraph
-               #"no downgrade" emerge
-               """
-               # Ensure that the dependencies of the same package
-               # are never processed more than once.
-               previously_added = pkg in self.digraph
-
-               # select the correct /var database that we'll be checking against
-               vardbapi = self.trees[pkg.root]["vartree"].dbapi
-               pkgsettings = self.pkgsettings[pkg.root]
-
-               arg_atoms = None
-               if True:
-                       try:
-                               arg_atoms = list(self._iter_atoms_for_pkg(pkg))
-                       except portage.exception.InvalidDependString, e:
-                               if not pkg.installed:
-                                       show_invalid_depstring_notice(
-                                               pkg, pkg.metadata["PROVIDE"], str(e))
-                                       return 0
-                               del e
-
-               if not pkg.onlydeps:
-                       if not pkg.installed and \
-                               "empty" not in self.myparams and \
-                               vardbapi.match(pkg.slot_atom):
-                               # Increase the priority of dependencies on packages that
-                               # are being rebuilt. This optimizes merge order so that
-                               # dependencies are rebuilt/updated as soon as possible,
-                               # which is needed especially when emerge is called by
-                               # revdep-rebuild since dependencies may be affected by ABI
-                               # breakage that has rendered them useless. Don't adjust
-                               # priority here when in "empty" mode since all packages
-                               # are being merged in that case.
-                               priority.rebuild = True
-
-                       existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
-                       slot_collision = False
-                       if existing_node:
-                               existing_node_matches = pkg.cpv == existing_node.cpv
-                               if existing_node_matches and \
-                                       pkg != existing_node and \
-                                       dep.atom is not None:
-                                       # Use package set for matching since it will match via
-                                       # PROVIDE when necessary, while match_from_list does not.
-                                       atom_set = InternalPackageSet(initial_atoms=[dep.atom])
-                                       if not atom_set.findAtomForPackage(existing_node):
-                                               existing_node_matches = False
-                               if existing_node_matches:
-                                       # The existing node can be reused.
-                                       if arg_atoms:
-                                               for parent_atom in arg_atoms:
-                                                       parent, atom = parent_atom
-                                                       self.digraph.add(existing_node, parent,
-                                                               priority=priority)
-                                                       self._add_parent_atom(existing_node, parent_atom)
-                                       # If a direct circular dependency is not an unsatisfied
-                                       # buildtime dependency then drop it here since otherwise
-                                       # it can skew the merge order calculation in an unwanted
-                                       # way.
-                                       if existing_node != myparent or \
-                                               (priority.buildtime and not priority.satisfied):
-                                               self.digraph.addnode(existing_node, myparent,
-                                                       priority=priority)
-                                               if dep.atom is not None and dep.parent is not None:
-                                                       self._add_parent_atom(existing_node,
-                                                               (dep.parent, dep.atom))
-                                       return 1
-                               else:
-
-                                       # A slot collision has occurred.  Sometimes this coincides
-                                       # with unresolvable blockers, so the slot collision will be
-                                       # shown later if there are no unresolvable blockers.
-                                       self._add_slot_conflict(pkg)
-                                       slot_collision = True
-
-                       if slot_collision:
-                               # Now add this node to the graph so that self.display()
-                               # can show use flags and --tree output.  This node is
-                               # only being partially added to the graph.  It must not be
-                               # allowed to interfere with the other nodes that have been
-                               # added.  Do not overwrite data for existing nodes in
-                               # self.mydbapi since that data will be used for blocker
-                               # validation.
-                               # Even though the graph is now invalid, continue to process
-                               # dependencies so that things like --fetchonly can still
-                               # function despite collisions.
-                               pass
-                       elif not previously_added:
-                               self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
-                               self.mydbapi[pkg.root].cpv_inject(pkg)
-                               self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
-
-                       if not pkg.installed:
-                               # Allow this package to satisfy old-style virtuals in case it
-                               # doesn't already. Any pre-existing providers will be preferred
-                               # over this one.
-                               try:
-                                       pkgsettings.setinst(pkg.cpv, pkg.metadata)
-                                       # For consistency, also update the global virtuals.
-                                       settings = self.roots[pkg.root].settings
-                                       settings.unlock()
-                                       settings.setinst(pkg.cpv, pkg.metadata)
-                                       settings.lock()
-                               except portage.exception.InvalidDependString, e:
-                                       show_invalid_depstring_notice(
-                                               pkg, pkg.metadata["PROVIDE"], str(e))
-                                       del e
-                                       return 0
-
-               if arg_atoms:
-                       self._set_nodes.add(pkg)
-
-               # Do this even when addme is False (--onlydeps) so that the
-               # parent/child relationship is always known in case
-               # self._show_slot_collision_notice() needs to be called later.
-               self.digraph.add(pkg, myparent, priority=priority)
-               if dep.atom is not None and dep.parent is not None:
-                       self._add_parent_atom(pkg, (dep.parent, dep.atom))
-
-               if arg_atoms:
-                       for parent_atom in arg_atoms:
-                               parent, atom = parent_atom
-                               self.digraph.add(pkg, parent, priority=priority)
-                               self._add_parent_atom(pkg, parent_atom)
-
-               """ This section determines whether we go deeper into dependencies or not.
-                   We want to go deeper on a few occasions:
-                   When installing package A, we need to make sure package A's deps are met.
-                   With emerge --deep <pkgspec>, we need to recursively check the dependencies of pkgspec.
-                   If we are in --nodeps (no recursion) mode, we obviously only check one level of dependencies.
-               """
-               dep_stack = self._dep_stack
-               if "recurse" not in self.myparams:
-                       return 1
-               elif pkg.installed and \
-                       "deep" not in self.myparams:
-                       dep_stack = self._ignored_deps
-
-               self.spinner.update()
-
-               if arg_atoms:
-                       depth = 0
-               pkg.depth = depth
-               if not previously_added:
-                       dep_stack.append(pkg)
-               return 1
-
-       def _add_parent_atom(self, pkg, parent_atom):
-               parent_atoms = self._parent_atoms.get(pkg)
-               if parent_atoms is None:
-                       parent_atoms = set()
-                       self._parent_atoms[pkg] = parent_atoms
-               parent_atoms.add(parent_atom)
-
-       def _add_slot_conflict(self, pkg):
-               self._slot_collision_nodes.add(pkg)
-               slot_key = (pkg.slot_atom, pkg.root)
-               slot_nodes = self._slot_collision_info.get(slot_key)
-               if slot_nodes is None:
-                       slot_nodes = set()
-                       slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
-                       self._slot_collision_info[slot_key] = slot_nodes
-               slot_nodes.add(pkg)
-
-       def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
-
-               mytype = pkg.type_name
-               myroot = pkg.root
-               mykey = pkg.cpv
-               metadata = pkg.metadata
-               myuse = pkg.use.enabled
-               jbigkey = pkg
-               depth = pkg.depth + 1
-               removal_action = "remove" in self.myparams
-
-               edepend={}
-               depkeys = ["DEPEND","RDEPEND","PDEPEND"]
-               for k in depkeys:
-                       edepend[k] = metadata[k]
-
-               if not pkg.built and \
-                       "--buildpkgonly" in self.myopts and \
-                       "deep" not in self.myparams and \
-                       "empty" not in self.myparams:
-                       edepend["RDEPEND"] = ""
-                       edepend["PDEPEND"] = ""
-               bdeps_optional = False
-
-               if pkg.built and not removal_action:
-                       if self.myopts.get("--with-bdeps", "n") == "y":
-                               # Pull in build time deps as requested, but mark them as
-                               # "optional" since they are not strictly required. This allows
-                               # more freedom in the merge order calculation for solving
-                               # circular dependencies. Don't convert to PDEPEND since that
-                               # could make --with-bdeps=y less effective if it is used to
-                               # adjust merge order to prevent built_with_use() calls from
-                               # failing.
-                               bdeps_optional = True
-                       else:
-                               # built packages do not have build time dependencies.
-                               edepend["DEPEND"] = ""
-
-               if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
-                       edepend["DEPEND"] = ""
-
-               bdeps_root = "/"
-               root_deps = self.myopts.get("--root-deps")
-               if root_deps is not None:
-                       if root_deps is True:
-                               bdeps_root = myroot
-                       elif root_deps == "rdeps":
-                               edepend["DEPEND"] = ""
-
-               deps = (
-                       (bdeps_root, edepend["DEPEND"],
-                               self._priority(buildtime=(not bdeps_optional),
-                               optional=bdeps_optional)),
-                       (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
-                       (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
-               )
-
-               debug = "--debug" in self.myopts
-               strict = mytype != "installed"
-               try:
-                       if not strict:
-                               portage.dep._dep_check_strict = False
-
-                       for dep_root, dep_string, dep_priority in deps:
-                               if not dep_string:
-                                       continue
-                               if debug:
-                                       print
-                                       print "Parent:   ", jbigkey
-                                       print "Depstring:", dep_string
-                                       print "Priority:", dep_priority
-
-                               try:
-
-                                       dep_string = portage.dep.paren_normalize(
-                                               portage.dep.use_reduce(
-                                               portage.dep.paren_reduce(dep_string),
-                                               uselist=pkg.use.enabled))
-
-                                       dep_string = list(self._queue_disjunctive_deps(
-                                               pkg, dep_root, dep_priority, dep_string))
-
-                               except portage.exception.InvalidDependString, e:
-                                       if pkg.installed:
-                                               del e
-                                               continue
-                                       show_invalid_depstring_notice(pkg, dep_string, str(e))
-                                       return 0
-
-                               if not dep_string:
-                                       continue
-
-                               dep_string = portage.dep.paren_enclose(dep_string)
-
-                               if not self._add_pkg_dep_string(
-                                       pkg, dep_root, dep_priority, dep_string,
-                                       allow_unsatisfied):
-                                       return 0
-
-               except portage.exception.AmbiguousPackageName, e:
-                       pkgs = e.args[0]
-                       portage.writemsg("\n\n!!! An atom in the dependencies " + \
-                               "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
-                       for cpv in pkgs:
-                               portage.writemsg("    %s\n" % cpv, noiselevel=-1)
-                       portage.writemsg("\n", noiselevel=-1)
-                       if mytype == "binary":
-                               portage.writemsg(
-                                       "!!! This binary package cannot be installed: '%s'\n" % \
-                                       mykey, noiselevel=-1)
-                       elif mytype == "ebuild":
-                               portdb = self.roots[myroot].trees["porttree"].dbapi
-                               myebuild, mylocation = portdb.findname2(mykey)
-                               portage.writemsg("!!! This ebuild cannot be installed: " + \
-                                       "'%s'\n" % myebuild, noiselevel=-1)
-                       portage.writemsg("!!! Please notify the package maintainer " + \
-                               "that atoms must be fully-qualified.\n", noiselevel=-1)
-                       return 0
-               finally:
-                       portage.dep._dep_check_strict = True
-               return 1
-
-       def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
-               allow_unsatisfied):
-               depth = pkg.depth + 1
-               debug = "--debug" in self.myopts
-               strict = pkg.type_name != "installed"
-
-               if debug:
-                       print
-                       print "Parent:   ", pkg
-                       print "Depstring:", dep_string
-                       print "Priority:", dep_priority
-
-               try:
-                       selected_atoms = self._select_atoms(dep_root,
-                               dep_string, myuse=pkg.use.enabled, parent=pkg,
-                               strict=strict, priority=dep_priority)
-               except portage.exception.InvalidDependString, e:
-                       show_invalid_depstring_notice(pkg, dep_string, str(e))
-                       del e
-                       if pkg.installed:
-                               return 1
-                       return 0
-
-               if debug:
-                       print "Candidates:", selected_atoms
-
-               vardb = self.roots[dep_root].trees["vartree"].dbapi
-
-               for atom in selected_atoms:
-                       try:
-
-                               atom = portage.dep.Atom(atom)
-
-                               mypriority = dep_priority.copy()
-                               if not atom.blocker and vardb.match(atom):
-                                       mypriority.satisfied = True
-
-                               if not self._add_dep(Dependency(atom=atom,
-                                       blocker=atom.blocker, depth=depth, parent=pkg,
-                                       priority=mypriority, root=dep_root),
-                                       allow_unsatisfied=allow_unsatisfied):
-                                       return 0
-
-                       except portage.exception.InvalidAtom, e:
-                               show_invalid_depstring_notice(
-                                       pkg, dep_string, str(e))
-                               del e
-                               if not pkg.installed:
-                                       return 0
-
-               if debug:
-                       print "Exiting...", pkg
-
-               return 1
-
-       def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
-               """
-               Queue disjunctive (virtual and ||) deps in self._dep_disjunctive_stack.
-               Yields non-disjunctive deps. Raises InvalidDependString when necessary.
-               """
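-               # A rough sketch with hypothetical atoms: for a dep_struct such as
-               #   ['||', ['app-misc/a', 'app-misc/b'], 'sys-libs/c', 'virtual/d']
-               # the '||' group and the virtual are pushed onto
-               # self._dep_disjunctive_stack for later evaluation, while 'sys-libs/c'
-               # is yielded immediately.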
-               i = 0
-               while i < len(dep_struct):
-                       x = dep_struct[i]
-                       if isinstance(x, list):
-                               for y in self._queue_disjunctive_deps(
-                                       pkg, dep_root, dep_priority, x):
-                                       yield y
-                       elif x == "||":
-                               self._queue_disjunction(pkg, dep_root, dep_priority,
-                                       [ x, dep_struct[ i + 1 ] ] )
-                               i += 1
-                       else:
-                               try:
-                                       x = portage.dep.Atom(x)
-                               except portage.exception.InvalidAtom:
-                                       if not pkg.installed:
-                                               raise portage.exception.InvalidDependString(
-                                                       "invalid atom: '%s'" % x)
-                               else:
-                                       # Note: Eventually this will check for PROPERTIES=virtual
-                                       # or whatever other metadata gets implemented for this
-                                       # purpose.
-                                       if x.cp.startswith('virtual/'):
-                                               self._queue_disjunction( pkg, dep_root,
-                                                       dep_priority, [ str(x) ] )
-                                       else:
-                                               yield str(x)
-                       i += 1
-
-       def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
-               self._dep_disjunctive_stack.append(
-                       (pkg, dep_root, dep_priority, dep_struct))
-
-       def _pop_disjunction(self, allow_unsatisfied):
-               """
-               Pop one disjunctive dep from self._dep_disjunctive_stack, and use it to
-               populate self._dep_stack.
-               """
-               pkg, dep_root, dep_priority, dep_struct = \
-                       self._dep_disjunctive_stack.pop()
-               dep_string = portage.dep.paren_enclose(dep_struct)
-               if not self._add_pkg_dep_string(
-                       pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
-                       return 0
-               return 1
-
-       def _priority(self, **kwargs):
-               if "remove" in self.myparams:
-                       priority_constructor = UnmergeDepPriority
-               else:
-                       priority_constructor = DepPriority
-               return priority_constructor(**kwargs)
-
-       def _dep_expand(self, root_config, atom_without_category):
-               """
-               @param root_config: a root config instance
-               @type root_config: RootConfig
-               @param atom_without_category: an atom without a category component
-               @type atom_without_category: String
-               @rtype: list
-               @returns: a list of atoms containing categories (possibly empty)
-               """
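-               # Hypothetical example: for atom_without_category "foo", if both
-               # dev-libs/foo and app-misc/foo exist in one of the configured
-               # databases, atoms for both categories ("dev-libs/foo" and
-               # "app-misc/foo") are returned and the caller disambiguates.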
-               null_cp = portage.dep_getkey(insert_category_into_atom(
-                       atom_without_category, "null"))
-               cat, atom_pn = portage.catsplit(null_cp)
-
-               dbs = self._filtered_trees[root_config.root]["dbs"]
-               categories = set()
-               for db, pkg_type, built, installed, db_keys in dbs:
-                       for cat in db.categories:
-                               if db.cp_list("%s/%s" % (cat, atom_pn)):
-                                       categories.add(cat)
-
-               deps = []
-               for cat in categories:
-                       deps.append(insert_category_into_atom(
-                               atom_without_category, cat))
-               return deps
-
-       def _have_new_virt(self, root, atom_cp):
-               ret = False
-               for db, pkg_type, built, installed, db_keys in \
-                       self._filtered_trees[root]["dbs"]:
-                       if db.cp_list(atom_cp):
-                               ret = True
-                               break
-               return ret
-
-       def _iter_atoms_for_pkg(self, pkg):
-               # TODO: add multiple $ROOT support
-               if pkg.root != self.target_root:
-                       return
-               atom_arg_map = self._atom_arg_map
-               root_config = self.roots[pkg.root]
-               for atom in self._set_atoms.iterAtomsForPackage(pkg):
-                       atom_cp = portage.dep_getkey(atom)
-                       if atom_cp != pkg.cp and \
-                               self._have_new_virt(pkg.root, atom_cp):
-                               continue
-                       visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
-                       visible_pkgs.reverse() # descending order
-                       higher_slot = None
-                       for visible_pkg in visible_pkgs:
-                               if visible_pkg.cp != atom_cp:
-                                       continue
-                               if pkg >= visible_pkg:
-                                       # This is descending order, and we're not
-                                       # interested in any versions <= pkg given.
-                                       break
-                               if pkg.slot_atom != visible_pkg.slot_atom:
-                                       higher_slot = visible_pkg
-                                       break
-                       if higher_slot is not None:
-                               continue
-                       for arg in atom_arg_map[(atom, pkg.root)]:
-                               if isinstance(arg, PackageArg) and \
-                                       arg.package != pkg:
-                                       continue
-                               yield arg, atom
-
-       def select_files(self, myfiles):
-               """Given a list of .tbz2s, .ebuilds, sets, and deps, create the
-               appropriate depgraph and return a favorite list."""
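-               # Roughly, the argument forms handled below (hypothetical examples):
-               # binary packages ("foo-1.0.tbz2"), ebuild paths
-               # ("/usr/portage/dev-libs/foo/foo-1.0.ebuild"), absolute paths owned
-               # by installed packages ("/usr/bin/foo"), package sets ("world" or
-               # "@world"), and package atoms ("dev-libs/foo", "=dev-libs/foo-1.0").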
-               debug = "--debug" in self.myopts
-               root_config = self.roots[self.target_root]
-               sets = root_config.sets
-               getSetAtoms = root_config.setconfig.getSetAtoms
-               myfavorites=[]
-               myroot = self.target_root
-               dbs = self._filtered_trees[myroot]["dbs"]
-               vardb = self.trees[myroot]["vartree"].dbapi
-               real_vardb = self._trees_orig[myroot]["vartree"].dbapi
-               portdb = self.trees[myroot]["porttree"].dbapi
-               bindb = self.trees[myroot]["bintree"].dbapi
-               pkgsettings = self.pkgsettings[myroot]
-               args = []
-               onlydeps = "--onlydeps" in self.myopts
-               lookup_owners = []
-               for x in myfiles:
-                       ext = os.path.splitext(x)[1]
-                       if ext==".tbz2":
-                               if not os.path.exists(x):
-                                       if os.path.exists(
-                                               os.path.join(pkgsettings["PKGDIR"], "All", x)):
-                                               x = os.path.join(pkgsettings["PKGDIR"], "All", x)
-                                       elif os.path.exists(
-                                               os.path.join(pkgsettings["PKGDIR"], x)):
-                                               x = os.path.join(pkgsettings["PKGDIR"], x)
-                                       else:
-                                               print "\n\n!!! Binary package '"+str(x)+"' does not exist."
-                                               print "!!! Please ensure the tbz2 exists as specified.\n"
-                                               return 0, myfavorites
-                               mytbz2=portage.xpak.tbz2(x)
-                               mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
-                               if os.path.realpath(x) != \
-                                       os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
-                                       print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
-                                       return 0, myfavorites
-                               db_keys = list(bindb._aux_cache_keys)
-                               metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
-                               pkg = Package(type_name="binary", root_config=root_config,
-                                       cpv=mykey, built=True, metadata=metadata,
-                                       onlydeps=onlydeps)
-                               self._pkg_cache[pkg] = pkg
-                               args.append(PackageArg(arg=x, package=pkg,
-                                       root_config=root_config))
-                       elif ext==".ebuild":
-                               ebuild_path = portage.util.normalize_path(os.path.abspath(x))
-                               pkgdir = os.path.dirname(ebuild_path)
-                               tree_root = os.path.dirname(os.path.dirname(pkgdir))
-                               cp = pkgdir[len(tree_root)+1:]
-                               e = portage.exception.PackageNotFound(
-                                       ("%s is not in a valid portage tree " + \
-                                       "hierarchy or does not exist") % x)
-                               if not portage.isvalidatom(cp):
-                                       raise e
-                               cat = portage.catsplit(cp)[0]
-                               mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
-                               if not portage.isvalidatom("="+mykey):
-                                       raise e
-                               ebuild_path = portdb.findname(mykey)
-                               if ebuild_path:
-                                       if ebuild_path != os.path.join(os.path.realpath(tree_root),
-                                               cp, os.path.basename(ebuild_path)):
-                                               print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
-                                               return 0, myfavorites
-                                       if mykey not in portdb.xmatch(
-                                               "match-visible", portage.dep_getkey(mykey)):
-                                               print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
-                                               print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
-                                               print colorize("BAD", "*** page for details.")
-                                               countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
-                                                       "Continuing...")
-                               else:
-                                       raise portage.exception.PackageNotFound(
-                                               "%s is not in a valid portage tree hierarchy or does not exist" % x)
-                               db_keys = list(portdb._aux_cache_keys)
-                               metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
-                               pkg = Package(type_name="ebuild", root_config=root_config,
-                                       cpv=mykey, metadata=metadata, onlydeps=onlydeps)
-                               pkgsettings.setcpv(pkg)
-                               pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
-                               pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
-                               self._pkg_cache[pkg] = pkg
-                               args.append(PackageArg(arg=x, package=pkg,
-                                       root_config=root_config))
-                       elif x.startswith(os.path.sep):
-                               if not x.startswith(myroot):
-                                       portage.writemsg(("\n\n!!! '%s' does not start with" + \
-                                               " $ROOT.\n") % x, noiselevel=-1)
-                                       return 0, []
-                               # Queue these up since it's most efficient to handle
-                               # multiple files in a single iter_owners() call.
-                               lookup_owners.append(x)
-                       else:
-                               if x in ("system", "world"):
-                                       x = SETPREFIX + x
-                               if x.startswith(SETPREFIX):
-                                       s = x[len(SETPREFIX):]
-                                       if s not in sets:
-                                               raise portage.exception.PackageSetNotFound(s)
-                                       if s in self._sets:
-                                               continue
-                                       # Recursively expand sets so that containment tests in
-                                       # self._get_parent_sets() properly match atoms in nested
-                                       # sets (like if world contains system).
-                                       expanded_set = InternalPackageSet(
-                                               initial_atoms=getSetAtoms(s))
-                                       self._sets[s] = expanded_set
-                                       args.append(SetArg(arg=x, set=expanded_set,
-                                               root_config=root_config))
-                                       continue
-                               if not is_valid_package_atom(x):
-                                       portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
-                                               noiselevel=-1)
-                                       portage.writemsg("!!! Please check ebuild(5) for full details.\n")
-                                       portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
-                                       return (0,[])
-                               # Don't expand categories or old-style virtuals here unless
-                               # necessary. Expansion of old-style virtuals here causes at
-                               # least the following problems:
-                               #   1) It's more difficult to determine which set(s) an atom
-                               #      came from, if any.
-                               #   2) It takes away freedom from the resolver to choose other
-                               #      possible expansions when necessary.
-                               if "/" in x:
-                                       args.append(AtomArg(arg=x, atom=x,
-                                               root_config=root_config))
-                                       continue
-                               expanded_atoms = self._dep_expand(root_config, x)
-                               installed_cp_set = set()
-                               for atom in expanded_atoms:
-                                       atom_cp = portage.dep_getkey(atom)
-                                       if vardb.cp_list(atom_cp):
-                                               installed_cp_set.add(atom_cp)
-
-                               if len(installed_cp_set) > 1:
-                                       non_virtual_cps = set()
-                                       for atom_cp in installed_cp_set:
-                                               if not atom_cp.startswith("virtual/"):
-                                                       non_virtual_cps.add(atom_cp)
-                                       if len(non_virtual_cps) == 1:
-                                               installed_cp_set = non_virtual_cps
-
-                               if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
-                                       installed_cp = iter(installed_cp_set).next()
-                                       expanded_atoms = [atom for atom in expanded_atoms \
-                                               if portage.dep_getkey(atom) == installed_cp]
-
-                               if len(expanded_atoms) > 1:
-                                       print
-                                       print
-                                       ambiguous_package_name(x, expanded_atoms, root_config,
-                                               self.spinner, self.myopts)
-                                       return False, myfavorites
-                               if expanded_atoms:
-                                       atom = expanded_atoms[0]
-                               else:
-                                       null_atom = insert_category_into_atom(x, "null")
-                                       null_cp = portage.dep_getkey(null_atom)
-                                       cat, atom_pn = portage.catsplit(null_cp)
-                                       virts_p = root_config.settings.get_virts_p().get(atom_pn)
-                                       if virts_p:
-                                               # Allow the depgraph to choose which virtual.
-                                               atom = insert_category_into_atom(x, "virtual")
-                                       else:
-                                               atom = insert_category_into_atom(x, "null")
-
-                               args.append(AtomArg(arg=x, atom=atom,
-                                       root_config=root_config))
-
-               if lookup_owners:
-                       relative_paths = []
-                       search_for_multiple = False
-                       if len(lookup_owners) > 1:
-                               search_for_multiple = True
-
-                       for x in lookup_owners:
-                               if not search_for_multiple and os.path.isdir(x):
-                                       search_for_multiple = True
-                               relative_paths.append(x[len(myroot):])
-
-                       owners = set()
-                       for pkg, relative_path in \
-                               real_vardb._owners.iter_owners(relative_paths):
-                               owners.add(pkg.mycpv)
-                               if not search_for_multiple:
-                                       break
-
-                       if not owners:
-                               portage.writemsg(("\n\n!!! '%s' is not claimed " + \
-                                       "by any package.\n") % lookup_owners[0], noiselevel=-1)
-                               return 0, []
-
-                       for cpv in owners:
-                               slot = vardb.aux_get(cpv, ["SLOT"])[0]
-                               if not slot:
-                                       # portage now masks packages with missing slot, but it's
-                                       # possible that one was installed by an older version
-                                       atom = portage.cpv_getkey(cpv)
-                               else:
-                                       atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
-                               args.append(AtomArg(arg=atom, atom=atom,
-                                       root_config=root_config))
-
-               if "--update" in self.myopts:
-                       # In some cases, the greedy slots behavior can pull in a slot that
-                       # the user would want to uninstall due to it being blocked by a
-                       # newer version in a different slot. Therefore, it's necessary to
-                       # detect and discard any that should be uninstalled. Each time
-                       # that arguments are updated, package selections are repeated in
-                       # order to ensure consistency with the current arguments:
-                       #
-                       #  1) Initialize args
-                       #  2) Select packages and generate initial greedy atoms
-                       #  3) Update args with greedy atoms
-                       #  4) Select packages and generate greedy atoms again, while
-                       #     accounting for any blockers between selected packages
-                       #  5) Update args with revised greedy atoms
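-                       # Hypothetical illustration: an AtomArg for "dev-lang/python"
-                       # may gain greedy AtomArgs for other installed slots, such as
-                       # "dev-lang/python:2.5" and "dev-lang/python:2.6", and the
-                       # second pass may discard one of them via blocker lookahead.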
-
-                       self._set_args(args)
-                       greedy_args = []
-                       for arg in args:
-                               greedy_args.append(arg)
-                               if not isinstance(arg, AtomArg):
-                                       continue
-                               for atom in self._greedy_slots(arg.root_config, arg.atom):
-                                       greedy_args.append(
-                                               AtomArg(arg=arg.arg, atom=atom,
-                                                       root_config=arg.root_config))
-
-                       self._set_args(greedy_args)
-                       del greedy_args
-
-                       # Revise greedy atoms, accounting for any blockers
-                       # between selected packages.
-                       revised_greedy_args = []
-                       for arg in args:
-                               revised_greedy_args.append(arg)
-                               if not isinstance(arg, AtomArg):
-                                       continue
-                               for atom in self._greedy_slots(arg.root_config, arg.atom,
-                                       blocker_lookahead=True):
-                                       revised_greedy_args.append(
-                                               AtomArg(arg=arg.arg, atom=atom,
-                                                       root_config=arg.root_config))
-                       args = revised_greedy_args
-                       del revised_greedy_args
-
-               self._set_args(args)
-
-               myfavorites = set(myfavorites)
-               for arg in args:
-                       if isinstance(arg, (AtomArg, PackageArg)):
-                               myfavorites.add(arg.atom)
-                       elif isinstance(arg, SetArg):
-                               myfavorites.add(arg.arg)
-               myfavorites = list(myfavorites)
-
-               pprovideddict = pkgsettings.pprovideddict
-               if debug:
-                       portage.writemsg("\n", noiselevel=-1)
-               # Order needs to be preserved since a feature of --nodeps
-               # is to allow the user to force a specific merge order.
-               args.reverse()
-               while args:
-                       arg = args.pop()
-                       for atom in arg.set:
-                               self.spinner.update()
-                               dep = Dependency(atom=atom, onlydeps=onlydeps,
-                                       root=myroot, parent=arg)
-                               atom_cp = portage.dep_getkey(atom)
-                               try:
-                                       pprovided = pprovideddict.get(portage.dep_getkey(atom))
-                                       if pprovided and portage.match_from_list(atom, pprovided):
-                                               # A provided package has been specified on the command line.
-                                               self._pprovided_args.append((arg, atom))
-                                               continue
-                                       if isinstance(arg, PackageArg):
-                                               if not self._add_pkg(arg.package, dep) or \
-                                                       not self._create_graph():
-                                                       sys.stderr.write(("\n\n!!! Problem resolving " + \
-                                                               "dependencies for %s\n") % arg.arg)
-                                                       return 0, myfavorites
-                                               continue
-                                       if debug:
-                                               portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
-                                                       (arg, atom), noiselevel=-1)
-                                       pkg, existing_node = self._select_package(
-                                               myroot, atom, onlydeps=onlydeps)
-                                       if not pkg:
-                                               if not (isinstance(arg, SetArg) and \
-                                                       arg.name in ("system", "world")):
-                                                       self._unsatisfied_deps_for_display.append(
-                                                               ((myroot, atom), {}))
-                                                       return 0, myfavorites
-                                               self._missing_args.append((arg, atom))
-                                               continue
-                                       if atom_cp != pkg.cp:
-                                               # For old-style virtuals, we need to repeat the
-                                               # package.provided check against the selected package.
-                                               expanded_atom = atom.replace(atom_cp, pkg.cp)
-                                               pprovided = pprovideddict.get(pkg.cp)
-                                               if pprovided and \
-                                                       portage.match_from_list(expanded_atom, pprovided):
-                                                       # A provided package has been
-                                                       # specified on the command line.
-                                                       self._pprovided_args.append((arg, atom))
-                                                       continue
-                                       if pkg.installed and "selective" not in self.myparams:
-                                               self._unsatisfied_deps_for_display.append(
-                                                       ((myroot, atom), {}))
-                                               # Previous behavior was to bail out in this case, but
-                                               # since the dep is satisfied by the installed package,
-                                               # it's more friendly to continue building the graph
-                                               # and just show a warning message. Therefore, only bail
-                                               # out here if the atom is not from either the system or
-                                               # world set.
-                                               if not (isinstance(arg, SetArg) and \
-                                                       arg.name in ("system", "world")):
-                                                       return 0, myfavorites
-
-                                       # Add the selected package to the graph as soon as possible
-                                       # so that later dep_check() calls can use it as feedback
-                                       # for making more consistent atom selections.
-                                       if not self._add_pkg(pkg, dep):
-                                               if isinstance(arg, SetArg):
-                                                       sys.stderr.write(("\n\n!!! Problem resolving " + \
-                                                               "dependencies for %s from %s\n") % \
-                                                               (atom, arg.arg))
-                                               else:
-                                                       sys.stderr.write(("\n\n!!! Problem resolving " + \
-                                                               "dependencies for %s\n") % atom)
-                                               return 0, myfavorites
-
-                               except portage.exception.MissingSignature, e:
-                                       portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
-                                       portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
-                                       portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
-                                       portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
-                                       portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
-                                       return 0, myfavorites
-                               except portage.exception.InvalidSignature, e:
-                                       portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
-                                       portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
-                                       portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
-                                       portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
-                                       portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
-                                       return 0, myfavorites
-                               except SystemExit, e:
-                                       raise # Needed else can't exit
-                               except Exception, e:
-                                       print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
-                                       print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
-                                       raise
-
-               # Now that the root packages have been added to the graph,
-               # process the dependencies.
-               if not self._create_graph():
-                       return 0, myfavorites
-
-               missing = 0
-               if "--usepkgonly" in self.myopts:
-                       for xs in self.digraph.all_nodes():
-                               if not isinstance(xs, Package):
-                                       continue
-                               if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
-                                       if missing == 0:
-                                               print
-                                       missing += 1
-                                       print "Missing binary for:", xs[2]
-
-               try:
-                       self.altlist()
-               except self._unknown_internal_error:
-                       return False, myfavorites
-
-               # The return value is True unless binaries are missing.
-               return (not missing, myfavorites)
-
-       def _set_args(self, args):
-               """
-               Create the "args" package set from atoms and packages given as
-               arguments. This method can be called multiple times if necessary.
-               The package selection cache is automatically invalidated, since
-               arguments influence package selections.
-               """
-               args_set = self._sets["args"]
-               args_set.clear()
-               for arg in args:
-                       if not isinstance(arg, (AtomArg, PackageArg)):
-                               continue
-                       atom = arg.atom
-                       if atom in args_set:
-                               continue
-                       args_set.add(atom)
-
-               self._set_atoms.clear()
-               self._set_atoms.update(chain(*self._sets.itervalues()))
-               atom_arg_map = self._atom_arg_map
-               atom_arg_map.clear()
-               for arg in args:
-                       for atom in arg.set:
-                               atom_key = (atom, arg.root_config.root)
-                               refs = atom_arg_map.get(atom_key)
-                               if refs is None:
-                                       refs = []
-                                       atom_arg_map[atom_key] = refs
-                               if arg not in refs:
-                                       refs.append(arg)
-
-               # Invalidate the package selection cache, since
-               # arguments influence package selections.
-               self._highest_pkg_cache.clear()
-               for trees in self._filtered_trees.itervalues():
-                       trees["porttree"].dbapi._clear_cache()
-
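A minimal sketch of the (atom, root) -> arguments mapping that _set_args() rebuilds above, using plain tuples and hypothetical set names in place of the real AtomArg/SetArg objects:

args = [("world", "/", ["sys-apps/portage", "app-editors/vim"]),
        ("app-editors/vim", "/", ["app-editors/vim"])]

atom_arg_map = {}
for name, root, atoms in args:
    for atom in atoms:
        # Record every argument that pulls in a given (atom, root) pair,
        # so later lookups can report all of them.
        refs = atom_arg_map.setdefault((atom, root), [])
        if name not in refs:
            refs.append(name)
# atom_arg_map[("app-editors/vim", "/")] == ["world", "app-editors/vim"]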
-       def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
-               """
-               Return a list of slot atoms corresponding to installed slots that
-               differ from the slot of the highest visible match. When
-               blocker_lookahead is True, slot atoms that would trigger a blocker
-               conflict are automatically discarded, potentially allowing automatic
-               uninstallation of older slots when appropriate.
-               """
-               highest_pkg, in_graph = self._select_package(root_config.root, atom)
-               if highest_pkg is None:
-                       return []
-               vardb = root_config.trees["vartree"].dbapi
-               slots = set()
-               for cpv in vardb.match(atom):
-                       # don't mix new virtuals with old virtuals
-                       if portage.cpv_getkey(cpv) == highest_pkg.cp:
-                               slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
-
-               slots.add(highest_pkg.metadata["SLOT"])
-               if len(slots) == 1:
-                       return []
-               greedy_pkgs = []
-               slots.remove(highest_pkg.metadata["SLOT"])
-               while slots:
-                       slot = slots.pop()
-                       slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
-                       pkg, in_graph = self._select_package(root_config.root, slot_atom)
-                       if pkg is not None and \
-                               pkg.cp == highest_pkg.cp and pkg < highest_pkg:
-                               greedy_pkgs.append(pkg)
-               if not greedy_pkgs:
-                       return []
-               if not blocker_lookahead:
-                       return [pkg.slot_atom for pkg in greedy_pkgs]
-
-               blockers = {}
-               blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
-               for pkg in greedy_pkgs + [highest_pkg]:
-                       dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
-                       try:
-                               atoms = self._select_atoms(
-                                       pkg.root, dep_str, pkg.use.enabled,
-                                       parent=pkg, strict=True)
-                       except portage.exception.InvalidDependString:
-                               continue
-                       blocker_atoms = (x for x in atoms if x.blocker)
-                       blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
-
-               if highest_pkg not in blockers:
-                       return []
-
-               # filter packages with invalid deps
-               greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
-
-               # filter packages that conflict with highest_pkg
-               greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
-                       (blockers[highest_pkg].findAtomForPackage(pkg) or \
-                       blockers[pkg].findAtomForPackage(highest_pkg))]
-
-               if not greedy_pkgs:
-                       return []
-
-               # If two packages conflict, discard the lower version.
-               discard_pkgs = set()
-               greedy_pkgs.sort(reverse=True)
-               for i in xrange(len(greedy_pkgs) - 1):
-                       pkg1 = greedy_pkgs[i]
-                       if pkg1 in discard_pkgs:
-                               continue
-                       for j in xrange(i + 1, len(greedy_pkgs)):
-                               pkg2 = greedy_pkgs[j]
-                               if pkg2 in discard_pkgs:
-                                       continue
-                               if blockers[pkg1].findAtomForPackage(pkg2) or \
-                                       blockers[pkg2].findAtomForPackage(pkg1):
-                                       # pkg1 > pkg2
-                                       discard_pkgs.add(pkg2)
-
-               return [pkg.slot_atom for pkg in greedy_pkgs \
-                       if pkg not in discard_pkgs]
-
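The blocker lookahead above reduces to a pairwise elimination: whenever two slot candidates block each other, the lower version is dropped. A self-contained sketch of that rule, with integers standing in for Package objects and a hypothetical conflicts() callable standing in for the blocker-atom lookups:

def discard_conflicting_lower(candidates, conflicts):
    # Keep the higher of any two mutually conflicting candidates.
    candidates = sorted(candidates, reverse=True)
    discard = set()
    for i in xrange(len(candidates) - 1):
        pkg1 = candidates[i]
        if pkg1 in discard:
            continue
        for pkg2 in candidates[i + 1:]:
            if pkg2 in discard:
                continue
            if conflicts(pkg1, pkg2):
                # pkg1 > pkg2, so pkg2 is the one discarded
                discard.add(pkg2)
    return [c for c in candidates if c not in discard]

# Versions 3 and 1 conflict, so the lower of the two (1) is dropped.
result = discard_conflicting_lower([1, 2, 3],
    lambda a, b: set([a, b]) == set([1, 3]))
# result == [3, 2]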
-       def _select_atoms_from_graph(self, *pargs, **kwargs):
-               """
-               Prefer atoms matching packages that have already been
-               added to the graph or those that are installed and have
-               not been scheduled for replacement.
-               """
-               kwargs["trees"] = self._graph_trees
-               return self._select_atoms_highest_available(*pargs, **kwargs)
-
-       def _select_atoms_highest_available(self, root, depstring,
-               myuse=None, parent=None, strict=True, trees=None, priority=None):
-               """This will raise InvalidDependString if necessary. If trees is
-               None then self._filtered_trees is used."""
-               pkgsettings = self.pkgsettings[root]
-               if trees is None:
-                       trees = self._filtered_trees
-               if not getattr(priority, "buildtime", False):
-                       # The parent should only be passed to dep_check() for buildtime
-                       # dependencies since that's the only case when it's appropriate
-                       # to trigger the circular dependency avoidance code which uses it.
-                       # It's important not to trigger the same circular dependency
-                       # avoidance code for runtime dependencies since it's not needed
-                       # and it can promote an incorrect package choice.
-                       parent = None
-               try:
-                       if parent is not None:
-                               trees[root]["parent"] = parent
-                       if not strict:
-                               portage.dep._dep_check_strict = False
-                       mycheck = portage.dep_check(depstring, None,
-                               pkgsettings, myuse=myuse,
-                               myroot=root, trees=trees)
-               finally:
-                       if parent is not None:
-                               trees[root].pop("parent")
-                       portage.dep._dep_check_strict = True
-               if not mycheck[0]:
-                       raise portage.exception.InvalidDependString(mycheck[1])
-               selected_atoms = mycheck[1]
-               return selected_atoms
-
-       def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
-               atom = portage.dep.Atom(atom)
-               atom_set = InternalPackageSet(initial_atoms=(atom,))
-               atom_without_use = atom
-               if atom.use:
-                       atom_without_use = portage.dep.remove_slot(atom)
-                       if atom.slot:
-                               atom_without_use += ":" + atom.slot
-                       atom_without_use = portage.dep.Atom(atom_without_use)
-               xinfo = '"%s"' % atom
-               if arg:
-                       xinfo = '"%s"' % arg
-               # Discard null/ from failed cpv_expand category expansion.
-               xinfo = xinfo.replace("null/", "")
-               masked_packages = []
-               missing_use = []
-               masked_pkg_instances = set()
-               missing_licenses = []
-               have_eapi_mask = False
-               pkgsettings = self.pkgsettings[root]
-               implicit_iuse = pkgsettings._get_implicit_iuse()
-               root_config = self.roots[root]
-               portdb = self.roots[root].trees["porttree"].dbapi
-               dbs = self._filtered_trees[root]["dbs"]
-               for db, pkg_type, built, installed, db_keys in dbs:
-                       if installed:
-                               continue
-                       match = db.match
-                       if hasattr(db, "xmatch"):
-                               cpv_list = db.xmatch("match-all", atom_without_use)
-                       else:
-                               cpv_list = db.match(atom_without_use)
-                       # descending order
-                       cpv_list.reverse()
-                       for cpv in cpv_list:
-                               metadata, mreasons = get_mask_info(root_config, cpv,
-                                       pkgsettings, db, pkg_type, built, installed, db_keys)
-                               if metadata is not None:
-                                       pkg = Package(built=built, cpv=cpv,
-                                               installed=installed, metadata=metadata,
-                                               root_config=root_config)
-                                       if pkg.cp != atom.cp:
-                                               # A cpv can be returned from dbapi.match() as an
-                                               # old-style virtual match even in cases when the
-                                               # package does not actually PROVIDE the virtual.
-                                               # Filter out any such false matches here.
-                                               if not atom_set.findAtomForPackage(pkg):
-                                                       continue
-                                       if mreasons:
-                                               masked_pkg_instances.add(pkg)
-                                       if atom.use:
-                                               missing_use.append(pkg)
-                                               if not mreasons:
-                                                       continue
-                               masked_packages.append(
-                                       (root_config, pkgsettings, cpv, metadata, mreasons))
-
-               missing_use_reasons = []
-               missing_iuse_reasons = []
-               for pkg in missing_use:
-                       use = pkg.use.enabled
-                       iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
-                       iuse_re = re.compile("^(%s)$" % "|".join(iuse))
-                       missing_iuse = []
-                       for x in atom.use.required:
-                               if iuse_re.match(x) is None:
-                                       missing_iuse.append(x)
-                       mreasons = []
-                       if missing_iuse:
-                               mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
-                               missing_iuse_reasons.append((pkg, mreasons))
-                       else:
-                               need_enable = sorted(atom.use.enabled.difference(use))
-                               need_disable = sorted(atom.use.disabled.intersection(use))
-                               if need_enable or need_disable:
-                                       changes = []
-                                       changes.extend(colorize("red", "+" + x) \
-                                               for x in need_enable)
-                                       changes.extend(colorize("blue", "-" + x) \
-                                               for x in need_disable)
-                                       mreasons.append("Change USE: %s" % " ".join(changes))
-                                       missing_use_reasons.append((pkg, mreasons))
-
-               unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
-                       in missing_use_reasons if pkg not in masked_pkg_instances]
-
-               unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
-                       in missing_iuse_reasons if pkg not in masked_pkg_instances]
-
-               show_missing_use = False
-               if unmasked_use_reasons:
-                       # Only show the latest version.
-                       show_missing_use = unmasked_use_reasons[:1]
-               elif unmasked_iuse_reasons:
-                       if missing_use_reasons:
-                               # All packages with required IUSE are masked,
-                               # so display a normal masking message.
-                               pass
-                       else:
-                               show_missing_use = unmasked_iuse_reasons
-
-               if show_missing_use:
-                       print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
-                       print "!!! One of the following packages is required to complete your request:"
-                       for pkg, mreasons in show_missing_use:
-                               print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
-
-               elif masked_packages:
-                       print "\n!!! " + \
-                               colorize("BAD", "All ebuilds that could satisfy ") + \
-                               colorize("INFORM", xinfo) + \
-                               colorize("BAD", " have been masked.")
-                       print "!!! One of the following masked packages is required to complete your request:"
-                       have_eapi_mask = show_masked_packages(masked_packages)
-                       if have_eapi_mask:
-                               print
-                               msg = ("The current version of portage supports " + \
-                                       "EAPI '%s'. You must upgrade to a newer version" + \
-                                       " of portage before EAPI masked packages can" + \
-                                       " be installed.") % portage.const.EAPI
-                               from textwrap import wrap
-                               for line in wrap(msg, 75):
-                                       print line
-                       print
-                       show_mask_docs()
-               else:
-                       print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
-
-               # Show parent nodes and the argument that pulled them in.
-               traversed_nodes = set()
-               node = myparent
-               msg = []
-               while node is not None:
-                       traversed_nodes.add(node)
-                       msg.append('(dependency required by "%s" [%s])' % \
-                               (colorize('INFORM', str(node.cpv)), node.type_name))
-                       # When traversing to parents, prefer arguments over packages
-                       # since arguments are root nodes. Never traverse the same
-                       # package twice, in order to prevent an infinite loop.
-                       selected_parent = None
-                       for parent in self.digraph.parent_nodes(node):
-                               if isinstance(parent, DependencyArg):
-                                       msg.append('(dependency required by "%s" [argument])' % \
-                                               (colorize('INFORM', str(parent))))
-                                       selected_parent = None
-                                       break
-                               if parent not in traversed_nodes:
-                                       selected_parent = parent
-                       node = selected_parent
-               for line in msg:
-                       print line
-
-               print
-
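The "Missing IUSE" check above joins the package's IUSE (plus the implicit IUSE patterns) into a single anchored regex and flags any required USE token that the regex fails to match. A toy illustration with hypothetical flag names:

import re

iuse = ["ssl", "gtk", "python_targets_.*"]  # explicit flags plus an implicit pattern
iuse_re = re.compile("^(%s)$" % "|".join(iuse))
required = ["ssl", "qt4", "python_targets_python2_7"]
missing_iuse = [x for x in required if iuse_re.match(x) is None]
# missing_iuse == ["qt4"]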
-       def _select_pkg_highest_available(self, root, atom, onlydeps=False):
-               cache_key = (root, atom, onlydeps)
-               ret = self._highest_pkg_cache.get(cache_key)
-               if ret is not None:
-                       pkg, existing = ret
-                       if pkg and not existing:
-                               existing = self._slot_pkg_map[root].get(pkg.slot_atom)
-                               if existing and existing == pkg:
-                                       # Update the cache to reflect that the
-                                       # package has been added to the graph.
-                                       ret = pkg, pkg
-                                       self._highest_pkg_cache[cache_key] = ret
-                       return ret
-               ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
-               self._highest_pkg_cache[cache_key] = ret
-               pkg, existing = ret
-               if pkg is not None:
-                       settings = pkg.root_config.settings
-                       if visible(settings, pkg) and not (pkg.installed and \
-                               settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
-                               pkg.root_config.visible_pkgs.cpv_inject(pkg)
-               return ret
-
-       def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
-               root_config = self.roots[root]
-               pkgsettings = self.pkgsettings[root]
-               dbs = self._filtered_trees[root]["dbs"]
-               vardb = self.roots[root].trees["vartree"].dbapi
-               portdb = self.roots[root].trees["porttree"].dbapi
-               # List of acceptable packages, ordered by type preference.
-               matched_packages = []
-               highest_version = None
-               if not isinstance(atom, portage.dep.Atom):
-                       atom = portage.dep.Atom(atom)
-               atom_cp = atom.cp
-               atom_set = InternalPackageSet(initial_atoms=(atom,))
-               existing_node = None
-               myeb = None
-               usepkgonly = "--usepkgonly" in self.myopts
-               empty = "empty" in self.myparams
-               selective = "selective" in self.myparams
-               reinstall = False
-               noreplace = "--noreplace" in self.myopts
-               # Behavior of the "selective" parameter depends on
-               # whether or not a package matches an argument atom.
-               # If an installed package provides an old-style
-               # virtual that is no longer provided by an available
-               # package, the installed package may match an argument
-               # atom even though none of the available packages do.
-               # Therefore, "selective" logic does not consider
-               # whether or not an installed package matches an
-               # argument atom. It only considers whether or not
-               # available packages match argument atoms, which is
-               # represented by the found_available_arg flag.
-               found_available_arg = False
-               for find_existing_node in True, False:
-                       if existing_node:
-                               break
-                       for db, pkg_type, built, installed, db_keys in dbs:
-                               if existing_node:
-                                       break
-                               if installed and not find_existing_node:
-                                       want_reinstall = reinstall or empty or \
-                                               (found_available_arg and not selective)
-                                       if want_reinstall and matched_packages:
-                                               continue
-                               if hasattr(db, "xmatch"):
-                                       cpv_list = db.xmatch("match-all", atom)
-                               else:
-                                       cpv_list = db.match(atom)
-
-                               # USE=multislot can make an installed package appear as if
-                               # it doesn't satisfy a slot dependency. Rebuilding the ebuild
-                               # won't do any good as long as USE=multislot is enabled since
-                               # the newly built package still won't have the expected slot.
-                               # Therefore, assume that such SLOT dependencies are already
-                               # satisfied rather than forcing a rebuild.
-                               if installed and not cpv_list and atom.slot:
-                                       for cpv in db.match(atom.cp):
-                                               slot_available = False
-                                               for other_db, other_type, other_built, \
-                                                       other_installed, other_keys in dbs:
-                                                       try:
-                                                               if atom.slot == \
-                                                                       other_db.aux_get(cpv, ["SLOT"])[0]:
-                                                                       slot_available = True
-                                                                       break
-                                                       except KeyError:
-                                                               pass
-                                               if not slot_available:
-                                                       continue
-                                               inst_pkg = self._pkg(cpv, "installed",
-                                                       root_config, installed=installed)
-                                               # Remove the slot from the atom and verify that
-                                               # the package matches the resulting atom.
-                                               atom_without_slot = portage.dep.remove_slot(atom)
-                                               if atom.use:
-                                                       atom_without_slot += str(atom.use)
-                                               atom_without_slot = portage.dep.Atom(atom_without_slot)
-                                               if portage.match_from_list(
-                                                       atom_without_slot, [inst_pkg]):
-                                                       cpv_list = [inst_pkg.cpv]
-                                               break
-
-                               if not cpv_list:
-                                       continue
-                               pkg_status = "merge"
-                               if installed or onlydeps:
-                                       pkg_status = "nomerge"
-                               # descending order
-                               cpv_list.reverse()
-                               for cpv in cpv_list:
-                                       # Make --noreplace take precedence over --newuse.
-                                       if not installed and noreplace and \
-                                               cpv in vardb.match(atom):
-                                               # If the installed version is masked, it may
-                                               # be necessary to look at lower versions,
-                                               # in case there is a visible downgrade.
-                                               continue
-                                       reinstall_for_flags = None
-                                       cache_key = (pkg_type, root, cpv, pkg_status)
-                                       calculated_use = True
-                                       pkg = self._pkg_cache.get(cache_key)
-                                       if pkg is None:
-                                               calculated_use = False
-                                               try:
-                                                       metadata = izip(db_keys, db.aux_get(cpv, db_keys))
-                                               except KeyError:
-                                                       continue
-                                               pkg = Package(built=built, cpv=cpv,
-                                                       installed=installed, metadata=metadata,
-                                                       onlydeps=onlydeps, root_config=root_config,
-                                                       type_name=pkg_type)
-                                               metadata = pkg.metadata
-                                               if not built:
-                                                       metadata['CHOST'] = pkgsettings.get('CHOST', '')
-                                               if not built and ("?" in metadata["LICENSE"] or \
-                                                       "?" in metadata["PROVIDE"]):
-                                                       # This is avoided whenever possible because
-                                                       # it's expensive. It only needs to be done here
-                                                       # if it has an effect on visibility.
-                                                       pkgsettings.setcpv(pkg)
-                                                       metadata["USE"] = pkgsettings["PORTAGE_USE"]
-                                                       calculated_use = True
-                                               self._pkg_cache[pkg] = pkg
-
-                                       if not installed or (built and matched_packages):
-                                               # Only enforce visibility on installed packages
-                                               # if there is at least one other visible package
-                                               # available. By filtering installed masked packages
-                                               # here, packages that have been masked since they
-                                               # were installed can be automatically downgraded
-                                               # to an unmasked version.
-                                               try:
-                                                       if not visible(pkgsettings, pkg):
-                                                               continue
-                                               except portage.exception.InvalidDependString:
-                                                       if not installed:
-                                                               continue
-
-                                               # Enable upgrade or downgrade to a version
-                                               # with visible KEYWORDS when the installed
-                                               # version is masked by KEYWORDS, but never
-                                               # reinstall the same exact version only due
-                                               # to a KEYWORDS mask.
-                                               if built and matched_packages:
-
-                                                       different_version = None
-                                                       for avail_pkg in matched_packages:
-                                                               if not portage.dep.cpvequal(
-                                                                       pkg.cpv, avail_pkg.cpv):
-                                                                       different_version = avail_pkg
-                                                                       break
-                                                       if different_version is not None:
-
-                                                               if installed and \
-                                                                       pkgsettings._getMissingKeywords(
-                                                                       pkg.cpv, pkg.metadata):
-                                                                       continue
-
-                                                               # If the ebuild no longer exists or its
-                                                               # keywords have been dropped, reject built
-                                                               # instances (installed or binary).
-                                                               # If --usepkgonly is enabled, assume that
-                                                               # the ebuild status should be ignored.
-                                                               if not usepkgonly:
-                                                                       try:
-                                                                               pkg_eb = self._pkg(
-                                                                                       pkg.cpv, "ebuild", root_config)
-                                                                       except portage.exception.PackageNotFound:
-                                                                               continue
-                                                                       else:
-                                                                               if not visible(pkgsettings, pkg_eb):
-                                                                                       continue
-
-                                       if not pkg.built and not calculated_use:
-                                               # This is avoided whenever possible because
-                                               # it's expensive.
-                                               pkgsettings.setcpv(pkg)
-                                               pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
-
-                                       if pkg.cp != atom.cp:
-                                               # A cpv can be returned from dbapi.match() as an
-                                               # old-style virtual match even in cases when the
-                                               # package does not actually PROVIDE the virtual.
-                                               # Filter out any such false matches here.
-                                               if not atom_set.findAtomForPackage(pkg):
-                                                       continue
-
-                                       myarg = None
-                                       if root == self.target_root:
-                                               try:
-                                                       # Ebuild USE must have been calculated prior
-                                                       # to this point, in case atoms have USE deps.
-                                                       myarg = self._iter_atoms_for_pkg(pkg).next()
-                                               except StopIteration:
-                                                       pass
-                                               except portage.exception.InvalidDependString:
-                                                       if not installed:
-                                                               # masked by corruption
-                                                               continue
-                                       if not installed and myarg:
-                                               found_available_arg = True
-
-                                       if atom.use and not pkg.built:
-                                               use = pkg.use.enabled
-                                               if atom.use.enabled.difference(use):
-                                                       continue
-                                               if atom.use.disabled.intersection(use):
-                                                       continue
-                                       if pkg.cp == atom_cp:
-                                               if highest_version is None:
-                                                       highest_version = pkg
-                                               elif pkg > highest_version:
-                                                       highest_version = pkg
-                                       # At this point, we've found the highest visible
-                                       # match from the current repo. Any lower versions
-                                       # from this repo are ignored, so the loop
-                                       # will always end with a break statement below
-                                       # this point.
-                                       if find_existing_node:
-                                               e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
-                                               if not e_pkg:
-                                                       break
-                                               if portage.dep.match_from_list(atom, [e_pkg]):
-                                                       if highest_version and \
-                                                               e_pkg.cp == atom_cp and \
-                                                               e_pkg < highest_version and \
-                                                               e_pkg.slot_atom != highest_version.slot_atom:
-                                                               # There is a higher version available in a
-                                                               # different slot, so this existing node is
-                                                               # irrelevant.
-                                                               pass
-                                                       else:
-                                                               matched_packages.append(e_pkg)
-                                                               existing_node = e_pkg
-                                               break
-                                       # Compare built package to current config and
-                                       # reject the built package if necessary.
-                                       if built and not installed and \
-                                               ("--newuse" in self.myopts or \
-                                               "--reinstall" in self.myopts):
-                                               iuses = pkg.iuse.all
-                                               old_use = pkg.use.enabled
-                                               if myeb:
-                                                       pkgsettings.setcpv(myeb)
-                                               else:
-                                                       pkgsettings.setcpv(pkg)
-                                               now_use = pkgsettings["PORTAGE_USE"].split()
-                                               forced_flags = set()
-                                               forced_flags.update(pkgsettings.useforce)
-                                               forced_flags.update(pkgsettings.usemask)
-                                               cur_iuse = iuses
-                                               if myeb and not usepkgonly:
-                                                       cur_iuse = myeb.iuse.all
-                                               if self._reinstall_for_flags(forced_flags,
-                                                       old_use, iuses,
-                                                       now_use, cur_iuse):
-                                                       break
-                                       # Compare current config to installed package
-                                       # and do not reinstall if possible.
-                                       if not installed and \
-                                               ("--newuse" in self.myopts or \
-                                               "--reinstall" in self.myopts) and \
-                                               cpv in vardb.match(atom):
-                                               pkgsettings.setcpv(pkg)
-                                               forced_flags = set()
-                                               forced_flags.update(pkgsettings.useforce)
-                                               forced_flags.update(pkgsettings.usemask)
-                                               old_use = vardb.aux_get(cpv, ["USE"])[0].split()
-                                               old_iuse = set(filter_iuse_defaults(
-                                                       vardb.aux_get(cpv, ["IUSE"])[0].split()))
-                                               cur_use = pkg.use.enabled
-                                               cur_iuse = pkg.iuse.all
-                                               reinstall_for_flags = \
-                                                       self._reinstall_for_flags(
-                                                       forced_flags, old_use, old_iuse,
-                                                       cur_use, cur_iuse)
-                                               if reinstall_for_flags:
-                                                       reinstall = True
-                                       if not built:
-                                               myeb = pkg
-                                       matched_packages.append(pkg)
-                                       if reinstall_for_flags:
-                                               self._reinstall_nodes[pkg] = \
-                                                       reinstall_for_flags
-                                       break
-
-               if not matched_packages:
-                       return None, None
-
-               if "--debug" in self.myopts:
-                       for pkg in matched_packages:
-                               portage.writemsg("%s %s\n" % \
-                                       ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
-
-               # Filter out any old-style virtual matches if they are
-               # mixed with new-style virtual matches.
-               cp = portage.dep_getkey(atom)
-               if len(matched_packages) > 1 and \
-                       "virtual" == portage.catsplit(cp)[0]:
-                       for pkg in matched_packages:
-                               if pkg.cp != cp:
-                                       continue
-                               # Got a new-style virtual, so filter
-                               # out any old-style virtuals.
-                               matched_packages = [pkg for pkg in matched_packages \
-                                       if pkg.cp == cp]
-                               break
-
-               if len(matched_packages) > 1:
-                       bestmatch = portage.best(
-                               [pkg.cpv for pkg in matched_packages])
-                       matched_packages = [pkg for pkg in matched_packages \
-                               if portage.dep.cpvequal(pkg.cpv, bestmatch)]
-
-               # ordered by type preference ("ebuild" type is the last resort)
-               return matched_packages[-1], existing_node
-
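The --newuse/--reinstall handling above compares the old and current USE state and only schedules a reinstall when a relevant flag actually changed. _reinstall_for_flags() itself is defined elsewhere in this class; the helper below is only a hypothetical approximation of that kind of comparison, ignoring forced flags and flags that have left IUSE:

def changed_use(forced, old_use, old_iuse, cur_use, cur_iuse):
    # Flags present in both old and new IUSE, minus use-forced/masked ones.
    relevant = (set(old_iuse) & set(cur_iuse)) - set(forced)
    # Of those, report the flags whose enabled state differs.
    return relevant & (set(old_use) ^ set(cur_use))

flags = changed_use(["ssl"], ["ssl", "gtk"], ["ssl", "gtk", "qt4"],
    ["ssl"], ["ssl", "gtk", "qt4"])
# flags == set(["gtk"])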
-       def _select_pkg_from_graph(self, root, atom, onlydeps=False):
-               """
-               Select packages that have already been added to the graph or
-               those that are installed and have not been scheduled for
-               replacement.
-               """
-               graph_db = self._graph_trees[root]["porttree"].dbapi
-               matches = graph_db.match_pkgs(atom)
-               if not matches:
-                       return None, None
-               pkg = matches[-1] # highest match
-               in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
-               return pkg, in_graph
-
-       def _complete_graph(self):
-               """
-               Add any deep dependencies of required sets (args, system, world) that
-               have not been pulled into the graph yet. This ensures that the graph
-               is consistent such that initially satisfied deep dependencies are not
-               broken in the new graph. Initially unsatisfied dependencies are
-               irrelevant since we only want to avoid breaking dependencies that are
-               initially satisfied.
-
-               Since this method can consume enough time to disturb users, it is
-               currently only enabled by the --complete-graph option.
-               """
-               if "--buildpkgonly" in self.myopts or \
-                       "recurse" not in self.myparams:
-                       return 1
-
-               if "complete" not in self.myparams:
-                       # Skip this to avoid consuming enough time to disturb users.
-                       return 1
-
-               # Put the depgraph into a mode that causes it to only
-               # select packages that have already been added to the
-               # graph or those that are installed and have not been
-               # scheduled for replacement. Also, toggle the "deep"
-               # parameter so that all dependencies are traversed and
-               # accounted for.
-               self._select_atoms = self._select_atoms_from_graph
-               self._select_package = self._select_pkg_from_graph
-               already_deep = "deep" in self.myparams
-               if not already_deep:
-                       self.myparams.add("deep")
-
-               for root in self.roots:
-                       required_set_names = self._required_set_names.copy()
-                       if root == self.target_root and \
-                               (already_deep or "empty" in self.myparams):
-                               required_set_names.difference_update(self._sets)
-                       if not required_set_names and not self._ignored_deps:
-                               continue
-                       root_config = self.roots[root]
-                       setconfig = root_config.setconfig
-                       args = []
-                       # Reuse existing SetArg instances when available.
-                       for arg in self.digraph.root_nodes():
-                               if not isinstance(arg, SetArg):
-                                       continue
-                               if arg.root_config != root_config:
-                                       continue
-                               if arg.name in required_set_names:
-                                       args.append(arg)
-                                       required_set_names.remove(arg.name)
-                       # Create new SetArg instances only when necessary.
-                       for s in required_set_names:
-                               expanded_set = InternalPackageSet(
-                                       initial_atoms=setconfig.getSetAtoms(s))
-                               atom = SETPREFIX + s
-                               args.append(SetArg(arg=atom, set=expanded_set,
-                                       root_config=root_config))
-                       vardb = root_config.trees["vartree"].dbapi
-                       for arg in args:
-                               for atom in arg.set:
-                                       self._dep_stack.append(
-                                               Dependency(atom=atom, root=root, parent=arg))
-                       if self._ignored_deps:
-                               self._dep_stack.extend(self._ignored_deps)
-                               self._ignored_deps = []
-                       if not self._create_graph(allow_unsatisfied=True):
-                               return 0
-                       # Check the unsatisfied deps to see if any initially satisfied deps
-                       # will become unsatisfied due to an upgrade. Initially unsatisfied
-                       # deps are irrelevant since we only want to avoid breaking deps
-                       # that are initially satisfied.
-                       while self._unsatisfied_deps:
-                               dep = self._unsatisfied_deps.pop()
-                               matches = vardb.match_pkgs(dep.atom)
-                               if not matches:
-                                       self._initially_unsatisfied_deps.append(dep)
-                                       continue
-                               # A scheduled installation broke a deep dependency.
-                               # Add the installed package to the graph so that it
-                               # will be appropriately reported as a slot collision
-                               # (possibly solvable via backtracking).
-                               pkg = matches[-1] # highest match
-                               if not self._add_pkg(pkg, dep):
-                                       return 0
-                               if not self._create_graph(allow_unsatisfied=True):
-                                       return 0
-               return 1
-
-       def _pkg(self, cpv, type_name, root_config, installed=False):
-               """
-               Get a package instance from the cache, or create a new
-               one if necessary. Raises PackageNotFound if aux_get
-               fails for some reason (package does not exist or is
-               corrupt).
-               """
-               operation = "merge"
-               if installed:
-                       operation = "nomerge"
-               pkg = self._pkg_cache.get(
-                       (type_name, root_config.root, cpv, operation))
-               if pkg is None:
-                       tree_type = self.pkg_tree_map[type_name]
-                       db = root_config.trees[tree_type].dbapi
-                       db_keys = list(self._trees_orig[root_config.root][
-                               tree_type].dbapi._aux_cache_keys)
-                       try:
-                               metadata = izip(db_keys, db.aux_get(cpv, db_keys))
-                       except KeyError:
-                               raise portage.exception.PackageNotFound(cpv)
-                       pkg = Package(cpv=cpv, metadata=metadata,
-                               root_config=root_config, installed=installed)
-                       if type_name == "ebuild":
-                               settings = self.pkgsettings[root_config.root]
-                               settings.setcpv(pkg)
-                               pkg.metadata["USE"] = settings["PORTAGE_USE"]
-                               pkg.metadata['CHOST'] = settings.get('CHOST', '')
-                       self._pkg_cache[pkg] = pkg
-               return pkg
-
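The method above follows a plain cache-or-create pattern: look the key up, build and store the instance only on a miss, and always hand back the cached object. A generic, self-contained sketch with toy objects in place of Package:

class PkgCache(object):
    def __init__(self, factory):
        self._factory = factory
        self._store = {}

    def get(self, key):
        obj = self._store.get(key)
        if obj is None:
            # Build only on a cache miss, then remember the instance.
            obj = self._factory(key)
            self._store[key] = obj
        return obj

cache = PkgCache(lambda cpv: {"cpv": cpv})
assert cache.get("sys-apps/portage-2.1.6") is cache.get("sys-apps/portage-2.1.6")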
-       def validate_blockers(self):
-               """Remove any blockers from the digraph that do not match any of the
-               packages within the graph.  If necessary, create hard deps to ensure
-               correct merge order such that mutually blocking packages are never
-               installed simultaneously."""
-
-               if "--buildpkgonly" in self.myopts or \
-                       "--nodeps" in self.myopts:
-                       return True
-
-               #if "deep" in self.myparams:
-               if True:
-                       # Pull in blockers from all installed packages that haven't already
-                       # been pulled into the depgraph. This is now enabled unconditionally
-                       # (see the commented condition above), despite the performance penalty
-                       # incurred by all the additional dep_check calls that are required.
-
-                       dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
-                       for myroot in self.trees:
-                               vardb = self.trees[myroot]["vartree"].dbapi
-                               portdb = self.trees[myroot]["porttree"].dbapi
-                               pkgsettings = self.pkgsettings[myroot]
-                               final_db = self.mydbapi[myroot]
-
-                               blocker_cache = BlockerCache(myroot, vardb)
-                               stale_cache = set(blocker_cache)
-                               for pkg in vardb:
-                                       cpv = pkg.cpv
-                                       stale_cache.discard(cpv)
-                                       pkg_in_graph = self.digraph.contains(pkg)
-
-                                       # Check for masked installed packages. Only warn about
-                                       # packages that are in the graph in order to avoid warning
-                                       # about those that will be automatically uninstalled during
-                                       # the merge process or by --depclean.
-                                       if pkg in final_db:
-                                               if pkg_in_graph and not visible(pkgsettings, pkg):
-                                                       self._masked_installed.add(pkg)
-
-                                       blocker_atoms = None
-                                       blockers = None
-                                       if pkg_in_graph:
-                                               blockers = []
-                                               try:
-                                                       blockers.extend(
-                                                               self._blocker_parents.child_nodes(pkg))
-                                               except KeyError:
-                                                       pass
-                                               try:
-                                                       blockers.extend(
-                                                               self._irrelevant_blockers.child_nodes(pkg))
-                                               except KeyError:
-                                                       pass
-                                       if blockers is not None:
-                                               blockers = set(str(blocker.atom) \
-                                                       for blocker in blockers)
-
-                                       # If this node has any blockers, create a "nomerge"
-                                       # node for it so that they can be enforced.
-                                       self.spinner.update()
-                                       blocker_data = blocker_cache.get(cpv)
-                                       if blocker_data is not None and \
-                                               blocker_data.counter != long(pkg.metadata["COUNTER"]):
-                                               blocker_data = None
-
-                                       # If blocker data from the graph is available, use
-                                       # it to validate the cache and update the cache if
-                                       # it seems invalid.
-                                       if blocker_data is not None and \
-                                               blockers is not None:
-                                               if not blockers.symmetric_difference(
-                                                       blocker_data.atoms):
-                                                       continue
-                                               blocker_data = None
-
-                                       if blocker_data is None and \
-                                               blockers is not None:
-                                               # Re-use the blockers from the graph.
-                                               blocker_atoms = sorted(blockers)
-                                               counter = long(pkg.metadata["COUNTER"])
-                                               blocker_data = \
-                                                       blocker_cache.BlockerData(counter, blocker_atoms)
-                                               blocker_cache[pkg.cpv] = blocker_data
-                                               continue
-
-                                       if blocker_data:
-                                               blocker_atoms = blocker_data.atoms
-                                       else:
-                                               # Use aux_get() to trigger FakeVartree global
-                                               # updates on *DEPEND when appropriate.
-                                               depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
-                                               # It is crucial to pass in final_db here in order to
-                                               # optimize dep_check calls by eliminating atoms via
-                                               # dep_wordreduce and dep_eval calls.
-                                               try:
-                                                       portage.dep._dep_check_strict = False
-                                                       try:
-                                                               success, atoms = portage.dep_check(depstr,
-                                                                       final_db, pkgsettings, myuse=pkg.use.enabled,
-                                                                       trees=self._graph_trees, myroot=myroot)
-                                                       except Exception, e:
-                                                               if isinstance(e, SystemExit):
-                                                                       raise
-                                                               # This is helpful, for example, if a ValueError
-                                                               # is thrown from cpv_expand due to multiple
-                                                               # matches (this can happen if an atom lacks a
-                                                               # category).
-                                                               show_invalid_depstring_notice(
-                                                                       pkg, depstr, str(e))
-                                                               del e
-                                                               raise
-                                               finally:
-                                                       portage.dep._dep_check_strict = True
-                                               if not success:
-                                                       replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
-                                                       if replacement_pkg and \
-                                                               replacement_pkg[0].operation == "merge":
-                                                               # This package is being replaced anyway, so
-                                                               # ignore invalid dependencies so as not to
-                                                               # annoy the user too much (otherwise they'd be
-                                                               # forced to manually unmerge it first).
-                                                               continue
-                                                       show_invalid_depstring_notice(pkg, depstr, atoms)
-                                                       return False
-                                               blocker_atoms = [myatom for myatom in atoms \
-                                                       if myatom.startswith("!")]
-                                               blocker_atoms.sort()
-                                               counter = long(pkg.metadata["COUNTER"])
-                                               blocker_cache[cpv] = \
-                                                       blocker_cache.BlockerData(counter, blocker_atoms)
-                                       if blocker_atoms:
-                                               try:
-                                                       for atom in blocker_atoms:
-                                                               blocker = Blocker(atom=portage.dep.Atom(atom),
-                                                                       eapi=pkg.metadata["EAPI"], root=myroot)
-                                                               self._blocker_parents.add(blocker, pkg)
-                                               except portage.exception.InvalidAtom, e:
-                                                       depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
-                                                       show_invalid_depstring_notice(
-                                                               pkg, depstr, "Invalid Atom: %s" % (e,))
-                                                       return False
-                               for cpv in stale_cache:
-                                       del blocker_cache[cpv]
-                               blocker_cache.flush()
-                               del blocker_cache
-
-               # Discard any "uninstall" tasks scheduled by previous calls
-               # to this method, since those tasks may not make sense given
-               # the current graph state.
-               previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
-               if previous_uninstall_tasks:
-                       self._blocker_uninstalls = digraph()
-                       self.digraph.difference_update(previous_uninstall_tasks)
-
-               for blocker in self._blocker_parents.leaf_nodes():
-                       self.spinner.update()
-                       root_config = self.roots[blocker.root]
-                       virtuals = root_config.settings.getvirtuals()
-                       myroot = blocker.root
-                       initial_db = self.trees[myroot]["vartree"].dbapi
-                       final_db = self.mydbapi[myroot]
-
-                       provider_virtual = False
-                       if blocker.cp in virtuals and \
-                               not self._have_new_virt(blocker.root, blocker.cp):
-                               provider_virtual = True
-
-                       # Use this to check PROVIDE for each matched package
-                       # when necessary.
-                       atom_set = InternalPackageSet(
-                               initial_atoms=[blocker.atom])
-
-                       if provider_virtual:
-                               atoms = []
-                               for provider_entry in virtuals[blocker.cp]:
-                                       provider_cp = \
-                                               portage.dep_getkey(provider_entry)
-                                       atoms.append(blocker.atom.replace(
-                                               blocker.cp, provider_cp))
-                       else:
-                               atoms = [blocker.atom]
-
-                       blocked_initial = set()
-                       for atom in atoms:
-                               for pkg in initial_db.match_pkgs(atom):
-                                       if atom_set.findAtomForPackage(pkg):
-                                               blocked_initial.add(pkg)
-
-                       blocked_final = set()
-                       for atom in atoms:
-                               for pkg in final_db.match_pkgs(atom):
-                                       if atom_set.findAtomForPackage(pkg):
-                                               blocked_final.add(pkg)
-
-                       if not blocked_initial and not blocked_final:
-                               parent_pkgs = self._blocker_parents.parent_nodes(blocker)
-                               self._blocker_parents.remove(blocker)
-                               # Discard any parents that don't have any more blockers.
-                               for pkg in parent_pkgs:
-                                       self._irrelevant_blockers.add(blocker, pkg)
-                                       if not self._blocker_parents.child_nodes(pkg):
-                                               self._blocker_parents.remove(pkg)
-                               continue
-                       for parent in self._blocker_parents.parent_nodes(blocker):
-                               unresolved_blocks = False
-                               depends_on_order = set()
-                               for pkg in blocked_initial:
-                                       if pkg.slot_atom == parent.slot_atom:
-                                               # TODO: Support blocks within slots in cases where it
-                                               # might make sense.  For example, a new version might
-                                               # require that the old version be uninstalled at build
-                                               # time.
-                                               continue
-                                       if parent.installed:
-                                               # Two currently installed packages conflict with
-                                               # each other. Ignore this case since the damage
-                                               # is already done and this would be likely to
-                                               # confuse users if displayed like a normal blocker.
-                                               continue
-
-                                       self._blocked_pkgs.add(pkg, blocker)
-
-                                       if parent.operation == "merge":
-                                               # Maybe the blocked package can be replaced or simply
-                                               # unmerged to resolve this block.
-                                               depends_on_order.add((pkg, parent))
-                                               continue
-                                       # None of the above blocker resolution techniques apply,
-                                       # so apparently this one is unresolvable.
-                                       unresolved_blocks = True
-                               for pkg in blocked_final:
-                                       if pkg.slot_atom == parent.slot_atom:
-                                               # TODO: Support blocks within slots.
-                                               continue
-                                       if parent.operation == "nomerge" and \
-                                               pkg.operation == "nomerge":
-                                               # This blocker will be handled the next time that a
-                                               # merge of either package is triggered.
-                                               continue
-
-                                       self._blocked_pkgs.add(pkg, blocker)
-
-                                       # Maybe the blocking package can be
-                                       # unmerged to resolve this block.
-                                       if parent.operation == "merge" and pkg.installed:
-                                               depends_on_order.add((pkg, parent))
-                                               continue
-                                       elif parent.operation == "nomerge":
-                                               depends_on_order.add((parent, pkg))
-                                               continue
-                                       # None of the above blocker resolution techniques apply,
-                                       # so apparently this one is unresolvable.
-                                       unresolved_blocks = True
-
-                               # Make sure we don't unmerge any package that has been pulled
-                               # into the graph.
-                               if not unresolved_blocks and depends_on_order:
-                                       for inst_pkg, inst_task in depends_on_order:
-                                               if self.digraph.contains(inst_pkg) and \
-                                                       self.digraph.parent_nodes(inst_pkg):
-                                                       unresolved_blocks = True
-                                                       break
-
-                               if not unresolved_blocks and depends_on_order:
-                                       for inst_pkg, inst_task in depends_on_order:
-                                               uninst_task = Package(built=inst_pkg.built,
-                                                       cpv=inst_pkg.cpv, installed=inst_pkg.installed,
-                                                       metadata=inst_pkg.metadata,
-                                                       operation="uninstall",
-                                                       root_config=inst_pkg.root_config,
-                                                       type_name=inst_pkg.type_name)
-                                               self._pkg_cache[uninst_task] = uninst_task
-                                               # Enforce correct merge order with a hard dep.
-                                               self.digraph.addnode(uninst_task, inst_task,
-                                                       priority=BlockerDepPriority.instance)
-                                               # Count references to this blocker so that it can be
-                                               # invalidated after nodes referencing it have been
-                                               # merged.
-                                               self._blocker_uninstalls.addnode(uninst_task, blocker)
-                               if not unresolved_blocks and not depends_on_order:
-                                       self._irrelevant_blockers.add(blocker, parent)
-                                       self._blocker_parents.remove_edge(blocker, parent)
-                                       if not self._blocker_parents.parent_nodes(blocker):
-                                               self._blocker_parents.remove(blocker)
-                                       if not self._blocker_parents.child_nodes(parent):
-                                               self._blocker_parents.remove(parent)
-                               if unresolved_blocks:
-                                       self._unsolvable_blockers.add(blocker, parent)
-
-               return True
-
-       def _accept_blocker_conflicts(self):
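-               # Blocker conflicts can be tolerated with options that skip
-               # dependency resolution or that avoid merging packages to the
-               # live filesystem.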
-               acceptable = False
-               for x in ("--buildpkgonly", "--fetchonly",
-                       "--fetch-all-uri", "--nodeps"):
-                       if x in self.myopts:
-                               acceptable = True
-                               break
-               return acceptable
-
-       def _merge_order_bias(self, mygraph):
-               """
-               For optimal leaf node selection, promote deep system runtime deps and
-               order nodes from highest to lowest overall reference count.
-               """
-
-               node_info = {}
-               for node in mygraph.order:
-                       node_info[node] = len(mygraph.parent_nodes(node))
-               deep_system_deps = _find_deep_system_runtime_deps(mygraph)
-
-               def cmp_merge_preference(node1, node2):
-
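-                       # Schedule uninstall operations as late as possible.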
-                       if node1.operation == 'uninstall':
-                               if node2.operation == 'uninstall':
-                                       return 0
-                               return 1
-
-                       if node2.operation == 'uninstall':
-                               if node1.operation == 'uninstall':
-                                       return 0
-                               return -1
-
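-                       # Promote deep system runtime deps so that they merge earlier.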
-                       node1_sys = node1 in deep_system_deps
-                       node2_sys = node2 in deep_system_deps
-                       if node1_sys != node2_sys:
-                               if node1_sys:
-                                       return -1
-                               return 1
-
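-                       # Otherwise, prefer nodes with a higher reference (parent) count.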
-                       return node_info[node2] - node_info[node1]
-
-               mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
-
-       def altlist(self, reversed=False):
-
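-               # Resolve conflicts and serialize the merge list, retrying the
-               # serialization when requested; the result is cached.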
-               while self._serialized_tasks_cache is None:
-                       self._resolve_conflicts()
-                       try:
-                               self._serialized_tasks_cache, self._scheduler_graph = \
-                                       self._serialize_tasks()
-                       except self._serialize_tasks_retry:
-                               pass
-
-               retlist = self._serialized_tasks_cache[:]
-               if reversed:
-                       retlist.reverse()
-               return retlist
-
-       def schedulerGraph(self):
-               """
-               The scheduler graph is identical to the normal one except that
-               uninstall edges are reversed in specific cases that require
-               conflicting packages to be temporarily installed simultaneously.
-               This is intended for use by the Scheduler in its parallelization
-               logic. It ensures that temporary simultaneous installation of
-               conflicting packages is avoided when appropriate (especially for
-               !!atom blockers), but allowed in specific cases that require it.
-
-               Note that this method calls break_refs() which alters the state of
-               internal Package instances such that this depgraph instance should
-               not be used to perform any more calculations.
-               """
-               if self._scheduler_graph is None:
-                       self.altlist()
-               self.break_refs(self._scheduler_graph.order)
-               return self._scheduler_graph
-
-       def break_refs(self, nodes):
-               """
-               Take a mergelist like that returned from self.altlist() and
-               break any references that lead back to the depgraph. This is
-               useful if you want to hold references to packages without
-               also holding the depgraph on the heap.
-               """
-               for node in nodes:
-                       if hasattr(node, "root_config"):
-                               # The FakeVartree references the _package_cache which
-                               # references the depgraph. So that Package instances don't
-                               # hold the depgraph and FakeVartree on the heap, replace
-                               # the RootConfig that references the FakeVartree with the
-                               # original RootConfig instance which references the actual
-                               # vartree.
-                               node.root_config = \
-                                       self._trees_orig[node.root_config.root]["root_config"]
-
-       def _resolve_conflicts(self):
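-               # Make sure the graph is complete and the blockers are valid
-               # before processing any slot conflicts.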
-               if not self._complete_graph():
-                       raise self._unknown_internal_error()
-
-               if not self.validate_blockers():
-                       raise self._unknown_internal_error()
-
-               if self._slot_collision_info:
-                       self._process_slot_conflicts()
-
-       def _serialize_tasks(self):
-
-               if "--debug" in self.myopts:
-                       writemsg("\ndigraph:\n\n", noiselevel=-1)
-                       self.digraph.debug_print()
-                       writemsg("\n", noiselevel=-1)
-
-               scheduler_graph = self.digraph.copy()
-
-               if '--nodeps' in self.myopts:
-                       # Preserve the package order given on the command line.
-                       return ([node for node in scheduler_graph \
-                               if isinstance(node, Package) \
-                               and node.operation == 'merge'], scheduler_graph)
-
-               mygraph = self.digraph.copy()
-               # Prune "nomerge" root nodes if nothing depends on them, since
-               # otherwise they slow down merge order calculation. Don't remove
-               # non-root nodes since they help optimize merge order in some cases
-               # such as revdep-rebuild.
-               removed_nodes = set()
-               while True:
-                       for node in mygraph.root_nodes():
-                               if not isinstance(node, Package) or \
-                                       node.installed or node.onlydeps:
-                                       removed_nodes.add(node)
-                       if removed_nodes:
-                               self.spinner.update()
-                               mygraph.difference_update(removed_nodes)
-                       if not removed_nodes:
-                               break
-                       removed_nodes.clear()
-               self._merge_order_bias(mygraph)
-               def cmp_circular_bias(n1, n2):
-                       """
-                       RDEPEND is stronger than PDEPEND and this function
-                       measures such a strength bias within a circular
-                       dependency relationship.
-                       """
-                       n1_n2_medium = n2 in mygraph.child_nodes(n1,
-                               ignore_priority=priority_range.ignore_medium_soft)
-                       n2_n1_medium = n1 in mygraph.child_nodes(n2,
-                               ignore_priority=priority_range.ignore_medium_soft)
-                       if n1_n2_medium == n2_n1_medium:
-                               return 0
-                       elif n1_n2_medium:
-                               return 1
-                       return -1
-               myblocker_uninstalls = self._blocker_uninstalls.copy()
-               retlist = []
-               # Contains uninstall tasks that have been scheduled to
-               # occur after overlapping blockers have been installed.
-               scheduled_uninstalls = set()
-               # Contains any Uninstall tasks that have been ignored
-               # in order to avoid the circular deps code path. These
-               # correspond to blocker conflicts that could not be
-               # resolved.
-               ignored_uninstall_tasks = set()
-               have_uninstall_task = False
-               complete = "complete" in self.myparams
-               asap_nodes = []
-
-               def get_nodes(**kwargs):
-                       """
-                       Returns leaf nodes excluding Uninstall instances
-                       since those should be executed as late as possible.
-                       """
-                       return [node for node in mygraph.leaf_nodes(**kwargs) \
-                               if isinstance(node, Package) and \
-                                       (node.operation != "uninstall" or \
-                                       node in scheduled_uninstalls)]
-
-               # sys-apps/portage needs special treatment if ROOT="/"
-               running_root = self._running_root.root
-               from portage.const import PORTAGE_PACKAGE_ATOM
-               runtime_deps = InternalPackageSet(
-                       initial_atoms=[PORTAGE_PACKAGE_ATOM])
-               running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
-                       PORTAGE_PACKAGE_ATOM)
-               replacement_portage = self.mydbapi[running_root].match_pkgs(
-                       PORTAGE_PACKAGE_ATOM)
-
-               if running_portage:
-                       running_portage = running_portage[0]
-               else:
-                       running_portage = None
-
-               if replacement_portage:
-                       replacement_portage = replacement_portage[0]
-               else:
-                       replacement_portage = None
-
-               if replacement_portage == running_portage:
-                       replacement_portage = None
-
-               if replacement_portage is not None:
-                       # update from running_portage to replacement_portage asap
-                       asap_nodes.append(replacement_portage)
-
-               if running_portage is not None:
-                       try:
-                               portage_rdepend = self._select_atoms_highest_available(
-                                       running_root, running_portage.metadata["RDEPEND"],
-                                       myuse=running_portage.use.enabled,
-                                       parent=running_portage, strict=False)
-                       except portage.exception.InvalidDependString, e:
-                               portage.writemsg("!!! Invalid RDEPEND in " + \
-                                       "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
-                                       (running_root, running_portage.cpv, e), noiselevel=-1)
-                               del e
-                               portage_rdepend = []
-                       runtime_deps.update(atom for atom in portage_rdepend \
-                               if not atom.startswith("!"))
-
-               def gather_deps(ignore_priority, mergeable_nodes,
-                       selected_nodes, node):
-                       """
-                       Recursively gather a group of nodes that RDEPEND on
-                       each other. This ensures that they are merged as a group
-                       and get their RDEPENDs satisfied as soon as possible.
-                       """
-                       if node in selected_nodes:
-                               return True
-                       if node not in mergeable_nodes:
-                               return False
-                       if node == replacement_portage and \
-                               mygraph.child_nodes(node,
-                               ignore_priority=priority_range.ignore_medium_soft):
-                               # Make sure that portage always has all of its
-                               # RDEPENDs installed first.
-                               return False
-                       selected_nodes.add(node)
-                       for child in mygraph.child_nodes(node,
-                               ignore_priority=ignore_priority):
-                               if not gather_deps(ignore_priority,
-                                       mergeable_nodes, selected_nodes, child):
-                                       return False
-                       return True
-
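-               # These helpers also ignore edges created by blocker-enforced
-               # uninstalls (BlockerDepPriority) when selecting leaf nodes.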
-               def ignore_uninst_or_med(priority):
-                       if priority is BlockerDepPriority.instance:
-                               return True
-                       return priority_range.ignore_medium(priority)
-
-               def ignore_uninst_or_med_soft(priority):
-                       if priority is BlockerDepPriority.instance:
-                               return True
-                       return priority_range.ignore_medium_soft(priority)
-
-               tree_mode = "--tree" in self.myopts
-               # Tracks whether or not the current iteration should prefer asap_nodes
-               # if available.  This is set to False when the previous iteration
-               # failed to select any nodes.  It is reset whenever nodes are
-               # successfully selected.
-               prefer_asap = True
-
-               # Controls whether or not the current iteration should drop edges that
-               # are "satisfied" by installed packages, in order to solve circular
-               # dependencies. The deep runtime dependencies of installed packages are
-               # not checked in this case (bug #199856), so it must be avoided
-               # whenever possible.
-               drop_satisfied = False
-
-               # State of variables for successive iterations that loosen the
-               # criteria for node selection.
-               #
-               # iteration   prefer_asap   drop_satisfied
-               # 1           True          False
-               # 2           False         False
-               # 3           False         True
-               #
-               # If no nodes are selected on the last iteration, it is due to
-               # unresolved blockers or circular dependencies.
-
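-               # Main merge-order loop: keep selecting nodes until the working
-               # graph is empty.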
-               while not mygraph.empty():
-                       self.spinner.update()
-                       selected_nodes = None
-                       ignore_priority = None
-                       if drop_satisfied or (prefer_asap and asap_nodes):
-                               priority_range = DepPrioritySatisfiedRange
-                       else:
-                               priority_range = DepPriorityNormalRange
-                       if prefer_asap and asap_nodes:
-                               # ASAP nodes are merged before their soft deps. Go ahead and
-                               # select root nodes here if necessary, since it's typical for
-                               # the parent to have been removed from the graph already.
-                               asap_nodes = [node for node in asap_nodes \
-                                       if mygraph.contains(node)]
-                               for node in asap_nodes:
-                                       if not mygraph.child_nodes(node,
-                                               ignore_priority=priority_range.ignore_soft):
-                                               selected_nodes = [node]
-                                               asap_nodes.remove(node)
-                                               break
-                       if not selected_nodes and \
-                               not (prefer_asap and asap_nodes):
-                               for i in xrange(priority_range.NONE,
-                                       priority_range.MEDIUM_SOFT + 1):
-                                       ignore_priority = priority_range.ignore_priority[i]
-                                       nodes = get_nodes(ignore_priority=ignore_priority)
-                                       if nodes:
-                                               # If there is a mix of uninstall nodes with other
-                                               # types, save the uninstall nodes for later since
-                                               # sometimes a merge node will render an uninstall
-                                               # node unnecessary (due to occupying the same slot),
-                                               # and we want to avoid executing a separate uninstall
-                                               # task in that case.
-                                               if len(nodes) > 1:
-                                                       good_uninstalls = []
-                                                       with_some_uninstalls_excluded = []
-                                                       for node in nodes:
-                                                               if node.operation == "uninstall":
-                                                                       slot_node = self.mydbapi[node.root
-                                                                               ].match_pkgs(node.slot_atom)
-                                                                       if slot_node and \
-                                                                               slot_node[0].operation == "merge":
-                                                                               continue
-                                                                       good_uninstalls.append(node)
-                                                               with_some_uninstalls_excluded.append(node)
-                                                       if good_uninstalls:
-                                                               nodes = good_uninstalls
-                                                       elif with_some_uninstalls_excluded:
-                                                               nodes = with_some_uninstalls_excluded
-
-                                               if ignore_priority is None and not tree_mode:
-                                                       # Greedily pop all of these nodes since no
-                                                       # relationship has been ignored. This optimization
-                                                       # destroys --tree output, so it's disabled in tree
-                                                       # mode.
-                                                       selected_nodes = nodes
-                                               else:
-                                                       # For optimal merge order:
-                                                       #  * Only pop one node.
-                                                       #  * Removing a root node (node without a parent)
-                                                       #    will not produce a leaf node, so avoid it.
-                                                       #  * It's normal for a selected uninstall to be a
-                                                       #    root node, so don't check them for parents.
-                                                       for node in nodes:
-                                                               if node.operation == "uninstall" or \
-                                                                       mygraph.parent_nodes(node):
-                                                                       selected_nodes = [node]
-                                                                       break
-
-                                               if selected_nodes:
-                                                       break
-
-                       if not selected_nodes:
-                               nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
-                               if nodes:
-                                       mergeable_nodes = set(nodes)
-                                       if prefer_asap and asap_nodes:
-                                               nodes = asap_nodes
-                                       for i in xrange(priority_range.SOFT,
-                                               priority_range.MEDIUM_SOFT + 1):
-                                               ignore_priority = priority_range.ignore_priority[i]
-                                               for node in nodes:
-                                                       if not mygraph.parent_nodes(node):
-                                                               continue
-                                                       selected_nodes = set()
-                                                       if gather_deps(ignore_priority,
-                                                               mergeable_nodes, selected_nodes, node):
-                                                               break
-                                                       else:
-                                                               selected_nodes = None
-                                               if selected_nodes:
-                                                       break
-
-                                       if prefer_asap and asap_nodes and not selected_nodes:
-                                               # We failed to find any asap nodes to merge, so ignore
-                                               # them for the next iteration.
-                                               prefer_asap = False
-                                               continue
-
-                       if selected_nodes and ignore_priority is not None:
-                               # Try to merge ignored medium_soft deps as soon as possible
-                               # if they're not satisfied by installed packages.
-                               for node in selected_nodes:
-                                       children = set(mygraph.child_nodes(node))
-                                       soft = children.difference(
-                                               mygraph.child_nodes(node,
-                                               ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
-                                       medium_soft = children.difference(
-                                               mygraph.child_nodes(node,
-                                                       ignore_priority = \
-                                                       DepPrioritySatisfiedRange.ignore_medium_soft))
-                                       medium_soft.difference_update(soft)
-                                       for child in medium_soft:
-                                               if child in selected_nodes:
-                                                       continue
-                                               if child in asap_nodes:
-                                                       continue
-                                               asap_nodes.append(child)
-
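-
-                       # Order groups of circularly dependent nodes by dependency
-                       # strength (see cmp_circular_bias above).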
-                       if selected_nodes and len(selected_nodes) > 1:
-                               if not isinstance(selected_nodes, list):
-                                       selected_nodes = list(selected_nodes)
-                               selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
-
-                       if not selected_nodes and not myblocker_uninstalls.is_empty():
-                               # An Uninstall task needs to be executed in order to
-                               # avoid a conflict, if possible.
-
-                               if drop_satisfied:
-                                       priority_range = DepPrioritySatisfiedRange
-                               else:
-                                       priority_range = DepPriorityNormalRange
-
-                               mergeable_nodes = get_nodes(
-                                       ignore_priority=ignore_uninst_or_med)
-
-                               min_parent_deps = None
-                               uninst_task = None
-                               for task in myblocker_uninstalls.leaf_nodes():
-                                       # Do some sanity checks so that system or world packages
-                                       # don't get uninstalled inappropriately here (only really
-                                       # necessary when --complete-graph has not been enabled).
-
-                                       if task in ignored_uninstall_tasks:
-                                               continue
-
-                                       if task in scheduled_uninstalls:
-                                               # It's been scheduled but it hasn't
-                                               # been executed yet due to dependence
-                                               # on installation of blocking packages.
-                                               continue
-
-                                       root_config = self.roots[task.root]
-                                       inst_pkg = self._pkg_cache[
-                                               ("installed", task.root, task.cpv, "nomerge")]
-
-                                       if self.digraph.contains(inst_pkg):
-                                               continue
-
-                                       forbid_overlap = False
-                                       heuristic_overlap = False
-                                       for blocker in myblocker_uninstalls.parent_nodes(task):
-                                               if blocker.eapi in ("0", "1"):
-                                                       heuristic_overlap = True
-                                               elif blocker.atom.blocker.overlap.forbid:
-                                                       forbid_overlap = True
-                                                       break
-                                       if forbid_overlap and running_root == task.root:
-                                               continue
-
-                                       if heuristic_overlap and running_root == task.root:
-                                               # Never uninstall sys-apps/portage or its essential
-                                               # dependencies, except through replacement.
-                                               try:
-                                                       runtime_dep_atoms = \
-                                                               list(runtime_deps.iterAtomsForPackage(task))
-                                               except portage.exception.InvalidDependString, e:
-                                                       portage.writemsg("!!! Invalid PROVIDE in " + \
-                                                               "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
-                                                               (task.root, task.cpv, e), noiselevel=-1)
-                                                       del e
-                                                       continue
-
-                                               # Don't uninstall a runtime dep if it appears
-                                               # to be the only suitable one installed.
-                                               skip = False
-                                               vardb = root_config.trees["vartree"].dbapi
-                                               for atom in runtime_dep_atoms:
-                                                       other_version = None
-                                                       for pkg in vardb.match_pkgs(atom):
-                                                               if pkg.cpv == task.cpv and \
-                                                                       pkg.metadata["COUNTER"] == \
-                                                                       task.metadata["COUNTER"]:
-                                                                       continue
-                                                               other_version = pkg
-                                                               break
-                                                       if other_version is None:
-                                                               skip = True
-                                                               break
-                                               if skip:
-                                                       continue
-
-                                               # For packages in the system set, don't take
-                                               # any chances. If the conflict can't be resolved
-                                               # by a normal replacement operation then abort.
-                                               skip = False
-                                               try:
-                                                       for atom in root_config.sets[
-                                                               "system"].iterAtomsForPackage(task):
-                                                               skip = True
-                                                               break
-                                               except portage.exception.InvalidDependString, e:
-                                                       portage.writemsg("!!! Invalid PROVIDE in " + \
-                                                               "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
-                                                               (task.root, task.cpv, e), noiselevel=-1)
-                                                       del e
-                                                       skip = True
-                                               if skip:
-                                                       continue
-
-                                       # Note that the world check isn't always
-                                       # necessary since self._complete_graph() will
-                                       # add all packages from the system and world sets to the
-                                       # graph. This just allows unresolved conflicts to be
-                                       # detected as early as possible, which makes it possible
-                                       # to avoid calling self._complete_graph() when it is
-                                       # unnecessary due to blockers triggering an abort.
-                                       if not complete:
-                                               # For packages in the world set, go ahead and uninstall
-                                               # when necessary, as long as the atom will be satisfied
-                                               # in the final state.
-                                               graph_db = self.mydbapi[task.root]
-                                               skip = False
-                                               try:
-                                                       for atom in root_config.sets[
-                                                               "world"].iterAtomsForPackage(task):
-                                                               satisfied = False
-                                                               for pkg in graph_db.match_pkgs(atom):
-                                                                       if pkg == inst_pkg:
-                                                                               continue
-                                                                       satisfied = True
-                                                                       break
-                                                               if not satisfied:
-                                                                       skip = True
-                                                                       self._blocked_world_pkgs[inst_pkg] = atom
-                                                                       break
-                                               except portage.exception.InvalidDependString, e:
-                                                       portage.writemsg("!!! Invalid PROVIDE in " + \
-                                                               "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
-                                                               (task.root, task.cpv, e), noiselevel=-1)
-                                                       del e
-                                                       skip = True
-                                               if skip:
-                                                       continue
-
-                                       # Check the deps of parent nodes to ensure that
-                                       # the chosen task produces a leaf node. Maybe
-                                       # this can be optimized some more to make the
-                                       # best possible choice, but the current algorithm
-                                       # is simple and should be near optimal for most
-                                       # common cases.
-                                       mergeable_parent = False
-                                       parent_deps = set()
-                                       for parent in mygraph.parent_nodes(task):
-                                               parent_deps.update(mygraph.child_nodes(parent,
-                                                       ignore_priority=priority_range.ignore_medium_soft))
-                                               if parent in mergeable_nodes and \
-                                                       gather_deps(ignore_uninst_or_med_soft,
-                                                       mergeable_nodes, set(), parent):
-                                                       mergeable_parent = True
-
-                                       if not mergeable_parent:
-                                               continue
-
-                                       parent_deps.remove(task)
-                                       if min_parent_deps is None or \
-                                               len(parent_deps) < min_parent_deps:
-                                               min_parent_deps = len(parent_deps)
-                                               uninst_task = task
-
-                               if uninst_task is not None:
-                                       # The uninstall is performed only after blocking
-                                       # packages have been merged on top of it. Files that
-                                       # collide with blocking packages are detected and
-                                       # removed from the list of files to be uninstalled.
-                                       scheduled_uninstalls.add(uninst_task)
-                                       parent_nodes = mygraph.parent_nodes(uninst_task)
-
-                                       # Reverse the parent -> uninstall edges since we want
-                                       # to do the uninstall after blocking packages have
-                                       # been merged on top of it.
-                                       mygraph.remove(uninst_task)
-                                       for blocked_pkg in parent_nodes:
-                                               mygraph.add(blocked_pkg, uninst_task,
-                                                       priority=BlockerDepPriority.instance)
-                                               scheduler_graph.remove_edge(uninst_task, blocked_pkg)
-                                               scheduler_graph.add(blocked_pkg, uninst_task,
-                                                       priority=BlockerDepPriority.instance)
-
-                                       # Reset the state variables for leaf node selection and
-                                       # continue trying to select leaf nodes.
-                                       prefer_asap = True
-                                       drop_satisfied = False
-                                       continue
-
-                       if not selected_nodes:
-                               # Only select root nodes as a last resort. This case should
-                               # only trigger when the graph is nearly empty and the only
-                               # remaining nodes are isolated (no parents or children). Since
-                               # the nodes must be isolated, ignore_priority is not needed.
-                               selected_nodes = get_nodes()
-
-                       if not selected_nodes and not drop_satisfied:
-                               drop_satisfied = True
-                               continue
-
-                       if not selected_nodes and not myblocker_uninstalls.is_empty():
-                               # If possible, drop an uninstall task here in order to avoid
-                               # the circular deps code path. The corresponding blocker will
-                               # still be counted as an unresolved conflict.
-                               uninst_task = None
-                               for node in myblocker_uninstalls.leaf_nodes():
-                                       try:
-                                               mygraph.remove(node)
-                                       except KeyError:
-                                               pass
-                                       else:
-                                               uninst_task = node
-                                               ignored_uninstall_tasks.add(node)
-                                               break
-
-                               if uninst_task is not None:
-                                       # Reset the state variables for leaf node selection and
-                                       # continue trying to select leaf nodes.
-                                       prefer_asap = True
-                                       drop_satisfied = False
-                                       continue
-
-                       if not selected_nodes:
-                               self._circular_deps_for_display = mygraph
-                               raise self._unknown_internal_error()
-
-                       # At this point, we've succeeded in selecting one or more nodes, so
-                       # reset state variables for leaf node selection.
-                       prefer_asap = True
-                       drop_satisfied = False
-
-                       mygraph.difference_update(selected_nodes)
-
-                       for node in selected_nodes:
-                               if isinstance(node, Package) and \
-                                       node.operation == "nomerge":
-                                       continue
-
-                               # Handle interactions between blockers
-                               # and uninstallation tasks.
-                               solved_blockers = set()
-                               uninst_task = None
-                               if isinstance(node, Package) and \
-                                       "uninstall" == node.operation:
-                                       have_uninstall_task = True
-                                       uninst_task = node
-                               else:
-                                       vardb = self.trees[node.root]["vartree"].dbapi
-                                       previous_cpv = vardb.match(node.slot_atom)
-                                       if previous_cpv:
-                                               # The package will be replaced by this one, so remove
-                                               # the corresponding Uninstall task if necessary.
-                                               previous_cpv = previous_cpv[0]
-                                               uninst_task = \
-                                                       ("installed", node.root, previous_cpv, "uninstall")
-                                               try:
-                                                       mygraph.remove(uninst_task)
-                                               except KeyError:
-                                                       pass
-
-                               if uninst_task is not None and \
-                                       uninst_task not in ignored_uninstall_tasks and \
-                                       myblocker_uninstalls.contains(uninst_task):
-                                       blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
-                                       myblocker_uninstalls.remove(uninst_task)
-                                       # Discard any blockers that this Uninstall solves.
-                                       for blocker in blocker_nodes:
-                                               if not myblocker_uninstalls.child_nodes(blocker):
-                                                       myblocker_uninstalls.remove(blocker)
-                                                       solved_blockers.add(blocker)
-
-                               retlist.append(node)
-
-                               if (isinstance(node, Package) and \
-                                       "uninstall" == node.operation) or \
-                                       (uninst_task is not None and \
-                                       uninst_task in scheduled_uninstalls):
-                                       # Include satisfied blockers in the merge list
-                                       # since the user might be interested and also
-                                       # it serves as an indicator that blocking packages
-                                       # will be temporarily installed simultaneously.
-                                       for blocker in solved_blockers:
-                                               retlist.append(Blocker(atom=blocker.atom,
-                                                       root=blocker.root, eapi=blocker.eapi,
-                                                       satisfied=True))
-
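-               # Collect the remaining blockers that no scheduled uninstall
-               # resolves; they are appended to the merge list and treated as
-               # conflicts below.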
-               unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
-               for node in myblocker_uninstalls.root_nodes():
-                       unsolvable_blockers.add(node)
-
-               for blocker in unsolvable_blockers:
-                       retlist.append(blocker)
-
-               # If any Uninstall tasks need to be executed in order
-               # to avoid a conflict, complete the graph with any
-               # dependencies that may have been initially
-               # neglected (to ensure that unsafe Uninstall tasks
-               # are properly identified and blocked from execution).
-               if have_uninstall_task and \
-                       not complete and \
-                       not unsolvable_blockers:
-                       self.myparams.add("complete")
-                       raise self._serialize_tasks_retry("")
-
-               if unsolvable_blockers and \
-                       not self._accept_blocker_conflicts():
-                       self._unsatisfied_blockers_for_display = unsolvable_blockers
-                       self._serialized_tasks_cache = retlist[:]
-                       self._scheduler_graph = scheduler_graph
-                       raise self._unknown_internal_error()
-
-               if self._slot_collision_info and \
-                       not self._accept_blocker_conflicts():
-                       self._serialized_tasks_cache = retlist[:]
-                       self._scheduler_graph = scheduler_graph
-                       raise self._unknown_internal_error()
-
-               return retlist, scheduler_graph
-
-       def _show_circular_deps(self, mygraph):
-               # No leaf nodes are available, so we have a circular
-               # dependency panic situation.  Reduce the noise level to a
-               # minimum via repeated elimination of root nodes since they
-               # have no parents and thus cannot be part of a cycle.
-               while True:
-                       root_nodes = mygraph.root_nodes(
-                               ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
-                       if not root_nodes:
-                               break
-                       mygraph.difference_update(root_nodes)
-               # Display the USE flags that are enabled on nodes that are part
-               # of dependency cycles in case that helps the user decide to
-               # disable some of them.
-               display_order = []
-               tempgraph = mygraph.copy()
-               while not tempgraph.empty():
-                       nodes = tempgraph.leaf_nodes()
-                       if not nodes:
-                               node = tempgraph.order[0]
-                       else:
-                               node = nodes[0]
-                       display_order.append(node)
-                       tempgraph.remove(node)
-               display_order.reverse()
-               self.myopts.pop("--quiet", None)
-               self.myopts.pop("--verbose", None)
-               self.myopts["--tree"] = True
-               portage.writemsg("\n\n", noiselevel=-1)
-               self.display(display_order)
-               prefix = colorize("BAD", " * ")
-               portage.writemsg("\n", noiselevel=-1)
-               portage.writemsg(prefix + "Error: circular dependencies:\n",
-                       noiselevel=-1)
-               portage.writemsg("\n", noiselevel=-1)
-               mygraph.debug_print()
-               portage.writemsg("\n", noiselevel=-1)
-               portage.writemsg(prefix + "Note that circular dependencies " + \
-                       "can often be avoided by temporarily\n", noiselevel=-1)
-               portage.writemsg(prefix + "disabling USE flags that trigger " + \
-                       "optional dependencies.\n", noiselevel=-1)
-
-       def _show_merge_list(self):
-               if self._serialized_tasks_cache is not None and \
-                       not (self._displayed_list and \
-                       (self._displayed_list == self._serialized_tasks_cache or \
-                       self._displayed_list == \
-                               list(reversed(self._serialized_tasks_cache)))):
-                       display_list = self._serialized_tasks_cache[:]
-                       if "--tree" in self.myopts:
-                               display_list.reverse()
-                       self.display(display_list)
-
-       def _show_unsatisfied_blockers(self, blockers):
-               self._show_merge_list()
-               msg = "Error: The above package list contains " + \
-                       "packages which cannot be installed " + \
-                       "at the same time on the same system."
-               prefix = colorize("BAD", " * ")
-               from textwrap import wrap
-               portage.writemsg("\n", noiselevel=-1)
-               for line in wrap(msg, 70):
-                       portage.writemsg(prefix + line + "\n", noiselevel=-1)
-
-               # Display the conflicting packages along with the packages
-               # that pulled them in. This is helpful for troubleshooting
-               # cases in which blockers don't solve automatically and
-               # the reasons are not apparent from the normal merge list
-               # display.
-
-               conflict_pkgs = {}
-               for blocker in blockers:
-                       for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
-                               self._blocker_parents.parent_nodes(blocker)):
-                               parent_atoms = self._parent_atoms.get(pkg)
-                               if not parent_atoms:
-                                       atom = self._blocked_world_pkgs.get(pkg)
-                                       if atom is not None:
-                                               parent_atoms = set([("@world", atom)])
-                               if parent_atoms:
-                                       conflict_pkgs[pkg] = parent_atoms
-
-               if conflict_pkgs:
-                       # Reduce noise by pruning packages that are only
-                       # pulled in by other conflict packages.
-                       pruned_pkgs = set()
-                       for pkg, parent_atoms in conflict_pkgs.iteritems():
-                               relevant_parent = False
-                               for parent, atom in parent_atoms:
-                                       if parent not in conflict_pkgs:
-                                               relevant_parent = True
-                                               break
-                               if not relevant_parent:
-                                       pruned_pkgs.add(pkg)
-                       for pkg in pruned_pkgs:
-                               del conflict_pkgs[pkg]
-
-               if conflict_pkgs:
-                       msg = []
-                       msg.append("\n")
-                       indent = "  "
-                       # Max number of parents shown, to avoid flooding the display.
-                       max_parents = 3
-                       for pkg, parent_atoms in conflict_pkgs.iteritems():
-
-                               pruned_list = set()
-
-                               # Prefer packages that are not directly involved in a conflict.
-                               for parent_atom in parent_atoms:
-                                       if len(pruned_list) >= max_parents:
-                                               break
-                                       parent, atom = parent_atom
-                                       if parent not in conflict_pkgs:
-                                               pruned_list.add(parent_atom)
-
-                               for parent_atom in parent_atoms:
-                                       if len(pruned_list) >= max_parents:
-                                               break
-                                       pruned_list.add(parent_atom)
-
-                               omitted_parents = len(parent_atoms) - len(pruned_list)
-                               msg.append(indent + "%s pulled in by\n" % pkg)
-
-                               for parent_atom in pruned_list:
-                                       parent, atom = parent_atom
-                                       msg.append(2*indent)
-                                       if isinstance(parent,
-                                               (PackageArg, AtomArg)):
-                                               # For PackageArg and AtomArg types, it's
-                                               # redundant to display the atom attribute.
-                                               msg.append(str(parent))
-                                       else:
-                                               # Display the specific atom from SetArg or
-                                               # Package types.
-                                               msg.append("%s required by %s" % (atom, parent))
-                                       msg.append("\n")
-
-                               if omitted_parents:
-                                       msg.append(2*indent)
-                                       msg.append("(and %d more)\n" % omitted_parents)
-
-                               msg.append("\n")
-
-                       sys.stderr.write("".join(msg))
-                       sys.stderr.flush()
-
-               if "--quiet" not in self.myopts:
-                       show_blocker_docs_link()
-
-       def display(self, mylist, favorites=[], verbosity=None):
-
-               # This is used to prevent display_problems() from
-               # redundantly displaying this exact same merge list
-               # again via _show_merge_list().
-               self._displayed_list = mylist
-
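-               # Verbosity levels: 1 = --quiet, 2 = default, 3 = --verbose.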
-               if verbosity is None:
-                       verbosity = ("--quiet" in self.myopts and 1 or \
-                               "--verbose" in self.myopts and 3 or 2)
-               favorites_set = InternalPackageSet(favorites)
-               oneshot = "--oneshot" in self.myopts or \
-                       "--onlydeps" in self.myopts
-               columns = "--columns" in self.myopts
-               changelogs=[]
-               p=[]
-               blockers = []
-
-               counters = PackageCounters()
-
-               if verbosity == 1 and "--verbose" not in self.myopts:
-                       def create_use_string(*args):
-                               return ""
-               else:
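-                       # Build a NAME="..." USE string for one USE_EXPAND group,
-                       # marking new (%), changed (*) and forced (parenthesized) flags.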
-                       def create_use_string(name, cur_iuse, iuse_forced, cur_use,
-                               old_iuse, old_use,
-                               is_new, reinst_flags,
-                               all_flags=(verbosity == 3 or "--quiet" in self.myopts),
-                               alphabetical=("--alphabetical" in self.myopts)):
-                               enabled = []
-                               if alphabetical:
-                                       disabled = enabled
-                                       removed = enabled
-                               else:
-                                       disabled = []
-                                       removed = []
-                               cur_iuse = set(cur_iuse)
-                               enabled_flags = cur_iuse.intersection(cur_use)
-                               removed_iuse = set(old_iuse).difference(cur_iuse)
-                               any_iuse = cur_iuse.union(old_iuse)
-                               any_iuse = list(any_iuse)
-                               any_iuse.sort()
-                               for flag in any_iuse:
-                                       flag_str = None
-                                       isEnabled = False
-                                       reinst_flag = reinst_flags and flag in reinst_flags
-                                       if flag in enabled_flags:
-                                               isEnabled = True
-                                               if is_new or flag in old_use and \
-                                                       (all_flags or reinst_flag):
-                                                       flag_str = red(flag)
-                                               elif flag not in old_iuse:
-                                                       flag_str = yellow(flag) + "%*"
-                                               elif flag not in old_use:
-                                                       flag_str = green(flag) + "*"
-                                       elif flag in removed_iuse:
-                                               if all_flags or reinst_flag:
-                                                       flag_str = yellow("-" + flag) + "%"
-                                                       if flag in old_use:
-                                                               flag_str += "*"
-                                                       flag_str = "(" + flag_str + ")"
-                                                       removed.append(flag_str)
-                                               continue
-                                       else:
-                                               if is_new or flag in old_iuse and \
-                                                       flag not in old_use and \
-                                                       (all_flags or reinst_flag):
-                                                       flag_str = blue("-" + flag)
-                                               elif flag not in old_iuse:
-                                                       flag_str = yellow("-" + flag)
-                                                       if flag not in iuse_forced:
-                                                               flag_str += "%"
-                                               elif flag in old_use:
-                                                       flag_str = green("-" + flag) + "*"
-                                       if flag_str:
-                                               if flag in iuse_forced:
-                                                       flag_str = "(" + flag_str + ")"
-                                               if isEnabled:
-                                                       enabled.append(flag_str)
-                                               else:
-                                                       disabled.append(flag_str)
-
-                               if alphabetical:
-                                       ret = " ".join(enabled)
-                               else:
-                                       ret = " ".join(enabled + disabled + removed)
-                               if ret:
-                                       ret = '%s="%s" ' % (name, ret)
-                               return ret
-
-               repo_display = RepoDisplay(self.roots)
-
-               tree_nodes = []
-               display_list = []
-               mygraph = self.digraph.copy()
-
-               # If there are any Uninstall instances, add the corresponding
-               # blockers to the digraph (useful for --tree display).
-
-               executed_uninstalls = set(node for node in mylist \
-                       if isinstance(node, Package) and node.operation == "unmerge")
-
-               for uninstall in self._blocker_uninstalls.leaf_nodes():
-                       uninstall_parents = \
-                               self._blocker_uninstalls.parent_nodes(uninstall)
-                       if not uninstall_parents:
-                               continue
-
-                       # Remove the corresponding "nomerge" node and substitute
-                       # the Uninstall node.
-                       inst_pkg = self._pkg_cache[
-                               ("installed", uninstall.root, uninstall.cpv, "nomerge")]
-                       try:
-                               mygraph.remove(inst_pkg)
-                       except KeyError:
-                               pass
-
-                       try:
-                               inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
-                       except KeyError:
-                               inst_pkg_blockers = []
-
-                       # Break the Package -> Uninstall edges.
-                       mygraph.remove(uninstall)
-
-                       # Resolution of a package's blockers
-                       # depends on its own uninstallation.
-                       for blocker in inst_pkg_blockers:
-                               mygraph.add(uninstall, blocker)
-
-                       # Expand Package -> Uninstall edges into
-                       # Package -> Blocker -> Uninstall edges.
-                       for blocker in uninstall_parents:
-                               mygraph.add(uninstall, blocker)
-                               for parent in self._blocker_parents.parent_nodes(blocker):
-                                       if parent != inst_pkg:
-                                               mygraph.add(blocker, parent)
-
-                       # If the uninstall task did not need to be executed because
-                       # of an upgrade, display Blocker -> Upgrade edges since the
-                       # corresponding Blocker -> Uninstall edges will not be shown.
-                       upgrade_node = \
-                               self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
-                       if upgrade_node is not None and \
-                               uninstall not in executed_uninstalls:
-                               for blocker in uninstall_parents:
-                                       mygraph.add(upgrade_node, blocker)
-
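-               # Build the (node, depth, ordered) display list; with --tree the
-               # depth reflects each node's position in the dependency tree.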
-               unsatisfied_blockers = []
-               i = 0
-               depth = 0
-               shown_edges = set()
-               for x in mylist:
-                       if isinstance(x, Blocker) and not x.satisfied:
-                               unsatisfied_blockers.append(x)
-                               continue
-                       graph_key = x
-                       if "--tree" in self.myopts:
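-                               # Walk back up tree_nodes to find the deepest entry
-                               # that is a parent of this node.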
-                               depth = len(tree_nodes)
-                               while depth and graph_key not in \
-                                       mygraph.child_nodes(tree_nodes[depth-1]):
-                                               depth -= 1
-                               if depth:
-                                       tree_nodes = tree_nodes[:depth]
-                                       tree_nodes.append(graph_key)
-                                       display_list.append((x, depth, True))
-                                       shown_edges.add((graph_key, tree_nodes[depth-1]))
-                               else:
-                                       traversed_nodes = set() # prevent endless cycles
-                                       traversed_nodes.add(graph_key)
-                                       def add_parents(current_node, ordered):
-                                               parent_nodes = None
-                                               # Do not traverse to parents if this node is an
-                                               # argument or a direct member of a set that has
-                                               # been specified as an argument (system or world).
-                                               if current_node not in self._set_nodes:
-                                                       parent_nodes = mygraph.parent_nodes(current_node)
-                                               if parent_nodes:
-                                                       child_nodes = set(mygraph.child_nodes(current_node))
-                                                       selected_parent = None
-                                                       # First, try to avoid a direct cycle.
-                                                       for node in parent_nodes:
-                                                               if not isinstance(node, (Blocker, Package)):
-                                                                       continue
-                                                               if node not in traversed_nodes and \
-                                                                       node not in child_nodes:
-                                                                       edge = (current_node, node)
-                                                                       if edge in shown_edges:
-                                                                               continue
-                                                                       selected_parent = node
-                                                                       break
-                                                       if not selected_parent:
-                                                               # A direct cycle is unavoidable.
-                                                               for node in parent_nodes:
-                                                                       if not isinstance(node, (Blocker, Package)):
-                                                                               continue
-                                                                       if node not in traversed_nodes:
-                                                                               edge = (current_node, node)
-                                                                               if edge in shown_edges:
-                                                                                       continue
-                                                                               selected_parent = node
-                                                                               break
-                                                       if selected_parent:
-                                                               shown_edges.add((current_node, selected_parent))
-                                                               traversed_nodes.add(selected_parent)
-                                                               add_parents(selected_parent, False)
-                                               display_list.append((current_node,
-                                                       len(tree_nodes), ordered))
-                                               tree_nodes.append(current_node)
-                                       tree_nodes = []
-                                       add_parents(graph_key, True)
-                       else:
-                               display_list.append((x, depth, True))
-               mylist = display_list
-               for x in unsatisfied_blockers:
-                       mylist.append((x, 0, True))
-
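-               # Walk the display list in reverse, dropping consecutive
-               # duplicates and "nomerge" entries that do not lead to a
-               # deeper merge.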
-               last_merge_depth = 0
-               for i in xrange(len(mylist)-1,-1,-1):
-                       graph_key, depth, ordered = mylist[i]
-                       if not ordered and depth == 0 and i > 0 \
-                               and graph_key == mylist[i-1][0] and \
-                               mylist[i-1][1] == 0:
-                               # An ordered node got a consecutive duplicate when the tree was
-                               # being filled in.
-                               del mylist[i]
-                               continue
-                       if ordered and graph_key[-1] != "nomerge":
-                               last_merge_depth = depth
-                               continue
-                       if depth >= last_merge_depth or \
-                               i < len(mylist) - 1 and \
-                               depth >= mylist[i+1][1]:
-                                       del mylist[i]
-
-               from portage import flatten
-               from portage.dep import use_reduce, paren_reduce
-               # List of files to fetch - avoids counting the same file twice
-               # in the size display (verbose mode).
-               myfetchlist=[]
-
-               # Use this set to detect when all the "repoadd" strings are "[0]"
-               # and disable the entire repo display in this case.
-               repoadd_set = set()
-
-               for mylist_index in xrange(len(mylist)):
-                       x, depth, ordered = mylist[mylist_index]
-                       pkg_type = x[0]
-                       myroot = x[1]
-                       pkg_key = x[2]
-                       portdb = self.trees[myroot]["porttree"].dbapi
-                       bindb  = self.trees[myroot]["bintree"].dbapi
-                       vardb = self.trees[myroot]["vartree"].dbapi
-                       vartree = self.trees[myroot]["vartree"]
-                       pkgsettings = self.pkgsettings[myroot]
-
-                       fetch=" "
-                       indent = " " * depth
-
-                       if isinstance(x, Blocker):
-                               if x.satisfied:
-                                       blocker_style = "PKG_BLOCKER_SATISFIED"
-                                       addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
-                               else:
-                                       blocker_style = "PKG_BLOCKER"
-                                       addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
-                               if ordered:
-                                       counters.blocks += 1
-                                       if x.satisfied:
-                                               counters.blocks_satisfied += 1
-                               resolved = portage.key_expand(
-                                       str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
-                               if "--columns" in self.myopts and "--quiet" in self.myopts:
-                                       addl += " " + colorize(blocker_style, resolved)
-                               else:
-                                       addl = "[%s %s] %s%s" % \
-                                               (colorize(blocker_style, "blocks"),
-                                               addl, indent, colorize(blocker_style, resolved))
-                               block_parents = self._blocker_parents.parent_nodes(x)
-                               block_parents = set([pnode[2] for pnode in block_parents])
-                               block_parents = ", ".join(block_parents)
-                               if resolved!=x[2]:
-                                       addl += colorize(blocker_style,
-                                               " (\"%s\" is blocking %s)") % \
-                                               (str(x.atom).lstrip("!"), block_parents)
-                               else:
-                                       addl += colorize(blocker_style,
-                                               " (is blocking %s)") % block_parents
-                               if isinstance(x, Blocker) and x.satisfied:
-                                       if columns:
-                                               continue
-                                       p.append(addl)
-                               else:
-                                       blockers.append(addl)
-                       else:
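-                               # x is a Package; x[3] holds its operation
-                               # ("merge", "uninstall" or "nomerge").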
-                               pkg_status = x[3]
-                               pkg_merge = ordered and pkg_status == "merge"
-                               if not pkg_merge and pkg_status == "merge":
-                                       pkg_status = "nomerge"
-                               built = pkg_type != "ebuild"
-                               installed = pkg_type == "installed"
-                               pkg = x
-                               metadata = pkg.metadata
-                               ebuild_path = None
-                               repo_name = metadata["repository"]
-                               if pkg_type == "ebuild":
-                                       ebuild_path = portdb.findname(pkg_key)
-                                       if not ebuild_path: # shouldn't happen
-                                               raise portage.exception.PackageNotFound(pkg_key)
-                                       repo_path_real = os.path.dirname(os.path.dirname(
-                                               os.path.dirname(ebuild_path)))
-                               else:
-                                       repo_path_real = portdb.getRepositoryPath(repo_name)
-                               pkg_use = list(pkg.use.enabled)
-                               try:
-                                       restrict = flatten(use_reduce(paren_reduce(
-                                               pkg.metadata["RESTRICT"]), uselist=pkg_use))
-                               except portage.exception.InvalidDependString, e:
-                                       if not pkg.installed:
-                                               show_invalid_depstring_notice(x,
-                                                       pkg.metadata["RESTRICT"], str(e))
-                                               del e
-                                               return 1
-                                       restrict = []
-                               if "ebuild" == pkg_type and x[3] != "nomerge" and \
-                                       "fetch" in restrict:
-                                       fetch = red("F")
-                                       if ordered:
-                                               counters.restrict_fetch += 1
-                                       if portdb.fetch_check(pkg_key, pkg_use):
-                                               fetch = green("f")
-                                               if ordered:
-                                                       counters.restrict_fetch_satisfied += 1
-
-                               # We need to use "--emptytree" testing here rather than "empty" param testing because "empty"
-                               # param is used for -u, where you still *do* want to see when something is being upgraded.
-                               myoldbest = []
-                               myinslotlist = None
-                               installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
-                               if vardb.cpv_exists(pkg_key):
-                                       addl="  "+yellow("R")+fetch+"  "
-                                       if ordered:
-                                               if pkg_merge:
-                                                       counters.reinst += 1
-                                               elif pkg_status == "uninstall":
-                                                       counters.uninst += 1
-                               # filter out old-style virtual matches
-                               elif installed_versions and \
-                                       portage.cpv_getkey(installed_versions[0]) == \
-                                       portage.cpv_getkey(pkg_key):
-                                       myinslotlist = vardb.match(pkg.slot_atom)
-                                       # If this is the first install of a new-style virtual, we
-                                       # need to filter out old-style virtual matches.
-                                       if myinslotlist and \
-                                               portage.cpv_getkey(myinslotlist[0]) != \
-                                               portage.cpv_getkey(pkg_key):
-                                               myinslotlist = None
-                                       if myinslotlist:
-                                               myoldbest = myinslotlist[:]
-                                               addl = "   " + fetch
-                                               if not portage.dep.cpvequal(pkg_key,
-                                                       portage.best([pkg_key] + myoldbest)):
-                                                       # Downgrade in slot
-                                                       addl += turquoise("U")+blue("D")
-                                                       if ordered:
-                                                               counters.downgrades += 1
-                                               else:
-                                                       # Update in slot
-                                                       addl += turquoise("U") + " "
-                                                       if ordered:
-                                                               counters.upgrades += 1
-                                       else:
-                                               # New slot, mark it new.
-                                               addl = " " + green("NS") + fetch + "  "
-                                               myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
-                                               if ordered:
-                                                       counters.newslot += 1
-
-                                       if "--changelog" in self.myopts:
-                                               inst_matches = vardb.match(pkg.slot_atom)
-                                               if inst_matches:
-                                                       changelogs.extend(self.calc_changelog(
-                                                               portdb.findname(pkg_key),
-                                                               inst_matches[0], pkg_key))
-                               else:
-                                       addl = " " + green("N") + " " + fetch + "  "
-                                       if ordered:
-                                               counters.new += 1
-
-                               verboseadd = ""
-                               repoadd = None
-
-                               if True:
-                                       # USE flag display
-                                       forced_flags = set()
-                                       pkgsettings.setcpv(pkg) # for package.use.{mask,force}
-                                       forced_flags.update(pkgsettings.useforce)
-                                       forced_flags.update(pkgsettings.usemask)
-
-                                       cur_use = [flag for flag in pkg.use.enabled \
-                                               if flag in pkg.iuse.all]
-                                       cur_iuse = sorted(pkg.iuse.all)
-
-                                       if myoldbest and myinslotlist:
-                                               previous_cpv = myoldbest[0]
-                                       else:
-                                               previous_cpv = pkg.cpv
-                                       if vardb.cpv_exists(previous_cpv):
-                                               old_iuse, old_use = vardb.aux_get(
-                                                               previous_cpv, ["IUSE", "USE"])
-                                               old_iuse = list(set(
-                                                       filter_iuse_defaults(old_iuse.split())))
-                                               old_iuse.sort()
-                                               old_use = old_use.split()
-                                               is_new = False
-                                       else:
-                                               old_iuse = []
-                                               old_use = []
-                                               is_new = True
-
-                                       old_use = [flag for flag in old_use if flag in old_iuse]
-
-                                       use_expand = pkgsettings["USE_EXPAND"].lower().split()
-                                       use_expand.sort()
-                                       use_expand.reverse()
-                                       use_expand_hidden = \
-                                               pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
-
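-                                       # Split a flat USE flag list into per-USE_EXPAND groups,
-                                       # stripping the expand prefix; anything left stays under "USE".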
-                                       def map_to_use_expand(myvals, forcedFlags=False,
-                                               removeHidden=True):
-                                               ret = {}
-                                               forced = {}
-                                               for exp in use_expand:
-                                                       ret[exp] = []
-                                                       forced[exp] = set()
-                                                       for val in myvals[:]:
-                                                               if val.startswith(exp.lower()+"_"):
-                                                                       if val in forced_flags:
-                                                                               forced[exp].add(val[len(exp)+1:])
-                                                                       ret[exp].append(val[len(exp)+1:])
-                                                                       myvals.remove(val)
-                                               ret["USE"] = myvals
-                                               forced["USE"] = [val for val in myvals \
-                                                       if val in forced_flags]
-                                               if removeHidden:
-                                                       for exp in use_expand_hidden:
-                                                               ret.pop(exp, None)
-                                               if forcedFlags:
-                                                       return ret, forced
-                                               return ret
-
-                                       # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
-                                       # are the only thing that triggered reinstallation.
-                                       reinst_flags_map = {}
-                                       reinstall_for_flags = self._reinstall_nodes.get(pkg)
-                                       reinst_expand_map = None
-                                       if reinstall_for_flags:
-                                               reinst_flags_map = map_to_use_expand(
-                                                       list(reinstall_for_flags), removeHidden=False)
-                                               for k in list(reinst_flags_map):
-                                                       if not reinst_flags_map[k]:
-                                                               del reinst_flags_map[k]
-                                               if not reinst_flags_map.get("USE"):
-                                                       reinst_expand_map = reinst_flags_map.copy()
-                                                       reinst_expand_map.pop("USE", None)
-                                       if reinst_expand_map and \
-                                               not set(reinst_expand_map).difference(
-                                               use_expand_hidden):
-                                               use_expand_hidden = \
-                                                       set(use_expand_hidden).difference(
-                                                       reinst_expand_map)
-
-                                       cur_iuse_map, iuse_forced = \
-                                               map_to_use_expand(cur_iuse, forcedFlags=True)
-                                       cur_use_map = map_to_use_expand(cur_use)
-                                       old_iuse_map = map_to_use_expand(old_iuse)
-                                       old_use_map = map_to_use_expand(old_use)
-
-                                       use_expand.sort()
-                                       use_expand.insert(0, "USE")
-
-                                       for key in use_expand:
-                                               if key in use_expand_hidden:
-                                                       continue
-                                               verboseadd += create_use_string(key.upper(),
-                                                       cur_iuse_map[key], iuse_forced[key],
-                                                       cur_use_map[key], old_iuse_map[key],
-                                                       old_use_map[key], is_new,
-                                                       reinst_flags_map.get(key))
-
-                               if verbosity == 3:
-                                       # size verbose
-                                       mysize=0
-                                       if pkg_type == "ebuild" and pkg_merge:
-                                               try:
-                                                       myfilesdict = portdb.getfetchsizes(pkg_key,
-                                                               useflags=pkg_use, debug=self.edebug)
-                                               except portage.exception.InvalidDependString, e:
-                                                       src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
-                                                       show_invalid_depstring_notice(x, src_uri, str(e))
-                                                       del e
-                                                       return 1
-                                               if myfilesdict is None:
-                                                       myfilesdict="[empty/missing/bad digest]"
-                                               else:
-                                                       for myfetchfile in myfilesdict:
-                                                               if myfetchfile not in myfetchlist:
-                                                                       mysize+=myfilesdict[myfetchfile]
-                                                                       myfetchlist.append(myfetchfile)
-                                                       if ordered:
-                                                               counters.totalsize += mysize
-                                               verboseadd += format_size(mysize)
-
-                                       # overlay verbose
-                                       # look up any previous version in the same slot and its repository
-                                       has_previous = False
-                                       repo_name_prev = None
-                                       slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
-                                               metadata["SLOT"])
-                                       slot_matches = vardb.match(slot_atom)
-                                       if slot_matches:
-                                               has_previous = True
-                                               repo_name_prev = vardb.aux_get(slot_matches[0],
-                                                       ["repository"])[0]
-
-                                       # now use the data to generate output
-                                       if pkg.installed or not has_previous:
-                                               repoadd = repo_display.repoStr(repo_path_real)
-                                       else:
-                                               repo_path_prev = None
-                                               if repo_name_prev:
-                                                       repo_path_prev = portdb.getRepositoryPath(
-                                                               repo_name_prev)
-                                               if repo_path_prev == repo_path_real:
-                                                       repoadd = repo_display.repoStr(repo_path_real)
-                                               else:
-                                                       repoadd = "%s=>%s" % (
-                                                               repo_display.repoStr(repo_path_prev),
-                                                               repo_display.repoStr(repo_path_real))
-                                       if repoadd:
-                                               repoadd_set.add(repoadd)
-
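-                               # xs = [category/package, version, revision]; an
-                               # explicit "r0" revision is not displayed.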
-                               xs = [portage.cpv_getkey(pkg_key)] + \
-                                       list(portage.catpkgsplit(pkg_key)[2:])
-                               if xs[2] == "r0":
-                                       xs[2] = ""
-                               else:
-                                       xs[2] = "-" + xs[2]
-
-                               mywidth = 130
-                               if "COLUMNWIDTH" in self.settings:
-                                       try:
-                                               mywidth = int(self.settings["COLUMNWIDTH"])
-                                       except ValueError, e:
-                                               portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
-                                               portage.writemsg(
-                                                       "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
-                                                       self.settings["COLUMNWIDTH"], noiselevel=-1)
-                                               del e
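-                               # Column stops used to pad the version fields in
-                               # --columns output.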
-                               oldlp = mywidth - 30
-                               newlp = oldlp - 30
-
-                               # Convert myoldbest from a list to a string.
-                               if not myoldbest:
-                                       myoldbest = ""
-                               else:
-                                       for pos, key in enumerate(myoldbest):
-                                               key = portage.catpkgsplit(key)[2] + \
-                                                       "-" + portage.catpkgsplit(key)[3]
-                                               if key[-3:] == "-r0":
-                                                       key = key[:-3]
-                                               myoldbest[pos] = key
-                                       myoldbest = blue("["+", ".join(myoldbest)+"]")
-
-                               pkg_cp = xs[0]
-                               root_config = self.roots[myroot]
-                               system_set = root_config.sets["system"]
-                               world_set  = root_config.sets["world"]
-
-                               pkg_system = False
-                               pkg_world = False
-                               try:
-                                       pkg_system = system_set.findAtomForPackage(pkg)
-                                       pkg_world  = world_set.findAtomForPackage(pkg)
-                                       if not (oneshot or pkg_world) and \
-                                               myroot == self.target_root and \
-                                               favorites_set.findAtomForPackage(pkg):
-                                               # Maybe it will be added to world now.
-                                               if create_world_atom(pkg, favorites_set, root_config):
-                                                       pkg_world = True
-                               except portage.exception.InvalidDependString:
-                                       # This is reported elsewhere if relevant.
-                                       pass
-
-                               def pkgprint(pkg_str):
-                                       if pkg_merge:
-                                               if pkg_system:
-                                                       return colorize("PKG_MERGE_SYSTEM", pkg_str)
-                                               elif pkg_world:
-                                                       return colorize("PKG_MERGE_WORLD", pkg_str)
-                                               else:
-                                                       return colorize("PKG_MERGE", pkg_str)
-                                       elif pkg_status == "uninstall":
-                                               return colorize("PKG_UNINSTALL", pkg_str)
-                                       else:
-                                               if pkg_system:
-                                                       return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
-                                               elif pkg_world:
-                                                       return colorize("PKG_NOMERGE_WORLD", pkg_str)
-                                               else:
-                                                       return colorize("PKG_NOMERGE", pkg_str)
-
-                               try:
-                                       properties = flatten(use_reduce(paren_reduce(
-                                               pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
-                               except portage.exception.InvalidDependString, e:
-                                       if not pkg.installed:
-                                               show_invalid_depstring_notice(pkg,
-                                                       pkg.metadata["PROPERTIES"], str(e))
-                                               del e
-                                               return 1
-                                       properties = []
-                               interactive = "interactive" in properties
-                               if interactive and pkg.operation == "merge":
-                                       addl = colorize("WARN", "I") + addl[1:]
-                                       if ordered:
-                                               counters.interactive += 1
-
-                               if x[1]!="/":
-                                       if myoldbest:
-                                               myoldbest +=" "
-                                       if "--columns" in self.myopts:
-                                               if "--quiet" in self.myopts:
-                                                       myprint=addl+" "+indent+pkgprint(pkg_cp)
-                                                       myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
-                                                       myprint=myprint+myoldbest
-                                                       myprint=myprint+darkgreen("to "+x[1])
-                                                       verboseadd = None
-                                               else:
-                                                       if not pkg_merge:
-                                                               myprint = "[%s] %s%s" % \
-                                                                       (pkgprint(pkg_status.ljust(13)),
-                                                                       indent, pkgprint(pkg.cp))
-                                                       else:
-                                                               myprint = "[%s %s] %s%s" % \
-                                                                       (pkgprint(pkg.type_name), addl,
-                                                                       indent, pkgprint(pkg.cp))
-                                                       if (newlp-nc_len(myprint)) > 0:
-                                                               myprint=myprint+(" "*(newlp-nc_len(myprint)))
-                                                       myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
-                                                       if (oldlp-nc_len(myprint)) > 0:
-                                                               myprint=myprint+" "*(oldlp-nc_len(myprint))
-                                                       myprint=myprint+myoldbest
-                                                       myprint += darkgreen("to " + pkg.root)
-                                       else:
-                                               if not pkg_merge:
-                                                       myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
-                                               else:
-                                                       myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
-                                               myprint += indent + pkgprint(pkg_key) + " " + \
-                                                       myoldbest + darkgreen("to " + myroot)
-                               else:
-                                       if "--columns" in self.myopts:
-                                               if "--quiet" in self.myopts:
-                                                       myprint=addl+" "+indent+pkgprint(pkg_cp)
-                                                       myprint=myprint+" "+green(xs[1]+xs[2])+" "
-                                                       myprint=myprint+myoldbest
-                                                       verboseadd = None
-                                               else:
-                                                       if not pkg_merge:
-                                                               myprint = "[%s] %s%s" % \
-                                                                       (pkgprint(pkg_status.ljust(13)),
-                                                                       indent, pkgprint(pkg.cp))
-                                                       else:
-                                                               myprint = "[%s %s] %s%s" % \
-                                                                       (pkgprint(pkg.type_name), addl,
-                                                                       indent, pkgprint(pkg.cp))
-                                                       if (newlp-nc_len(myprint)) > 0:
-                                                               myprint=myprint+(" "*(newlp-nc_len(myprint)))
-                                                       myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
-                                                       if (oldlp-nc_len(myprint)) > 0:
-                                                               myprint=myprint+(" "*(oldlp-nc_len(myprint)))
-                                                       myprint += myoldbest
-                                       else:
-                                               if not pkg_merge:
-                                                       myprint = "[%s] %s%s %s" % \
-                                                               (pkgprint(pkg_status.ljust(13)),
-                                                               indent, pkgprint(pkg.cpv),
-                                                               myoldbest)
-                                               else:
-                                                       myprint = "[%s %s] %s%s %s" % \
-                                                               (pkgprint(pkg_type), addl, indent,
-                                                               pkgprint(pkg.cpv), myoldbest)
-
-                               if columns and pkg.operation == "uninstall":
-                                       continue
-                               p.append((myprint, verboseadd, repoadd))
-
-                               if "--tree" not in self.myopts and \
-                                       "--quiet" not in self.myopts and \
-                                       not self._opts_no_restart.intersection(self.myopts) and \
-                                       pkg.root == self._running_root.root and \
-                                       portage.match_from_list(
-                                       portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
-                                       not vardb.cpv_exists(pkg.cpv):
-                                               if mylist_index < len(mylist) - 1:
-                                                       p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
-                                                       p.append(colorize("WARN", "    then resume the merge."))
-
-               out = sys.stdout
-               show_repos = repoadd_set and repoadd_set != set(["0"])
-
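-               # Entries in p are either plain strings (such as the restart
-               # warning above) or (myprint, verboseadd, repoadd) tuples that
-               # were built for each package in the merge list.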
-               for x in p:
-                       if isinstance(x, basestring):
-                               out.write("%s\n" % (x,))
-                               continue
-
-                       myprint, verboseadd, repoadd = x
-
-                       if verboseadd:
-                               myprint += " " + verboseadd
-
-                       if show_repos and repoadd:
-                               myprint += " " + teal("[%s]" % repoadd)
-
-                       out.write("%s\n" % (myprint,))
-
-               for x in blockers:
-                       print x
-
-               if verbosity == 3:
-                       print
-                       print counters
-                       if show_repos:
-                               sys.stdout.write(str(repo_display))
-
-               if "--changelog" in self.myopts:
-                       print
-                       for revision,text in changelogs:
-                               print bold('*'+revision)
-                               sys.stdout.write(text)
-
-               sys.stdout.flush()
-               return os.EX_OK
-
-       def display_problems(self):
-               """
-               Display problems with the dependency graph such as slot collisions.
-               This is called internally by display() to show the problems _after_
-               the merge list where it is most likely to be seen, but if display()
-               is not going to be called then this method should be called explicitly
-               to ensure that the user is notified of problems with the graph.
-
-               All output goes to stderr, except for unsatisfied dependencies which
-               go to stdout for parsing by programs such as autounmask.
-               """
-
-               # Note that show_masked_packages() sends its output to
-               # stdout, and some programs such as autounmask parse the
-               # output in cases when emerge bails out. However, when
-               # show_masked_packages() is called for installed packages
-               # here, the message is a warning that is more appropriate
-               # to send to stderr, so temporarily redirect stdout to
-               # stderr. TODO: Fix output code so there's a cleaner way
-               # to redirect everything to stderr.
-               sys.stdout.flush()
-               sys.stderr.flush()
-               stdout = sys.stdout
-               try:
-                       sys.stdout = sys.stderr
-                       self._display_problems()
-               finally:
-                       sys.stdout = stdout
-                       sys.stdout.flush()
-                       sys.stderr.flush()
-
-               # This goes to stdout for parsing by programs like autounmask.
-               for pargs, kwargs in self._unsatisfied_deps_for_display:
-                       self._show_unsatisfied_dep(*pargs, **kwargs)
-
-       def _display_problems(self):
-               if self._circular_deps_for_display is not None:
-                       self._show_circular_deps(
-                               self._circular_deps_for_display)
-
-               # The user is only notified of a slot conflict if
-               # there are no unresolvable blocker conflicts.
-               if self._unsatisfied_blockers_for_display is not None:
-                       self._show_unsatisfied_blockers(
-                               self._unsatisfied_blockers_for_display)
-               else:
-                       self._show_slot_collision_notice()
-
-               # TODO: Add generic support for "set problem" handlers so that
-               # the below warnings aren't special cases for world only.
-
-               if self._missing_args:
-                       world_problems = False
-                       if "world" in self._sets:
-                               # Filter out indirect members of world (from nested sets)
-                               # since only direct members of world are desired here.
-                               world_set = self.roots[self.target_root].sets["world"]
-                               for arg, atom in self._missing_args:
-                                       if arg.name == "world" and atom in world_set:
-                                               world_problems = True
-                                               break
-
-                       if world_problems:
-                               sys.stderr.write("\n!!! Problems have been " + \
-                                       "detected with your world file\n")
-                               sys.stderr.write("!!! Please run " + \
-                                       green("emaint --check world")+"\n\n")
-
-               if self._missing_args:
-                       sys.stderr.write("\n" + colorize("BAD", "!!!") + \
-                               " Ebuilds for the following packages are either all\n")
-                       sys.stderr.write(colorize("BAD", "!!!") + \
-                               " masked or don't exist:\n")
-                       sys.stderr.write(" ".join(str(atom) for arg, atom in \
-                               self._missing_args) + "\n")
-
-               if self._pprovided_args:
-                       arg_refs = {}
-                       for arg, atom in self._pprovided_args:
-                               if isinstance(arg, SetArg):
-                                       parent = arg.name
-                                       arg_atom = (atom, atom)
-                               else:
-                                       parent = "args"
-                                       arg_atom = (arg.arg, atom)
-                               refs = arg_refs.setdefault(arg_atom, [])
-                               if parent not in refs:
-                                       refs.append(parent)
-                       msg = []
-                       msg.append(bad("\nWARNING: "))
-                       if len(self._pprovided_args) > 1:
-                               msg.append("Requested packages will not be " + \
-                                       "merged because they are listed in\n")
-                       else:
-                               msg.append("A requested package will not be " + \
-                                       "merged because it is listed in\n")
-                       msg.append("package.provided:\n\n")
-                       problems_sets = set()
-                       for (arg, atom), refs in arg_refs.iteritems():
-                               ref_string = ""
-                               if refs:
-                                       problems_sets.update(refs)
-                                       refs.sort()
-                                       ref_string = ", ".join(["'%s'" % name for name in refs])
-                                       ref_string = " pulled in by " + ref_string
-                               msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
-                       msg.append("\n")
-                       if "world" in problems_sets:
-                               msg.append("This problem can be solved in one of the following ways:\n\n")
-                               msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
-                               msg.append("  B) Uninstall offending packages (cleans them from world).\n")
-                               msg.append("  C) Remove offending entries from package.provided.\n\n")
-                               msg.append("The best course of action depends on the reason that an offending\n")
-                               msg.append("package.provided entry exists.\n\n")
-                       sys.stderr.write("".join(msg))
-
-               masked_packages = []
-               for pkg in self._masked_installed:
-                       root_config = pkg.root_config
-                       pkgsettings = self.pkgsettings[pkg.root]
-                       mreasons = get_masking_status(pkg, pkgsettings, root_config)
-                       masked_packages.append((root_config, pkgsettings,
-                               pkg.cpv, pkg.metadata, mreasons))
-               if masked_packages:
-                       sys.stderr.write("\n" + colorize("BAD", "!!!") + \
-                               " The following installed packages are masked:\n")
-                       show_masked_packages(masked_packages)
-                       show_mask_docs()
-                       print
-
-       def calc_changelog(self,ebuildpath,current,next):
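-               # Returns the ChangeLog divisions that lie between the installed
-               # version ("current") and the version about to be merged ("next"),
-               # as split by find_changelog_tags() below (typically newest first).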
-               if ebuildpath is None or not os.path.exists(ebuildpath):
-                       return []
-               current = '-'.join(portage.catpkgsplit(current)[1:])
-               if current.endswith('-r0'):
-                       current = current[:-3]
-               next = '-'.join(portage.catpkgsplit(next)[1:])
-               if next.endswith('-r0'):
-                       next = next[:-3]
-               changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
-               try:
-                       changelog = open(changelogpath).read()
-               except EnvironmentError:
-                       return []
-               divisions = self.find_changelog_tags(changelog)
-               #print 'XX from',current,'to',next
-               #for div,text in divisions: print 'XX',div
-               # skip entries for all revisions above the one we are about to emerge
-               for i in range(len(divisions)):
-                       if divisions[i][0]==next:
-                               divisions = divisions[i:]
-                               break
-               # find out how many entries we are going to display
-               for i in range(len(divisions)):
-                       if divisions[i][0]==current:
-                               divisions = divisions[:i]
-                               break
-               else:
-                       # couldn't find the current revision in the list; display nothing
-                       return []
-               return divisions
-
-       def find_changelog_tags(self,changelog):
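-               # Split the ChangeLog into (release, text) pairs. Release headers
-               # are expected to look roughly like "*foo-1.2.3 (01 Jan 2009)";
-               # the package name and date here are only illustrative.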
-               divs = []
-               release = None
-               while 1:
-                       match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
-                       if match is None:
-                               if release is not None:
-                                       divs.append((release,changelog))
-                               return divs
-                       if release is not None:
-                               divs.append((release,changelog[:match.start()]))
-                       changelog = changelog[match.end():]
-                       release = match.group(1)
-                       if release.endswith('.ebuild'):
-                               release = release[:-7]
-                       if release.endswith('-r0'):
-                               release = release[:-3]
-
-       def saveNomergeFavorites(self):
-               """Find atoms in favorites that are not in the mergelist and add them
-               to the world file if necessary."""
-               for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
-                       "--oneshot", "--onlydeps", "--pretend"):
-                       if x in self.myopts:
-                               return
-               root_config = self.roots[self.target_root]
-               world_set = root_config.sets["world"]
-
-               world_locked = False
-               if hasattr(world_set, "lock"):
-                       world_set.lock()
-                       world_locked = True
-
-               if hasattr(world_set, "load"):
-                       world_set.load() # maybe it's changed on disk
-
-               args_set = self._sets["args"]
-               portdb = self.trees[self.target_root]["porttree"].dbapi
-               added_favorites = set()
-               for x in self._set_nodes:
-                       pkg_type, root, pkg_key, pkg_status = x
-                       if pkg_status != "nomerge":
-                               continue
-
-                       try:
-                               myfavkey = create_world_atom(x, args_set, root_config)
-                               if myfavkey:
-                                       if myfavkey in added_favorites:
-                                               continue
-                                       added_favorites.add(myfavkey)
-                       except portage.exception.InvalidDependString, e:
-                               writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
-                                       (pkg_key, str(e)), noiselevel=-1)
-                               writemsg("!!! see '%s'\n\n" % os.path.join(
-                                       root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
-                               del e
-               all_added = []
-               for k in self._sets:
-                       if k in ("args", "world") or not root_config.sets[k].world_candidate:
-                               continue
-                       s = SETPREFIX + k
-                       if s in world_set:
-                               continue
-                       all_added.append(SETPREFIX + k)
-               all_added.extend(added_favorites)
-               all_added.sort()
-               for a in all_added:
-                       print ">>> Recording %s in \"world\" favorites file..." % \
-                               colorize("INFORM", str(a))
-               if all_added:
-                       world_set.update(all_added)
-
-               if world_locked:
-                       world_set.unlock()
-
-       def loadResumeCommand(self, resume_data, skip_masked=True,
-               skip_missing=True):
-               """
-               Add a resume command to the graph and validate it in the process.  This
-               will raise a PackageNotFound exception if a package is not available.
-               """
-
-               if not isinstance(resume_data, dict):
-                       return False
-
-               mergelist = resume_data.get("mergelist")
-               if not isinstance(mergelist, list):
-                       mergelist = []
-
-               fakedb = self.mydbapi
-               trees = self.trees
-               serialized_tasks = []
-               masked_tasks = []
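-               # Each usable mergelist entry is a 4-element list of the form
-               # [pkg_type, root, cpv, action], for example (hypothetical):
-               # ["ebuild", "/", "sys-apps/portage-2.1.6", "merge"].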
-               for x in mergelist:
-                       if not (isinstance(x, list) and len(x) == 4):
-                               continue
-                       pkg_type, myroot, pkg_key, action = x
-                       if pkg_type not in self.pkg_tree_map:
-                               continue
-                       if action != "merge":
-                               continue
-                       tree_type = self.pkg_tree_map[pkg_type]
-                       mydb = trees[myroot][tree_type].dbapi
-                       db_keys = list(self._trees_orig[myroot][
-                               tree_type].dbapi._aux_cache_keys)
-                       try:
-                               metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
-                       except KeyError:
-                               # It does not exist or it is corrupt.
-                               if action == "uninstall":
-                                       continue
-                               if skip_missing:
-                                       # TODO: log these somewhere
-                                       continue
-                               raise portage.exception.PackageNotFound(pkg_key)
-                       installed = action == "uninstall"
-                       built = pkg_type != "ebuild"
-                       root_config = self.roots[myroot]
-                       pkg = Package(built=built, cpv=pkg_key,
-                               installed=installed, metadata=metadata,
-                               operation=action, root_config=root_config,
-                               type_name=pkg_type)
-                       if pkg_type == "ebuild":
-                               pkgsettings = self.pkgsettings[myroot]
-                               pkgsettings.setcpv(pkg)
-                               pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
-                               pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
-                       self._pkg_cache[pkg] = pkg
-
-                       root_config = self.roots[pkg.root]
-                       if "merge" == pkg.operation and \
-                               not visible(root_config.settings, pkg):
-                               if skip_masked:
-                                       masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
-                               else:
-                                       self._unsatisfied_deps_for_display.append(
-                                               ((pkg.root, "="+pkg.cpv), {"myparent":None}))
-
-                       fakedb[myroot].cpv_inject(pkg)
-                       serialized_tasks.append(pkg)
-                       self.spinner.update()
-
-               if self._unsatisfied_deps_for_display:
-                       return False
-
-               if not serialized_tasks or "--nodeps" in self.myopts:
-                       self._serialized_tasks_cache = serialized_tasks
-                       self._scheduler_graph = self.digraph
-               else:
-                       self._select_package = self._select_pkg_from_graph
-                       self.myparams.add("selective")
-                       # Always traverse deep dependencies in order to account for
-                       # potentially unsatisfied dependencies of installed packages.
-                       # This is necessary for correct --keep-going or --resume operation
-                       # in case a package from a group of circularly dependent packages
-                       # fails. In this case, a package which has recently been installed
-                       # may have an unsatisfied circular dependency (pulled in by
-                       # PDEPEND, for example). So, even though a package is already
-                       # installed, it may not have all of its dependencies satisfied, so
-                       # it may not be usable. If such a package is in the subgraph of
-                       # deep dependencies of a scheduled build, that build needs to
-                       # be cancelled. In order for this type of situation to be
-                       # recognized, deep traversal of dependencies is required.
-                       self.myparams.add("deep")
-
-                       favorites = resume_data.get("favorites")
-                       args_set = self._sets["args"]
-                       if isinstance(favorites, list):
-                               args = self._load_favorites(favorites)
-                       else:
-                               args = []
-
-                       for task in serialized_tasks:
-                               if isinstance(task, Package) and \
-                                       task.operation == "merge":
-                                       if not self._add_pkg(task, None):
-                                               return False
-
-                       # Packages for argument atoms need to be explicitly
-                       # added via _add_pkg() so that they are included in the
-                       # digraph (needed at least for --tree display).
-                       for arg in args:
-                               for atom in arg.set:
-                                       pkg, existing_node = self._select_package(
-                                               arg.root_config.root, atom)
-                                       if existing_node is None and \
-                                               pkg is not None:
-                                               if not self._add_pkg(pkg, Dependency(atom=atom,
-                                                       root=pkg.root, parent=arg)):
-                                                       return False
-
-                       # Allow unsatisfied deps here to avoid showing a masking
-                       # message for an unsatisfied dep that isn't necessarily
-                       # masked.
-                       if not self._create_graph(allow_unsatisfied=True):
-                               return False
-
-                       unsatisfied_deps = []
-                       for dep in self._unsatisfied_deps:
-                               if not isinstance(dep.parent, Package):
-                                       continue
-                               if dep.parent.operation == "merge":
-                                       unsatisfied_deps.append(dep)
-                                       continue
-
-                               # For unsatisfied deps of installed packages, only account for
-                               # them if they are in the subgraph of dependencies of a package
-                               # which is scheduled to be installed.
-                               unsatisfied_install = False
-                               traversed = set()
-                               dep_stack = self.digraph.parent_nodes(dep.parent)
-                               while dep_stack:
-                                       node = dep_stack.pop()
-                                       if not isinstance(node, Package):
-                                               continue
-                                       if node.operation == "merge":
-                                               unsatisfied_install = True
-                                               break
-                                       if node in traversed:
-                                               continue
-                                       traversed.add(node)
-                                       dep_stack.extend(self.digraph.parent_nodes(node))
-
-                               if unsatisfied_install:
-                                       unsatisfied_deps.append(dep)
-
-                       if masked_tasks or unsatisfied_deps:
-                               # This probably means that a required package
-                               # was dropped via --skipfirst. It makes the
-                               # resume list invalid, so convert it to a
-                               # UnsatisfiedResumeDep exception.
-                               raise self.UnsatisfiedResumeDep(self,
-                                       masked_tasks + unsatisfied_deps)
-                       self._serialized_tasks_cache = None
-                       try:
-                               self.altlist()
-                       except self._unknown_internal_error:
-                               return False
-
-               return True
-
-       def _load_favorites(self, favorites):
-               """
-               Use a list of favorites to resume state from a
-               previous select_files() call. This creates similar
-               DependencyArg instances to those that would have
-               been created by the original select_files() call.
-               This allows Package instances to be matched with
-               DependencyArg instances during graph creation.
-               """
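-               # Entries in favorites are plain strings: either package atoms
-               # (e.g. "app-editors/vim", purely illustrative) or set names
-               # prefixed with SETPREFIX (e.g. SETPREFIX + "world").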
-               root_config = self.roots[self.target_root]
-               getSetAtoms = root_config.setconfig.getSetAtoms
-               sets = root_config.sets
-               args = []
-               for x in favorites:
-                       if not isinstance(x, basestring):
-                               continue
-                       if x in ("system", "world"):
-                               x = SETPREFIX + x
-                       if x.startswith(SETPREFIX):
-                               s = x[len(SETPREFIX):]
-                               if s not in sets:
-                                       continue
-                               if s in self._sets:
-                                       continue
-                               # Recursively expand sets so that containment tests in
-                               # self._get_parent_sets() properly match atoms in nested
-                               # sets (like if world contains system).
-                               expanded_set = InternalPackageSet(
-                                       initial_atoms=getSetAtoms(s))
-                               self._sets[s] = expanded_set
-                               args.append(SetArg(arg=x, set=expanded_set,
-                                       root_config=root_config))
-                       else:
-                               if not portage.isvalidatom(x):
-                                       continue
-                               args.append(AtomArg(arg=x, atom=x,
-                                       root_config=root_config))
-
-               self._set_args(args)
-               return args
-
-       class UnsatisfiedResumeDep(portage.exception.PortageException):
-               """
-               A dependency of a resume list is not installed. This
-               can occur when a required package is dropped from the
-               merge list via --skipfirst.
-               """
-               def __init__(self, depgraph, value):
-                       portage.exception.PortageException.__init__(self, value)
-                       self.depgraph = depgraph
-
-       class _internal_exception(portage.exception.PortageException):
-               def __init__(self, value=""):
-                       portage.exception.PortageException.__init__(self, value)
-
-       class _unknown_internal_error(_internal_exception):
-               """
-               Used by the depgraph internally to terminate graph creation.
-               The specific reason for the failure should have been dumped
-               to stderr; unfortunately, the exact reason may not be known.
-               """
-
-       class _serialize_tasks_retry(_internal_exception):
-               """
-               This is raised by the _serialize_tasks() method when it needs to
-               be called again for some reason. The only case that it's currently
-               used for is when neglected dependencies need to be added to the
-               graph in order to avoid making a potentially unsafe decision.
-               """
-
-       class _dep_check_composite_db(portage.dbapi):
-               """
-               A dbapi-like interface that is optimized for use in dep_check() calls.
-               This is built on top of the existing depgraph package selection logic.
-               Some packages that have been added to the graph may be masked from this
-               view in order to influence the atom preference selection that occurs
-               via dep_check().
-               """
-               def __init__(self, depgraph, root):
-                       portage.dbapi.__init__(self)
-                       self._depgraph = depgraph
-                       self._root = root
-                       self._match_cache = {}
-                       self._cpv_pkg_map = {}
-
-               def _clear_cache(self):
-                       self._match_cache.clear()
-                       self._cpv_pkg_map.clear()
-
-               def match(self, atom):
-                       ret = self._match_cache.get(atom)
-                       if ret is not None:
-                               return ret[:]
-                       orig_atom = atom
-                       if "/" not in atom:
-                               atom = self._dep_expand(atom)
-                       pkg, existing = self._depgraph._select_package(self._root, atom)
-                       if not pkg:
-                               ret = []
-                       else:
-                               # Return the highest available from select_package() as well as
-                               # any matching slots in the graph db.
-                               slots = set()
-                               slots.add(pkg.metadata["SLOT"])
-                               atom_cp = portage.dep_getkey(atom)
-                               if pkg.cp.startswith("virtual/"):
-                                       # For new-style virtual lookahead that occurs inside
-                                       # dep_check(), examine all slots. This is needed
-                                       # so that newer slots will not unnecessarily be pulled in
-                                       # when a satisfying lower slot is already installed. For
-                                       # example, if virtual/jdk-1.4 is satisfied via kaffe then
-                                       # there's no need to pull in a newer slot to satisfy a
-                                       # virtual/jdk dependency.
-                                       for db, pkg_type, built, installed, db_keys in \
-                                               self._depgraph._filtered_trees[self._root]["dbs"]:
-                                               for cpv in db.match(atom):
-                                                       if portage.cpv_getkey(cpv) != pkg.cp:
-                                                               continue
-                                                       slots.add(db.aux_get(cpv, ["SLOT"])[0])
-                               ret = []
-                               if self._visible(pkg):
-                                       self._cpv_pkg_map[pkg.cpv] = pkg
-                                       ret.append(pkg.cpv)
-                               slots.remove(pkg.metadata["SLOT"])
-                               while slots:
-                                       slot_atom = "%s:%s" % (atom_cp, slots.pop())
-                                       pkg, existing = self._depgraph._select_package(
-                                               self._root, slot_atom)
-                                       if not pkg:
-                                               continue
-                                       if not self._visible(pkg):
-                                               continue
-                                       self._cpv_pkg_map[pkg.cpv] = pkg
-                                       ret.append(pkg.cpv)
-                               if ret:
-                                       self._cpv_sort_ascending(ret)
-                       self._match_cache[orig_atom] = ret
-                       return ret[:]
-
-               def _visible(self, pkg):
-                       if pkg.installed and "selective" not in self._depgraph.myparams:
-                               try:
-                                       arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
-                               except (StopIteration, portage.exception.InvalidDependString):
-                                       arg = None
-                               if arg:
-                                       return False
-                       if pkg.installed:
-                               try:
-                                       if not visible(
-                                               self._depgraph.pkgsettings[pkg.root], pkg):
-                                               return False
-                               except portage.exception.InvalidDependString:
-                                       pass
-                       in_graph = self._depgraph._slot_pkg_map[
-                               self._root].get(pkg.slot_atom)
-                       if in_graph is None:
-                               # Mask choices for packages which are not the highest visible
-                               # version within their slot (since they usually trigger slot
-                               # conflicts).
-                               highest_visible, in_graph = self._depgraph._select_package(
-                                       self._root, pkg.slot_atom)
-                               if pkg != highest_visible:
-                                       return False
-                       elif in_graph != pkg:
-                               # Mask choices for packages that would trigger a slot
-                               # conflict with a previously selected package.
-                               return False
-                       return True
-
-               def _dep_expand(self, atom):
-                       """
-                       This is only needed for old installed packages that may
-                       contain atoms that are not fully qualified with a specific
-                       category. Emulate the cpv_expand() function that's used by
-                       dbapi.match() in cases like this. If there are multiple
-                       matches, it's often due to a new-style virtual that has
-                       been added, so try to filter those out to avoid raising
-                       a ValueError.
-                       """
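-                       # For example (hypothetical), an old installed package might
-                       # record a dependency on a bare "jdk" atom; this method maps
-                       # it to a fully qualified atom such as "virtual/jdk" when a
-                       # unique or virtual expansion is available.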
-                       root_config = self._depgraph.roots[self._root]
-                       orig_atom = atom
-                       expanded_atoms = self._depgraph._dep_expand(root_config, atom)
-                       if len(expanded_atoms) > 1:
-                               non_virtual_atoms = []
-                               for x in expanded_atoms:
-                                       if not portage.dep_getkey(x).startswith("virtual/"):
-                                               non_virtual_atoms.append(x)
-                               if len(non_virtual_atoms) == 1:
-                                       expanded_atoms = non_virtual_atoms
-                       if len(expanded_atoms) > 1:
-                               # compatible with portage.cpv_expand()
-                               raise portage.exception.AmbiguousPackageName(
-                                       [portage.dep_getkey(x) for x in expanded_atoms])
-                       if expanded_atoms:
-                               atom = expanded_atoms[0]
-                       else:
-                               null_atom = insert_category_into_atom(atom, "null")
-                               null_cp = portage.dep_getkey(null_atom)
-                               cat, atom_pn = portage.catsplit(null_cp)
-                               virts_p = root_config.settings.get_virts_p().get(atom_pn)
-                               if virts_p:
-                                       # Allow the resolver to choose which virtual.
-                                       atom = insert_category_into_atom(atom, "virtual")
-                               else:
-                                       atom = insert_category_into_atom(atom, "null")
-                       return atom
-
-               def aux_get(self, cpv, wants):
-                       metadata = self._cpv_pkg_map[cpv].metadata
-                       return [metadata.get(x, "") for x in wants]
-
-class Scheduler(PollScheduler):
-
-       _opts_ignore_blockers = \
-               frozenset(["--buildpkgonly",
-               "--fetchonly", "--fetch-all-uri",
-               "--nodeps", "--pretend"])
-
-       _opts_no_background = \
-               frozenset(["--pretend",
-               "--fetchonly", "--fetch-all-uri"])
-
-       _opts_no_restart = frozenset(["--buildpkgonly",
-               "--fetchonly", "--fetch-all-uri", "--pretend"])
-
-       _bad_resume_opts = set(["--ask", "--changelog",
-               "--resume", "--skipfirst"])
-
-       _fetch_log = os.path.join(_emerge_log_dir, 'emerge-fetch.log')
-
-       class _iface_class(SlotObject):
-               __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
-                       "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
-                       "scheduleSetup", "scheduleUnpack", "scheduleYield",
-                       "unregister")
-
-       class _fetch_iface_class(SlotObject):
-               __slots__ = ("log_file", "schedule")
-
-       _task_queues_class = slot_dict_class(
-               ("merge", "jobs", "fetch", "unpack"), prefix="")
-
-       class _build_opts_class(SlotObject):
-               __slots__ = ("buildpkg", "buildpkgonly",
-                       "fetch_all_uri", "fetchonly", "pretend")
-
-       class _binpkg_opts_class(SlotObject):
-               __slots__ = ("fetchonly", "getbinpkg", "pretend")
-
-       class _pkg_count_class(SlotObject):
-               __slots__ = ("curval", "maxval")
-
-       class _emerge_log_class(SlotObject):
-               __slots__ = ("xterm_titles",)
-
-               def log(self, *pargs, **kwargs):
-                       if not self.xterm_titles:
-                               # Avoid interference with the scheduler's status display.
-                               kwargs.pop("short_msg", None)
-                       emergelog(self.xterm_titles, *pargs, **kwargs)
-
-       class _failed_pkg(SlotObject):
-               __slots__ = ("build_dir", "build_log", "pkg", "returncode")
-
-       class _ConfigPool(object):
-               """Interface for a task to temporarily allocate a config
-               instance from a pool. This allows a task to be constructed
-               long before the config instance actually becomes needed, like
-               when prefetchers are constructed for the whole merge list."""
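-               # Typical (hypothetical) usage by a task: call pool.allocate() to
-               # borrow a config instance for self._root, then hand it back with
-               # pool.deallocate(settings) when the task is finished with it.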
-               __slots__ = ("_root", "_allocate", "_deallocate")
-               def __init__(self, root, allocate, deallocate):
-                       self._root = root
-                       self._allocate = allocate
-                       self._deallocate = deallocate
-               def allocate(self):
-                       return self._allocate(self._root)
-               def deallocate(self, settings):
-                       self._deallocate(settings)
-
-       class _unknown_internal_error(portage.exception.PortageException):
-               """
-               Used internally to terminate scheduling. The specific reason for
-               the failure should have been dumped to stderr.
-               """
-               def __init__(self, value=""):
-                       portage.exception.PortageException.__init__(self, value)
-
-       def __init__(self, settings, trees, mtimedb, myopts,
-               spinner, mergelist, favorites, digraph):
-               PollScheduler.__init__(self)
-               self.settings = settings
-               self.target_root = settings["ROOT"]
-               self.trees = trees
-               self.myopts = myopts
-               self._spinner = spinner
-               self._mtimedb = mtimedb
-               self._mergelist = mergelist
-               self._favorites = favorites
-               self._args_set = InternalPackageSet(favorites)
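-               # Each *_opts_class slot name maps directly to a command line
-               # option, e.g. the "fetch_all_uri" slot tracks "--fetch-all-uri".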
-               self._build_opts = self._build_opts_class()
-               for k in self._build_opts.__slots__:
-                       setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
-               self._binpkg_opts = self._binpkg_opts_class()
-               for k in self._binpkg_opts.__slots__:
-                       setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
-
-               self.curval = 0
-               self._logger = self._emerge_log_class()
-               self._task_queues = self._task_queues_class()
-               for k in self._task_queues.allowed_keys:
-                       setattr(self._task_queues, k,
-                               SequentialTaskQueue())
-
-               # Holds merges that will wait to be executed when no builds are
-               # executing. This is useful for system packages since dependencies
-               # on system packages are frequently unspecified.
-               self._merge_wait_queue = []
-               # Holds merges that have been transferred from the merge_wait_queue to
-               # the actual merge queue. They are removed from this list upon
-               # completion. Other packages can start building only when this list is
-               # empty.
-               self._merge_wait_scheduled = []
-
-               # Holds system packages and their deep runtime dependencies. Before
-               # being merged, these packages go to merge_wait_queue, to be merged
-               # when no other packages are building.
-               self._deep_system_deps = set()
-
-               # Holds packages to merge which will satisfy currently unsatisfied
-               # deep runtime dependencies of system packages. If this is not empty
-               # then no parallel builds will be spawned until it is empty. This
-               # minimizes the possibility that a build will fail due to the system
-               # being in a fragile state. For example, see bug #259954.
-               self._unsatisfied_system_deps = set()
-
-               self._status_display = JobStatusDisplay(
-                       xterm_titles=('notitles' not in settings.features))
-               self._max_load = myopts.get("--load-average")
-               max_jobs = myopts.get("--jobs")
-               if max_jobs is None:
-                       max_jobs = 1
-               self._set_max_jobs(max_jobs)
-
-               # The root where the currently running
-               # portage instance is installed.
-               self._running_root = trees["/"]["root_config"]
-               self.edebug = 0
-               if settings.get("PORTAGE_DEBUG", "") == "1":
-                       self.edebug = 1
-               self.pkgsettings = {}
-               self._config_pool = {}
-               self._blocker_db = {}
-               for root in trees:
-                       self._config_pool[root] = []
-                       self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
-
-               fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
-                       schedule=self._schedule_fetch)
-               self._sched_iface = self._iface_class(
-                       dblinkEbuildPhase=self._dblink_ebuild_phase,
-                       dblinkDisplayMerge=self._dblink_display_merge,
-                       dblinkElog=self._dblink_elog,
-                       dblinkEmergeLog=self._dblink_emerge_log,
-                       fetch=fetch_iface, register=self._register,
-                       schedule=self._schedule_wait,
-                       scheduleSetup=self._schedule_setup,
-                       scheduleUnpack=self._schedule_unpack,
-                       scheduleYield=self._schedule_yield,
-                       unregister=self._unregister)
-
-               self._prefetchers = weakref.WeakValueDictionary()
-               self._pkg_queue = []
-               self._completed_tasks = set()
-
-               self._failed_pkgs = []
-               self._failed_pkgs_all = []
-               self._failed_pkgs_die_msgs = []
-               self._post_mod_echo_msgs = []
-               self._parallel_fetch = False
-               merge_count = len([x for x in mergelist \
-                       if isinstance(x, Package) and x.operation == "merge"])
-               self._pkg_count = self._pkg_count_class(
-                       curval=0, maxval=merge_count)
-               self._status_display.maxval = self._pkg_count.maxval
-
-               # The load average takes some time to respond when new
-               # jobs are added, so we need to limit the rate of adding
-               # new jobs.
-               self._job_delay_max = 10
-               self._job_delay_factor = 1.0
-               self._job_delay_exp = 1.5
-               self._previous_job_start_time = None
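-               # These delay parameters, together with the last job start time,
-               # are consumed by the scheduling logic elsewhere when it decides
-               # whether enough time has passed to spawn another job.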
-
-               self._set_digraph(digraph)
-
-               # This is used to memoize the _choose_pkg() result when
-               # no packages can be chosen until one of the existing
-               # jobs completes.
-               self._choose_pkg_return_early = False
-
-               features = self.settings.features
-               if "parallel-fetch" in features and \
-                       not ("--pretend" in self.myopts or \
-                       "--fetch-all-uri" in self.myopts or \
-                       "--fetchonly" in self.myopts):
-                       if "distlocks" not in features:
-                               portage.writemsg(red("!!!")+"\n", noiselevel=-1)
-                               portage.writemsg(red("!!!")+" parallel-fetching " + \
-                                       "requires the distlocks feature to be enabled"+"\n",
-                                       noiselevel=-1)
-                               portage.writemsg(red("!!!")+" you have it disabled, " + \
-                                       "thus parallel-fetching is being disabled"+"\n",
-                                       noiselevel=-1)
-                               portage.writemsg(red("!!!")+"\n", noiselevel=-1)
-                       elif len(mergelist) > 1:
-                               self._parallel_fetch = True
-
-               if self._parallel_fetch:
-                       # Clear out the existing fetch log if it exists.
-                       try:
-                               open(self._fetch_log, 'w').close()
-                       except EnvironmentError:
-                               pass
-
-               self._running_portage = None
-               portage_match = self._running_root.trees["vartree"].dbapi.match(
-                       portage.const.PORTAGE_PACKAGE_ATOM)
-               if portage_match:
-                       cpv = portage_match.pop()
-                       self._running_portage = self._pkg(cpv, "installed",
-                               self._running_root, installed=True)
-
-       def _poll(self, timeout=None):
-               self._schedule()
-               PollScheduler._poll(self, timeout=timeout)
-
-       def _set_max_jobs(self, max_jobs):
-               self._max_jobs = max_jobs
-               self._task_queues.jobs.max_jobs = max_jobs
-
-       def _background_mode(self):
-               """
-               Check if background mode is enabled and adjust states as necessary.
-
-               @rtype: bool
-               @returns: True if background mode is enabled, False otherwise.
-               """
-               background = (self._max_jobs is True or \
-                       self._max_jobs > 1 or "--quiet" in self.myopts) and \
-                       not bool(self._opts_no_background.intersection(self.myopts))
-
-               if background:
-                       interactive_tasks = self._get_interactive_tasks()
-                       if interactive_tasks:
-                               background = False
-                               writemsg_level(">>> Sending package output to stdio due " + \
-                                       "to interactive package(s):\n",
-                                       level=logging.INFO, noiselevel=-1)
-                               msg = [""]
-                               for pkg in interactive_tasks:
-                                       pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
-                                       if pkg.root != "/":
-                                               pkg_str += " for " + pkg.root
-                                       msg.append(pkg_str)
-                               msg.append("")
-                               writemsg_level("".join("%s\n" % (l,) for l in msg),
-                                       level=logging.INFO, noiselevel=-1)
-                               if self._max_jobs is True or self._max_jobs > 1:
-                                       self._set_max_jobs(1)
-                                       writemsg_level(">>> Setting --jobs=1 due " + \
-                                               "to the above interactive package(s)\n",
-                                               level=logging.INFO, noiselevel=-1)
-
-               self._status_display.quiet = \
-                       not background or \
-                       ("--quiet" in self.myopts and \
-                       "--verbose" not in self.myopts)
-
-               self._logger.xterm_titles = \
-                       "notitles" not in self.settings.features and \
-                       self._status_display.quiet
-
-               return background
-
-       def _get_interactive_tasks(self):
-               from portage import flatten
-               from portage.dep import use_reduce, paren_reduce
-               interactive_tasks = []
-               for task in self._mergelist:
-                       if not (isinstance(task, Package) and \
-                               task.operation == "merge"):
-                               continue
-                       try:
-                               properties = flatten(use_reduce(paren_reduce(
-                                       task.metadata["PROPERTIES"]), uselist=task.use.enabled))
-                       except portage.exception.InvalidDependString, e:
-                               show_invalid_depstring_notice(task,
-                                       task.metadata["PROPERTIES"], str(e))
-                               raise self._unknown_internal_error()
-                       if "interactive" in properties:
-                               interactive_tasks.append(task)
-               return interactive_tasks
-
-       def _set_digraph(self, digraph):
-               if "--nodeps" in self.myopts or \
-                       (self._max_jobs is not True and self._max_jobs < 2):
-                       # save some memory
-                       self._digraph = None
-                       return
-
-               self._digraph = digraph
-               self._find_system_deps()
-               self._prune_digraph()
-               self._prevent_builddir_collisions()
-
-       def _find_system_deps(self):
-               """
-               Find system packages and their deep runtime dependencies. Before being
-               merged, these packages go to merge_wait_queue, to be merged when no
-               other packages are building.
-               """
-               deep_system_deps = self._deep_system_deps
-               deep_system_deps.clear()
-               deep_system_deps.update(
-                       _find_deep_system_runtime_deps(self._digraph))
-               deep_system_deps.difference_update([pkg for pkg in \
-                       deep_system_deps if pkg.operation != "merge"])
-
-       def _prune_digraph(self):
-               """
-               Prune any root nodes that are irrelevant.
-               """
-
-               graph = self._digraph
-               completed_tasks = self._completed_tasks
-               removed_nodes = set()
-               while True:
-                       for node in graph.root_nodes():
-                               if not isinstance(node, Package) or \
-                                       (node.installed and node.operation == "nomerge") or \
-                                       node.onlydeps or \
-                                       node in completed_tasks:
-                                       removed_nodes.add(node)
-                       if removed_nodes:
-                               graph.difference_update(removed_nodes)
-                       if not removed_nodes:
-                               break
-                       removed_nodes.clear()
-
-       def _prevent_builddir_collisions(self):
-               """
-               When building stages, sometimes the same exact cpv needs to be merged
-               to both $ROOTs. Add edges to the digraph in order to avoid collisions
-               in the builddir. Currently, normal file locks would be inappropriate
-               for this purpose since emerge holds all of its build dir locks from
-               the main process.
-               """
-               cpv_map = {}
-               for pkg in self._mergelist:
-                       if not isinstance(pkg, Package):
-                               # a satisfied blocker
-                               continue
-                       if pkg.installed:
-                               continue
-                       if pkg.cpv not in cpv_map:
-                               cpv_map[pkg.cpv] = [pkg]
-                               continue
-                       for earlier_pkg in cpv_map[pkg.cpv]:
-                               self._digraph.add(earlier_pkg, pkg,
-                                       priority=DepPriority(buildtime=True))
-                       cpv_map[pkg.cpv].append(pkg)
-
-       class _pkg_failure(portage.exception.PortageException):
-               """
-               An instance of this class is raised by unmerge() when
-               an uninstallation fails.
-               """
-               status = 1
-               def __init__(self, *pargs):
-                       portage.exception.PortageException.__init__(self, pargs)
-                       if pargs:
-                               self.status = pargs[0]
-
-       def _schedule_fetch(self, fetcher):
-               """
-               Schedule a fetcher on the fetch queue, in order to
-               serialize access to the fetch log.
-               """
-               self._task_queues.fetch.addFront(fetcher)
-
-       def _schedule_setup(self, setup_phase):
-               """
-               Schedule a setup phase on the merge queue, in order to
-               serialize unsandboxed access to the live filesystem.
-               """
-               self._task_queues.merge.addFront(setup_phase)
-               self._schedule()
-
-       def _schedule_unpack(self, unpack_phase):
-               """
-               Schedule an unpack phase on the unpack queue, in order
-               to serialize $DISTDIR access for live ebuilds.
-               """
-               self._task_queues.unpack.add(unpack_phase)
-
-       def _find_blockers(self, new_pkg):
-               """
-               Returns a callable which should be called only when
-               the vdb lock has been acquired.
-               """
-               def get_blockers():
-                       return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
-               return get_blockers
-
-       def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
-               if self._opts_ignore_blockers.intersection(self.myopts):
-                       return None
-
-               # Call gc.collect() here to avoid heap overflow that
-               # triggers 'Cannot allocate memory' errors (reported
-               # with python-2.5).
-               import gc
-               gc.collect()
-
-               blocker_db = self._blocker_db[new_pkg.root]
-
-               blocker_dblinks = []
-               for blocking_pkg in blocker_db.findInstalledBlockers(
-                       new_pkg, acquire_lock=acquire_lock):
-                       if new_pkg.slot_atom == blocking_pkg.slot_atom:
-                               continue
-                       if new_pkg.cpv == blocking_pkg.cpv:
-                               continue
-                       blocker_dblinks.append(portage.dblink(
-                               blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
-                               self.pkgsettings[blocking_pkg.root], treetype="vartree",
-                               vartree=self.trees[blocking_pkg.root]["vartree"]))
-
-               gc.collect()
-
-               return blocker_dblinks
-
-       def _dblink_pkg(self, pkg_dblink):
-               cpv = pkg_dblink.mycpv
-               type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
-               root_config = self.trees[pkg_dblink.myroot]["root_config"]
-               installed = type_name == "installed"
-               return self._pkg(cpv, type_name, root_config, installed=installed)
-
-       def _append_to_log_path(self, log_path, msg):
-               f = open(log_path, 'a')
-               try:
-                       f.write(msg)
-               finally:
-                       f.close()
-
-       def _dblink_elog(self, pkg_dblink, phase, func, msgs):
-
-               log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
-               log_file = None
-               out = sys.stdout
-               background = self._background
-
-               if background and log_path is not None:
-                       log_file = open(log_path, 'a')
-                       out = log_file
-
-               try:
-                       for msg in msgs:
-                               func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
-               finally:
-                       if log_file is not None:
-                               log_file.close()
-
-       def _dblink_emerge_log(self, msg):
-               self._logger.log(msg)
-
-       def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
-               log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
-               background = self._background
-
-               if log_path is None:
-                       if not (background and level < logging.WARN):
-                               portage.util.writemsg_level(msg,
-                                       level=level, noiselevel=noiselevel)
-               else:
-                       if not background:
-                               portage.util.writemsg_level(msg,
-                                       level=level, noiselevel=noiselevel)
-                       self._append_to_log_path(log_path, msg)
-
-       def _dblink_ebuild_phase(self,
-               pkg_dblink, pkg_dbapi, ebuild_path, phase):
-               """
-               Using this callback for merge phases allows the scheduler
-               to run while these phases execute asynchronously, and allows
-               the scheduler to control output handling.
-               """
-
-               scheduler = self._sched_iface
-               settings = pkg_dblink.settings
-               pkg = self._dblink_pkg(pkg_dblink)
-               background = self._background
-               log_path = settings.get("PORTAGE_LOG_FILE")
-
-               ebuild_phase = EbuildPhase(background=background,
-                       pkg=pkg, phase=phase, scheduler=scheduler,
-                       settings=settings, tree=pkg_dblink.treetype)
-               ebuild_phase.start()
-               ebuild_phase.wait()
-
-               return ebuild_phase.returncode
-
-       def _generate_digests(self):
-               """
-               Generate digests if necessary for --digest or FEATURES=digest.
-               In order to avoid interference, this must be done before parallel
-               tasks are started.
-               """
-
-               if '--fetchonly' in self.myopts:
-                       return os.EX_OK
-
-               digest = '--digest' in self.myopts
-               if not digest:
-                       for pkgsettings in self.pkgsettings.itervalues():
-                               if 'digest' in pkgsettings.features:
-                                       digest = True
-                                       break
-
-               if not digest:
-                       return os.EX_OK
-
-               for x in self._mergelist:
-                       if not isinstance(x, Package) or \
-                               x.type_name != 'ebuild' or \
-                               x.operation != 'merge':
-                               continue
-                       pkgsettings = self.pkgsettings[x.root]
-                       if '--digest' not in self.myopts and \
-                               'digest' not in pkgsettings.features:
-                               continue
-                       portdb = x.root_config.trees['porttree'].dbapi
-                       ebuild_path = portdb.findname(x.cpv)
-                       if not ebuild_path:
-                               writemsg_level(
-                                       "!!! Could not locate ebuild for '%s'.\n" \
-                                       % x.cpv, level=logging.ERROR, noiselevel=-1)
-                               return 1
-                       pkgsettings['O'] = os.path.dirname(ebuild_path)
-                       if not portage.digestgen([], pkgsettings, myportdb=portdb):
-                               writemsg_level(
-                                       "!!! Unable to generate manifest for '%s'.\n" \
-                                       % x.cpv, level=logging.ERROR, noiselevel=-1)
-                               return 1
-
-               return os.EX_OK
-
-       def _check_manifests(self):
-               # Verify all the manifests now so that the user is notified of failure
-               # as soon as possible.
-               if "strict" not in self.settings.features or \
-                       "--fetchonly" in self.myopts or \
-                       "--fetch-all-uri" in self.myopts:
-                       return os.EX_OK
-
-               shown_verifying_msg = False
-               quiet_settings = {}
-               for myroot, pkgsettings in self.pkgsettings.iteritems():
-                       quiet_config = portage.config(clone=pkgsettings)
-                       quiet_config["PORTAGE_QUIET"] = "1"
-                       quiet_config.backup_changes("PORTAGE_QUIET")
-                       quiet_settings[myroot] = quiet_config
-                       del quiet_config
-
-               for x in self._mergelist:
-                       if not isinstance(x, Package) or \
-                               x.type_name != "ebuild":
-                               continue
-
-                       if not shown_verifying_msg:
-                               shown_verifying_msg = True
-                               self._status_msg("Verifying ebuild manifests")
-
-                       root_config = x.root_config
-                       portdb = root_config.trees["porttree"].dbapi
-                       quiet_config = quiet_settings[root_config.root]
-                       quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
-                       if not portage.digestcheck([], quiet_config, strict=True):
-                               return 1
-
-               return os.EX_OK
-
-       def _add_prefetchers(self):
-
-               if not self._parallel_fetch:
-                       return
-
-               if self._parallel_fetch:
-                       self._status_msg("Starting parallel fetch")
-
-                       prefetchers = self._prefetchers
-                       getbinpkg = "--getbinpkg" in self.myopts
-
-                       # In order to avoid "waiting for lock" messages
-                       # at the beginning, which annoy users, never
-                       # spawn a prefetcher for the first package.
-                       for pkg in self._mergelist[1:]:
-                               prefetcher = self._create_prefetcher(pkg)
-                               if prefetcher is not None:
-                                       self._task_queues.fetch.add(prefetcher)
-                                       prefetchers[pkg] = prefetcher
-
-       def _create_prefetcher(self, pkg):
-               """
-               @return: a prefetcher, or None if not applicable
-               """
-               prefetcher = None
-
-               if not isinstance(pkg, Package):
-                       pass
-
-               elif pkg.type_name == "ebuild":
-
-                       prefetcher = EbuildFetcher(background=True,
-                               config_pool=self._ConfigPool(pkg.root,
-                               self._allocate_config, self._deallocate_config),
-                               fetchonly=1, logfile=self._fetch_log,
-                               pkg=pkg, prefetch=True, scheduler=self._sched_iface)
-
-               elif pkg.type_name == "binary" and \
-                       "--getbinpkg" in self.myopts and \
-                       pkg.root_config.trees["bintree"].isremote(pkg.cpv):
-
-                       prefetcher = BinpkgPrefetcher(background=True,
-                               pkg=pkg, scheduler=self._sched_iface)
-
-               return prefetcher
-
-       def _is_restart_scheduled(self):
-               """
-               Check if the merge list contains a replacement
-               for the currently running instance that will result
-               in a restart after merge.
-               @rtype: bool
-               @returns: True if a restart is scheduled, False otherwise.
-               """
-               if self._opts_no_restart.intersection(self.myopts):
-                       return False
-
-               mergelist = self._mergelist
-
-               for i, pkg in enumerate(mergelist):
-                       if self._is_restart_necessary(pkg) and \
-                               i != len(mergelist) - 1:
-                               return True
-
-               return False
-
-       def _is_restart_necessary(self, pkg):
-               """
-               @return: True if merging the given package
-                       requires a restart, False otherwise.
-               """
-
-               # Figure out if we need a restart.
-               if pkg.root == self._running_root.root and \
-                       portage.match_from_list(
-                       portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
-                       if self._running_portage:
-                               return pkg.cpv != self._running_portage.cpv
-                       return True
-               return False
-
-       def _restart_if_necessary(self, pkg):
-               """
-               Use execv() to restart emerge. This happens
-               if portage upgrades itself and there are
-               remaining packages in the list.
-               """
-
-               if self._opts_no_restart.intersection(self.myopts):
-                       return
-
-               if not self._is_restart_necessary(pkg):
-                       return
-
-               if pkg == self._mergelist[-1]:
-                       return
-
-               self._main_loop_cleanup()
-
-               logger = self._logger
-               pkg_count = self._pkg_count
-               mtimedb = self._mtimedb
-               bad_resume_opts = self._bad_resume_opts
-
-               logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
-                       (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
-
-               logger.log(" *** RESTARTING " + \
-                       "emerge via exec() after change of " + \
-                       "portage version.")
-
-               mtimedb["resume"]["mergelist"].remove(list(pkg))
-               mtimedb.commit()
-               portage.run_exitfuncs()
-               mynewargv = [sys.argv[0], "--resume"]
-               resume_opts = self.myopts.copy()
-               # For automatic resume, we need to prevent
-               # any of bad_resume_opts from leaking in
-               # via EMERGE_DEFAULT_OPTS.
-               resume_opts["--ignore-default-opts"] = True
-               for myopt, myarg in resume_opts.iteritems():
-                       if myopt not in bad_resume_opts:
-                               if myarg is True:
-                                       mynewargv.append(myopt)
-                               else:
-                                       mynewargv.append(myopt +"="+ str(myarg))
-               # priority only needs to be adjusted on the first run
-               os.environ["PORTAGE_NICENESS"] = "0"
-               os.execv(mynewargv[0], mynewargv)
-
-       def merge(self):
-
-               if "--resume" in self.myopts:
-                       # We're resuming.
-                       portage.writemsg_stdout(
-                               colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
-                       self._logger.log(" *** Resuming merge...")
-
-               self._save_resume_list()
-
-               try:
-                       self._background = self._background_mode()
-               except self._unknown_internal_error:
-                       return 1
-
-               for root in self.trees:
-                       root_config = self.trees[root]["root_config"]
-
-                       # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
-                       # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
-                       # for ensuring sane $PWD (bug #239560) and storing elog messages.
-                       tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
-                       if not tmpdir or not os.path.isdir(tmpdir):
-                               msg = "The directory specified in your " + \
-                                       "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
-                               "does not exist. Please create this " + \
-                               "directory or correct your PORTAGE_TMPDIR setting."
-                               msg = textwrap.wrap(msg, 70)
-                               out = portage.output.EOutput()
-                               for l in msg:
-                                       out.eerror(l)
-                               return 1
-
-                       if self._background:
-                               root_config.settings.unlock()
-                               root_config.settings["PORTAGE_BACKGROUND"] = "1"
-                               root_config.settings.backup_changes("PORTAGE_BACKGROUND")
-                               root_config.settings.lock()
-
-                       self.pkgsettings[root] = portage.config(
-                               clone=root_config.settings)
-
-               rval = self._generate_digests()
-               if rval != os.EX_OK:
-                       return rval
-
-               rval = self._check_manifests()
-               if rval != os.EX_OK:
-                       return rval
-
-               keep_going = "--keep-going" in self.myopts
-               fetchonly = self._build_opts.fetchonly
-               mtimedb = self._mtimedb
-               failed_pkgs = self._failed_pkgs
-
-               while True:
-                       rval = self._merge()
-                       if rval == os.EX_OK or fetchonly or not keep_going:
-                               break
-                       if "resume" not in mtimedb:
-                               break
-                       mergelist = self._mtimedb["resume"].get("mergelist")
-                       if not mergelist:
-                               break
-
-                       if not failed_pkgs:
-                               break
-
-                       for failed_pkg in failed_pkgs:
-                               mergelist.remove(list(failed_pkg.pkg))
-
-                       self._failed_pkgs_all.extend(failed_pkgs)
-                       del failed_pkgs[:]
-
-                       if not mergelist:
-                               break
-
-                       if not self._calc_resume_list():
-                               break
-
-                       clear_caches(self.trees)
-                       if not self._mergelist:
-                               break
-
-                       self._save_resume_list()
-                       self._pkg_count.curval = 0
-                       self._pkg_count.maxval = len([x for x in self._mergelist \
-                               if isinstance(x, Package) and x.operation == "merge"])
-                       self._status_display.maxval = self._pkg_count.maxval
-
-               self._logger.log(" *** Finished. Cleaning up...")
-
-               if failed_pkgs:
-                       self._failed_pkgs_all.extend(failed_pkgs)
-                       del failed_pkgs[:]
-
-               background = self._background
-               failure_log_shown = False
-               if background and len(self._failed_pkgs_all) == 1:
-                       # If only one package failed then just show its
-                       # whole log for easy viewing.
-                       failed_pkg = self._failed_pkgs_all[-1]
-                       build_dir = failed_pkg.build_dir
-                       log_file = None
 
-                       log_paths = [failed_pkg.build_log]
+actions = frozenset([
+"clean", "config", "depclean",
+"info", "list-sets", "metadata",
+"prune", "regen",  "search",
+"sync",  "unmerge", "version",
+])
+options=[
+"--ask",          "--alphabetical",
+"--buildpkg",     "--buildpkgonly",
+"--changelog",    "--columns",
+"--complete-graph",
+"--debug",        "--deep",
+"--digest",
+"--emptytree",
+"--fetchonly",    "--fetch-all-uri",
+"--getbinpkg",    "--getbinpkgonly",
+"--help",         "--ignore-default-opts",
+"--keep-going",
+"--noconfmem",
+"--newuse",
+"--nodeps",       "--noreplace",
+"--nospinner",    "--oneshot",
+"--onlydeps",     "--pretend",
+"--quiet",        "--resume",
+"--searchdesc",   "--selective",
+"--skipfirst",
+"--tree",
+"--update",
+"--usepkg",       "--usepkgonly",
+"--verbose",
+]
 
-                       log_path = self._locate_failure_log(failed_pkg)
-                       if log_path is not None:
-                               try:
-                                       log_file = open(log_path)
-                               except IOError:
-                                       pass
+shortmapping={
+"1":"--oneshot",
+"a":"--ask",
+"b":"--buildpkg",  "B":"--buildpkgonly",
+"c":"--clean",     "C":"--unmerge",
+"d":"--debug",     "D":"--deep",
+"e":"--emptytree",
+"f":"--fetchonly", "F":"--fetch-all-uri",
+"g":"--getbinpkg", "G":"--getbinpkgonly",
+"h":"--help",
+"k":"--usepkg",    "K":"--usepkgonly",
+"l":"--changelog",
+"n":"--noreplace", "N":"--newuse",
+"o":"--onlydeps",  "O":"--nodeps",
+"p":"--pretend",   "P":"--prune",
+"q":"--quiet",
+"s":"--search",    "S":"--searchdesc",
+"t":"--tree",
+"u":"--update",
+"v":"--verbose",   "V":"--version"
+}
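
A minimal sketch of how a bundle of short flags such as -avuDN maps onto long options via the shortmapping table above; expand_short_opts is a hypothetical helper shown only for illustration and is not emerge's actual option parser.

# Hypothetical helper (illustration only): expand a "-avuDN" style
# bundle into long options using the shortmapping table defined above.
def expand_short_opts(arg, mapping=shortmapping):
        if not arg.startswith("-") or arg.startswith("--"):
                return [arg]
        return [mapping.get(ch, "-" + ch) for ch in arg[1:]]

# expand_short_opts("-avuDN")
# -> ['--ask', '--verbose', '--update', '--deep', '--newuse']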
 
-                       if log_file is not None:
-                               try:
-                                       for line in log_file:
-                                               writemsg_level(line, noiselevel=-1)
-                               finally:
-                                       log_file.close()
-                               failure_log_shown = True
-
-               # Dump mod_echo output now since it tends to flood the terminal.
-               # This prevents more important output, generated later, from
-               # being swept away by the mod_echo output.
-               mod_echo_output =  _flush_elog_mod_echo()
-
-               if background and not failure_log_shown and \
-                       self._failed_pkgs_all and \
-                       self._failed_pkgs_die_msgs and \
-                       not mod_echo_output:
-
-                       printer = portage.output.EOutput()
-                       for mysettings, key, logentries in self._failed_pkgs_die_msgs:
-                               root_msg = ""
-                               if mysettings["ROOT"] != "/":
-                                       root_msg = " merged to %s" % mysettings["ROOT"]
-                               print
-                               printer.einfo("Error messages for package %s%s:" % \
-                                       (colorize("INFORM", key), root_msg))
-                               print
-                               for phase in portage.const.EBUILD_PHASES:
-                                       if phase not in logentries:
-                                               continue
-                                       for msgtype, msgcontent in logentries[phase]:
-                                               if isinstance(msgcontent, basestring):
-                                                       msgcontent = [msgcontent]
-                                               for line in msgcontent:
-                                                       printer.eerror(line.strip("\n"))
-
-               if self._post_mod_echo_msgs:
-                       for msg in self._post_mod_echo_msgs:
-                               msg()
-
-               if len(self._failed_pkgs_all) > 1 or \
-                       (self._failed_pkgs_all and "--keep-going" in self.myopts):
-                       if len(self._failed_pkgs_all) > 1:
-                               msg = "The following %d packages have " % \
-                                       len(self._failed_pkgs_all) + \
-                                       "failed to build or install:"
-                       else:
-                               msg = "The following package has " + \
-                                       "failed to build or install:"
-                       prefix = bad(" * ")
-                       writemsg(prefix + "\n", noiselevel=-1)
-                       from textwrap import wrap
-                       for line in wrap(msg, 72):
-                               writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
-                       writemsg(prefix + "\n", noiselevel=-1)
-                       for failed_pkg in self._failed_pkgs_all:
-                               writemsg("%s\t%s\n" % (prefix,
-                                       colorize("INFORM", str(failed_pkg.pkg))),
-                                       noiselevel=-1)
-                       writemsg(prefix + "\n", noiselevel=-1)
+def getgccversion(chost):
+       """
+       @rtype: C{str}
+       @return: the current in-use gcc version
+       """
 
-               return rval
+       gcc_ver_command = 'gcc -dumpversion'
+       gcc_ver_prefix = 'gcc-'
 
-       def _elog_listener(self, mysettings, key, logentries, fulltext):
-               errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
-               if errors:
-                       self._failed_pkgs_die_msgs.append(
-                               (mysettings, key, errors))
+       gcc_not_found_error = red(
+       "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
+       "!!! to update the environment of this terminal and possibly\n" +
+       "!!! other terminals also.\n"
+       )
 
-       def _locate_failure_log(self, failed_pkg):
+       mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
+       if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
+               return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
 
-               build_dir = failed_pkg.build_dir
-               log_file = None
+       mystatus, myoutput = commands.getstatusoutput(
+               chost + "-" + gcc_ver_command)
+       if mystatus == os.EX_OK:
+               return gcc_ver_prefix + myoutput
 
-               log_paths = [failed_pkg.build_log]
+       mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
+       if mystatus == os.EX_OK:
+               return gcc_ver_prefix + myoutput
 
-               for log_path in log_paths:
-                       if not log_path:
-                               continue
+       portage.writemsg(gcc_not_found_error, noiselevel=-1)
+       return "[unavailable]"
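
A rough picture of getgccversion()'s possible return values, assuming an ordinary toolchain; the concrete version strings below are illustrative only.

# Illustrative outcomes (actual values depend on the installed toolchain):
#   getgccversion("x86_64-pc-linux-gnu")
#     -> "gcc-4.3.2"      if `gcc-config -c` prints "x86_64-pc-linux-gnu-4.3.2"
#     -> "gcc-" + output of `x86_64-pc-linux-gnu-gcc -dumpversion` or of
#        plain `gcc -dumpversion` as fallbacks
#     -> "[unavailable]"  if no gcc can be located (the error text is printed)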
 
+def getportageversion(portdir, target_root, profile, chost, vardb):
+       profilever = "unavailable"
+       if profile:
+               realpath = os.path.realpath(profile)
+               basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
+               if realpath.startswith(basepath):
+                       profilever = realpath[1 + len(basepath):]
+               else:
                        try:
-                               log_size = os.stat(log_path).st_size
-                       except OSError:
-                               continue
-
-                       if log_size == 0:
-                               continue
-
-                       return log_path
-
-               return None
-
-       def _add_packages(self):
-               pkg_queue = self._pkg_queue
-               for pkg in self._mergelist:
-                       if isinstance(pkg, Package):
-                               pkg_queue.append(pkg)
-                       elif isinstance(pkg, Blocker):
+                               profilever = "!" + os.readlink(profile)
+                       except (OSError):
                                pass
+               del realpath, basepath
 
-       def _system_merge_started(self, merge):
-               """
-               Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
-               """
-               graph = self._digraph
-               if graph is None:
-                       return
-               pkg = merge.merge.pkg
-
-               # Skip this if $ROOT != / since it shouldn't matter if there
-               # are unsatisfied system runtime deps in this case.
-               if pkg.root != '/':
-                       return
-
-               completed_tasks = self._completed_tasks
-               unsatisfied = self._unsatisfied_system_deps
-
-               def ignore_non_runtime_or_satisfied(priority):
-                       """
-                       Ignore non-runtime and satisfied runtime priorities.
-                       """
-                       if isinstance(priority, DepPriority) and \
-                               not priority.satisfied and \
-                               (priority.runtime or priority.runtime_post):
-                               return False
-                       return True
-
-               # When checking for unsatisfied runtime deps, only check
-               # direct deps since indirect deps are checked when the
-               # corresponding parent is merged.
-               for child in graph.child_nodes(pkg,
-                       ignore_priority=ignore_non_runtime_or_satisfied):
-                       if not isinstance(child, Package) or \
-                               child.operation == 'uninstall':
-                               continue
-                       if child is pkg:
-                               continue
-                       if child.operation == 'merge' and \
-                               child not in completed_tasks:
-                               unsatisfied.add(child)
-
-       def _merge_wait_exit_handler(self, task):
-               self._merge_wait_scheduled.remove(task)
-               self._merge_exit(task)
-
-       def _merge_exit(self, merge):
-               self._do_merge_exit(merge)
-               self._deallocate_config(merge.merge.settings)
-               if merge.returncode == os.EX_OK and \
-                       not merge.merge.pkg.installed:
-                       self._status_display.curval += 1
-               self._status_display.merges = len(self._task_queues.merge)
-               self._schedule()
-
-       def _do_merge_exit(self, merge):
-               pkg = merge.merge.pkg
-               if merge.returncode != os.EX_OK:
-                       settings = merge.merge.settings
-                       build_dir = settings.get("PORTAGE_BUILDDIR")
-                       build_log = settings.get("PORTAGE_LOG_FILE")
-
-                       self._failed_pkgs.append(self._failed_pkg(
-                               build_dir=build_dir, build_log=build_log,
-                               pkg=pkg,
-                               returncode=merge.returncode))
-                       self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
-
-                       self._status_display.failed = len(self._failed_pkgs)
-                       return
-
-               self._task_complete(pkg)
-               pkg_to_replace = merge.merge.pkg_to_replace
-               if pkg_to_replace is not None:
-                       # When a package is replaced, mark its uninstall
-                       # task complete (if any).
-                       uninst_hash_key = \
-                               ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
-                       self._task_complete(uninst_hash_key)
-
-               if pkg.installed:
-                       return
-
-               self._restart_if_necessary(pkg)
-
-               # Call mtimedb.commit() after each merge so that
-               # --resume still works after being interrupted
-               # by reboot, sigkill or similar.
-               mtimedb = self._mtimedb
-               mtimedb["resume"]["mergelist"].remove(list(pkg))
-               if not mtimedb["resume"]["mergelist"]:
-                       del mtimedb["resume"]
-               mtimedb.commit()
-
-       def _build_exit(self, build):
-               if build.returncode == os.EX_OK:
-                       self.curval += 1
-                       merge = PackageMerge(merge=build)
-                       if not build.build_opts.buildpkgonly and \
-                               build.pkg in self._deep_system_deps:
-                               # Since dependencies on system packages are frequently
-                               # unspecified, merge them only when no builds are executing.
-                               self._merge_wait_queue.append(merge)
-                               merge.addStartListener(self._system_merge_started)
-                       else:
-                               merge.addExitListener(self._merge_exit)
-                               self._task_queues.merge.add(merge)
-                               self._status_display.merges = len(self._task_queues.merge)
-               else:
-                       settings = build.settings
-                       build_dir = settings.get("PORTAGE_BUILDDIR")
-                       build_log = settings.get("PORTAGE_LOG_FILE")
-
-                       self._failed_pkgs.append(self._failed_pkg(
-                               build_dir=build_dir, build_log=build_log,
-                               pkg=build.pkg,
-                               returncode=build.returncode))
-                       self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
-
-                       self._status_display.failed = len(self._failed_pkgs)
-                       self._deallocate_config(build.settings)
-               self._jobs -= 1
-               self._status_display.running = self._jobs
-               self._schedule()
-
-       def _extract_exit(self, build):
-               self._build_exit(build)
-
-       def _task_complete(self, pkg):
-               self._completed_tasks.add(pkg)
-               self._unsatisfied_system_deps.discard(pkg)
-               self._choose_pkg_return_early = False
-
-       def _merge(self):
-
-               self._add_prefetchers()
-               self._add_packages()
-               pkg_queue = self._pkg_queue
-               failed_pkgs = self._failed_pkgs
-               portage.locks._quiet = self._background
-               portage.elog._emerge_elog_listener = self._elog_listener
-               rval = os.EX_OK
-
-               try:
-                       self._main_loop()
-               finally:
-                       self._main_loop_cleanup()
-                       portage.locks._quiet = False
-                       portage.elog._emerge_elog_listener = None
-                       if failed_pkgs:
-                               rval = failed_pkgs[-1].returncode
-
-               return rval
-
-       def _main_loop_cleanup(self):
-               del self._pkg_queue[:]
-               self._completed_tasks.clear()
-               self._deep_system_deps.clear()
-               self._unsatisfied_system_deps.clear()
-               self._choose_pkg_return_early = False
-               self._status_display.reset()
-               self._digraph = None
-               self._task_queues.fetch.clear()
-
-       def _choose_pkg(self):
-               """
-               Choose a task that has all its dependencies satisfied.
-               """
-
-               if self._choose_pkg_return_early:
-                       return None
-
-               if self._digraph is None:
-                       if (self._jobs or self._task_queues.merge) and \
-                               not ("--nodeps" in self.myopts and \
-                               (self._max_jobs is True or self._max_jobs > 1)):
-                               self._choose_pkg_return_early = True
-                               return None
-                       return self._pkg_queue.pop(0)
-
-               if not (self._jobs or self._task_queues.merge):
-                       return self._pkg_queue.pop(0)
-
-               self._prune_digraph()
-
-               chosen_pkg = None
-               later = set(self._pkg_queue)
-               for pkg in self._pkg_queue:
-                       later.remove(pkg)
-                       if not self._dependent_on_scheduled_merges(pkg, later):
-                               chosen_pkg = pkg
-                               break
-
-               if chosen_pkg is not None:
-                       self._pkg_queue.remove(chosen_pkg)
-
-               if chosen_pkg is None:
-                       # There's no point in searching for a package to
-                       # choose until at least one of the existing jobs
-                       # completes.
-                       self._choose_pkg_return_early = True
-
-               return chosen_pkg
-
-       def _dependent_on_scheduled_merges(self, pkg, later):
-               """
-               Traverse the subgraph of the given package's deep dependencies
-               to see if it contains any scheduled merges.
-               @param pkg: a package to check dependencies for
-               @type pkg: Package
-               @param later: packages for which dependence should be ignored
-                       since they will be merged later than pkg anyway and therefore
-                       delaying the merge of pkg will not result in a more optimal
-                       merge order
-               @type later: set
-               @rtype: bool
-               @returns: True if the package is dependent, False otherwise.
-               """
-
-               graph = self._digraph
-               completed_tasks = self._completed_tasks
-
-               dependent = False
-               traversed_nodes = set([pkg])
-               direct_deps = graph.child_nodes(pkg)
-               node_stack = direct_deps
-               direct_deps = frozenset(direct_deps)
-               while node_stack:
-                       node = node_stack.pop()
-                       if node in traversed_nodes:
-                               continue
-                       traversed_nodes.add(node)
-                       if not ((node.installed and node.operation == "nomerge") or \
-                               (node.operation == "uninstall" and \
-                               node not in direct_deps) or \
-                               node in completed_tasks or \
-                               node in later):
-                               dependent = True
-                               break
-                       node_stack.extend(graph.child_nodes(node))
-
-               return dependent
-
-       def _allocate_config(self, root):
-               """
-               Allocate a unique config instance for a task in order
-               to prevent interference between parallel tasks.
-               """
-               if self._config_pool[root]:
-                       temp_settings = self._config_pool[root].pop()
+       libcver=[]
+       libclist  = vardb.match("virtual/libc")
+       libclist += vardb.match("virtual/glibc")
+       libclist  = portage.util.unique_array(libclist)
+       for x in libclist:
+               xs=portage.catpkgsplit(x)
+               if libcver:
+                       libcver+=","+"-".join(xs[1:])
                else:
-                       temp_settings = portage.config(clone=self.pkgsettings[root])
-               # Since config.setcpv() isn't guaranteed to call config.reset() due to
-               # performance reasons, call it here to make sure all settings from the
-               # previous package get flushed out (such as PORTAGE_LOG_FILE).
-               temp_settings.reload()
-               temp_settings.reset()
-               return temp_settings
-
-       def _deallocate_config(self, settings):
-               self._config_pool[settings["ROOT"]].append(settings)
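
A simplified view of the _allocate_config()/_deallocate_config() pairing as used by _task() and the build/merge exit handlers in this class; this sketch only restates the pooling pattern and is not additional scheduler code.

# Simplified pairing (illustration only):
#   settings = self._allocate_config(pkg.root)   # pop from the pool or clone
#   ... run the build/merge task with its own settings instance ...
#   self._deallocate_config(settings)            # return it to the pool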
-
-       def _main_loop(self):
-
-               # Only allow 1 job max if a restart is scheduled
-               # due to portage update.
-               if self._is_restart_scheduled() or \
-                       self._opts_no_background.intersection(self.myopts):
-                       self._set_max_jobs(1)
-
-               merge_queue = self._task_queues.merge
-
-               while self._schedule():
-                       if self._poll_event_handlers:
-                               self._poll_loop()
-
-               while True:
-                       self._schedule()
-                       if not (self._jobs or merge_queue):
-                               break
-                       if self._poll_event_handlers:
-                               self._poll_loop()
-
-       def _keep_scheduling(self):
-               return bool(self._pkg_queue and \
-                       not (self._failed_pkgs and not self._build_opts.fetchonly))
-
-       def _schedule_tasks(self):
-
-               # When the number of jobs drops to zero, process all waiting merges.
-               if not self._jobs and self._merge_wait_queue:
-                       for task in self._merge_wait_queue:
-                               task.addExitListener(self._merge_wait_exit_handler)
-                               self._task_queues.merge.add(task)
-                       self._status_display.merges = len(self._task_queues.merge)
-                       self._merge_wait_scheduled.extend(self._merge_wait_queue)
-                       del self._merge_wait_queue[:]
-
-               self._schedule_tasks_imp()
-               self._status_display.display()
-
-               state_change = 0
-               for q in self._task_queues.values():
-                       if q.schedule():
-                               state_change += 1
-
-               # Cancel prefetchers if they're the only reason
-               # the main poll loop is still running.
-               if self._failed_pkgs and not self._build_opts.fetchonly and \
-                       not (self._jobs or self._task_queues.merge) and \
-                       self._task_queues.fetch:
-                       self._task_queues.fetch.clear()
-                       state_change += 1
-
-               if state_change:
-                       self._schedule_tasks_imp()
-                       self._status_display.display()
-
-               return self._keep_scheduling()
-
-       def _job_delay(self):
-               """
-               @rtype: bool
-               @returns: True if job scheduling should be delayed, False otherwise.
-               """
-
-               if self._jobs and self._max_load is not None:
-
-                       current_time = time.time()
-
-                       delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
-                       if delay > self._job_delay_max:
-                               delay = self._job_delay_max
-                       if (current_time - self._previous_job_start_time) < delay:
-                               return True
-
-               return False
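
A worked example of the _job_delay() formula, using assumed coefficients, since the _job_delay_factor, _job_delay_exp and _job_delay_max class attributes are defined outside this hunk; note the throttle only applies when a load limit is set (self._max_load is not None).

# Worked example with assumed coefficients (not the real class attributes):
#   _job_delay_factor = 0.1, _job_delay_exp = 1.5, _job_delay_max = 5
#   jobs = 4  -> delay = 0.1 * 4 ** 1.5       = 0.8 seconds
#   jobs = 16 -> delay = min(0.1 * 16 ** 1.5, 5) = 5 seconds (capped)
# A new job is delayed while time.time() - self._previous_job_start_time < delay.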
-
-       def _schedule_tasks_imp(self):
-               """
-               @rtype: bool
-               @returns: True if state changed, False otherwise.
-               """
-
-               state_change = 0
-
-               while True:
-
-                       if not self._keep_scheduling():
-                               return bool(state_change)
-
-                       if self._choose_pkg_return_early or \
-                               self._merge_wait_scheduled or \
-                               (self._jobs and self._unsatisfied_system_deps) or \
-                               not self._can_add_job() or \
-                               self._job_delay():
-                               return bool(state_change)
-
-                       pkg = self._choose_pkg()
-                       if pkg is None:
-                               return bool(state_change)
-
-                       state_change += 1
-
-                       if not pkg.installed:
-                               self._pkg_count.curval += 1
-
-                       task = self._task(pkg)
-
-                       if pkg.installed:
-                               merge = PackageMerge(merge=task)
-                               merge.addExitListener(self._merge_exit)
-                               self._task_queues.merge.add(merge)
-
-                       elif pkg.built:
-                               self._jobs += 1
-                               self._previous_job_start_time = time.time()
-                               self._status_display.running = self._jobs
-                               task.addExitListener(self._extract_exit)
-                               self._task_queues.jobs.add(task)
-
-                       else:
-                               self._jobs += 1
-                               self._previous_job_start_time = time.time()
-                               self._status_display.running = self._jobs
-                               task.addExitListener(self._build_exit)
-                               self._task_queues.jobs.add(task)
-
-               return bool(state_change)
-
-       def _task(self, pkg):
-
-               pkg_to_replace = None
-               if pkg.operation != "uninstall":
-                       vardb = pkg.root_config.trees["vartree"].dbapi
-                       previous_cpv = vardb.match(pkg.slot_atom)
-                       if previous_cpv:
-                               previous_cpv = previous_cpv.pop()
-                               pkg_to_replace = self._pkg(previous_cpv,
-                                       "installed", pkg.root_config, installed=True)
-
-               task = MergeListItem(args_set=self._args_set,
-                       background=self._background, binpkg_opts=self._binpkg_opts,
-                       build_opts=self._build_opts,
-                       config_pool=self._ConfigPool(pkg.root,
-                       self._allocate_config, self._deallocate_config),
-                       emerge_opts=self.myopts,
-                       find_blockers=self._find_blockers(pkg), logger=self._logger,
-                       mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
-                       pkg_to_replace=pkg_to_replace,
-                       prefetcher=self._prefetchers.get(pkg),
-                       scheduler=self._sched_iface,
-                       settings=self._allocate_config(pkg.root),
-                       statusMessage=self._status_msg,
-                       world_atom=self._world_atom)
-
-               return task
-
-       def _failed_pkg_msg(self, failed_pkg, action, preposition):
-               pkg = failed_pkg.pkg
-               msg = "%s to %s %s" % \
-                       (bad("Failed"), action, colorize("INFORM", pkg.cpv))
-               if pkg.root != "/":
-                       msg += " %s %s" % (preposition, pkg.root)
-
-               log_path = self._locate_failure_log(failed_pkg)
-               if log_path is not None:
-                       msg += ", Log file:"
-               self._status_msg(msg)
-
-               if log_path is not None:
-                       self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
-
-       def _status_msg(self, msg):
-               """
-               Display a brief status message (no newlines) in the status display.
-               This is called by tasks to provide feedback to the user. This
-               delegates the responsibility of generating \r and \n control
-               characters, to guarantee that lines are created or erased when
-               necessary and appropriate.
-
-               @type msg: str
-               @param msg: a brief status message (no newlines allowed)
-               """
-               if not self._background:
-                       writemsg_level("\n")
-               self._status_display.displayMessage(msg)
-
-       def _save_resume_list(self):
-               """
-               Do this before verifying the ebuild Manifests since it might
-               be possible for the user to use --resume --skipfirst to get past
-               a non-essential package with a broken digest.
-               """
-               mtimedb = self._mtimedb
-               mtimedb["resume"]["mergelist"] = [list(x) \
-                       for x in self._mergelist \
-                       if isinstance(x, Package) and x.operation == "merge"]
-
-               mtimedb.commit()
-
-       def _calc_resume_list(self):
-               """
-               Use the current resume list to calculate a new one,
-               dropping any packages with unsatisfied deps.
-               @rtype: bool
-               @returns: True if successful, False otherwise.
-               """
-               print colorize("GOOD", "*** Resuming merge...")
-
-               if self._show_list():
-                       if "--tree" in self.myopts:
-                               portage.writemsg_stdout("\n" + \
-                                       darkgreen("These are the packages that " + \
-                                       "would be merged, in reverse order:\n\n"))
-
-                       else:
-                               portage.writemsg_stdout("\n" + \
-                                       darkgreen("These are the packages that " + \
-                                       "would be merged, in order:\n\n"))
-
-               show_spinner = "--quiet" not in self.myopts and \
-                       "--nodeps" not in self.myopts
-
-               if show_spinner:
-                       print "Calculating dependencies  ",
-
-               myparams = create_depgraph_params(self.myopts, None)
-               success = False
-               e = None
-               try:
-                       success, mydepgraph, dropped_tasks = resume_depgraph(
-                               self.settings, self.trees, self._mtimedb, self.myopts,
-                               myparams, self._spinner)
-               except depgraph.UnsatisfiedResumeDep, exc:
-                       # rename variable to avoid python-3.0 error:
-                       # SyntaxError: can not delete variable 'e' referenced in nested
-                       #              scope
-                       e = exc
-                       mydepgraph = e.depgraph
-                       dropped_tasks = set()
-
-               if show_spinner:
-                       print "\b\b... done!"
-
-               if e is not None:
-                       def unsatisfied_resume_dep_msg():
-                               mydepgraph.display_problems()
-                               out = portage.output.EOutput()
-                               out.eerror("One or more packages are either masked or " + \
-                                       "have missing dependencies:")
-                               out.eerror("")
-                               indent = "  "
-                               show_parents = set()
-                               for dep in e.value:
-                                       if dep.parent in show_parents:
-                                               continue
-                                       show_parents.add(dep.parent)
-                                       if dep.atom is None:
-                                               out.eerror(indent + "Masked package:")
-                                               out.eerror(2 * indent + str(dep.parent))
-                                               out.eerror("")
-                                       else:
-                                               out.eerror(indent + str(dep.atom) + " pulled in by:")
-                                               out.eerror(2 * indent + str(dep.parent))
-                                               out.eerror("")
-                               msg = "The resume list contains packages " + \
-                                       "that are either masked or have " + \
-                                       "unsatisfied dependencies. " + \
-                                       "Please restart/continue " + \
-                                       "the operation manually, or use --skipfirst " + \
-                                       "to skip the first package in the list and " + \
-                                       "any other packages that may be " + \
-                                       "masked or have missing dependencies."
-                               for line in textwrap.wrap(msg, 72):
-                                       out.eerror(line)
-                       self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
-                       return False
-
-               if success and self._show_list():
-                       mylist = mydepgraph.altlist()
-                       if mylist:
-                               if "--tree" in self.myopts:
-                                       mylist.reverse()
-                               mydepgraph.display(mylist, favorites=self._favorites)
-
-               if not success:
-                       self._post_mod_echo_msgs.append(mydepgraph.display_problems)
-                       return False
-               mydepgraph.display_problems()
-
-               mylist = mydepgraph.altlist()
-               mydepgraph.break_refs(mylist)
-               mydepgraph.break_refs(dropped_tasks)
-               self._mergelist = mylist
-               self._set_digraph(mydepgraph.schedulerGraph())
-
-               msg_width = 75
-               for task in dropped_tasks:
-                       if not (isinstance(task, Package) and task.operation == "merge"):
-                               continue
-                       pkg = task
-                       msg = "emerge --keep-going:" + \
-                               " %s" % (pkg.cpv,)
-                       if pkg.root != "/":
-                               msg += " for %s" % (pkg.root,)
-                       msg += " dropped due to unsatisfied dependency."
-                       for line in textwrap.wrap(msg, msg_width):
-                               eerror(line, phase="other", key=pkg.cpv)
-                       settings = self.pkgsettings[pkg.root]
-                       # Ensure that log collection from $T is disabled inside
-                       # elog_process(), since any logs that might exist are
-                       # not valid here.
-                       settings.pop("T", None)
-                       portage.elog.elog_process(pkg.cpv, settings)
-                       self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
-
-               return True
-
-       def _show_list(self):
-               myopts = self.myopts
-               if "--quiet" not in myopts and \
-                       ("--ask" in myopts or "--tree" in myopts or \
-                       "--verbose" in myopts):
-                       return True
-               return False
-
-       def _world_atom(self, pkg):
-               """
-               Add the package to the world file, but only if
-               it's supposed to be added. Otherwise, do nothing.
-               """
-
-               if set(("--buildpkgonly", "--fetchonly",
-                       "--fetch-all-uri",
-                       "--oneshot", "--onlydeps",
-                       "--pretend")).intersection(self.myopts):
-                       return
-
-               if pkg.root != self.target_root:
-                       return
-
-               args_set = self._args_set
-               if not args_set.findAtomForPackage(pkg):
-                       return
+                       libcver = "-".join(xs[1:])
+       if libcver == []:
+               libcver = "unavailable"
 
-               logger = self._logger
-               pkg_count = self._pkg_count
-               root_config = pkg.root_config
-               world_set = root_config.sets["world"]
-               world_locked = False
-               if hasattr(world_set, "lock"):
-                       world_set.lock()
-                       world_locked = True
+       gccver = getgccversion(chost)
+       unameout = platform.release() + " " + platform.machine()
 
-               try:
-                       if hasattr(world_set, "load"):
-                               world_set.load() # maybe it's changed on disk
-
-                       atom = create_world_atom(pkg, args_set, root_config)
-                       if atom:
-                               if hasattr(world_set, "add"):
-                                       self._status_msg(('Recording %s in "world" ' + \
-                                               'favorites file...') % atom)
-                                       logger.log(" === (%s of %s) Updating world file (%s)" % \
-                                               (pkg_count.curval, pkg_count.maxval, pkg.cpv))
-                                       world_set.add(atom)
-                               else:
-                                       writemsg_level('\n!!! Unable to record %s in "world"\n' % \
-                                               (atom,), level=logging.WARN, noiselevel=-1)
-               finally:
-                       if world_locked:
-                               world_set.unlock()
-
-       def _pkg(self, cpv, type_name, root_config, installed=False):
-               """
-               Get a package instance from the cache, or create a new
-               one if necessary. Raises KeyError from aux_get if it
-               failures for some reason (package does not exist or is
-               corrupt).
-               """
-               operation = "merge"
-               if installed:
-                       operation = "nomerge"
-
-               if self._digraph is not None:
-                       # Reuse existing instance when available.
-                       pkg = self._digraph.get(
-                               (type_name, root_config.root, cpv, operation))
-                       if pkg is not None:
-                               return pkg
-
-               tree_type = depgraph.pkg_tree_map[type_name]
-               db = root_config.trees[tree_type].dbapi
-               db_keys = list(self.trees[root_config.root][
-                       tree_type].dbapi._aux_cache_keys)
-               metadata = izip(db_keys, db.aux_get(cpv, db_keys))
-               pkg = Package(cpv=cpv, metadata=metadata,
-                       root_config=root_config, installed=installed)
-               if type_name == "ebuild":
-                       settings = self.pkgsettings[root_config.root]
-                       settings.setcpv(pkg)
-                       pkg.metadata["USE"] = settings["PORTAGE_USE"]
-                       pkg.metadata['CHOST'] = settings.get('CHOST', '')
-
-               return pkg
+       return "Portage " + portage.VERSION + " (" + profilever + ", " + gccver + ", " + libcver + ", " + unameout + ")"
 
 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
 
@@ -6958,23 +395,6 @@ def display_preserved_libs(vardbapi):
                print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
 
 
-def _flush_elog_mod_echo():
-       """
-       Dump the mod_echo output now so that our other
-       notifications are shown last.
-       @rtype: bool
-       @returns: True if messages were shown, False otherwise.
-       """
-       messages_shown = False
-       try:
-               from portage.elog import mod_echo
-       except ImportError:
-               pass # happens during downgrade to a version without the module
-       else:
-               messages_shown = bool(mod_echo._items)
-               mod_echo.finalize()
-       return messages_shown
-
 def post_emerge(root_config, myopts, mtimedb, retval):
        """
        Misc. things to run at the end of a merge session.
@@ -7134,34 +554,6 @@ def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
        manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
        return manager.getUnreadItems( repo_id, update=update )
 
-def insert_category_into_atom(atom, category):
-       alphanum = re.search(r'\w', atom)
-       if alphanum:
-               ret = atom[:alphanum.start()] + "%s/" % category + \
-                       atom[alphanum.start():]
-       else:
-               ret = None
-       return ret
-
-def is_valid_package_atom(x):
-       if "/" not in x:
-               alphanum = re.search(r'\w', x)
-               if alphanum:
-                       x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
-       return portage.isvalidatom(x)
-
-def show_blocker_docs_link():
-       print
-       print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
-       print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
-       print
-       print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
-       print
-
-def show_mask_docs():
-       print "For more information, see the MASKED PACKAGES section in the emerge"
-       print "man page or refer to the Gentoo Handbook."
-
 def action_sync(settings, trees, mtimedb, myopts, myaction):
        xterm_titles = "notitles" not in settings.features
        emergelog(xterm_titles, " === sync")
@@ -9108,80 +2500,6 @@ def action_depclean(settings, trees, ldpath_mtimes,
        else:
                print "Number removed:       "+str(len(cleanlist))
 
-def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
-       """
-       Construct a depgraph for the given resume list. This will raise
-       PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
-       @rtype: tuple
-       @returns: (success, depgraph, dropped_tasks)
-       """
-       skip_masked = True
-       skip_unsatisfied = True
-       mergelist = mtimedb["resume"]["mergelist"]
-       dropped_tasks = set()
-       while True:
-               mydepgraph = depgraph(settings, trees,
-                       myopts, myparams, spinner)
-               try:
-                       success = mydepgraph.loadResumeCommand(mtimedb["resume"],
-                               skip_masked=skip_masked)
-               except depgraph.UnsatisfiedResumeDep, e:
-                       if not skip_unsatisfied:
-                               raise
-
-                       graph = mydepgraph.digraph
-                       unsatisfied_parents = dict((dep.parent, dep.parent) \
-                               for dep in e.value)
-                       traversed_nodes = set()
-                       unsatisfied_stack = list(unsatisfied_parents)
-                       while unsatisfied_stack:
-                               pkg = unsatisfied_stack.pop()
-                               if pkg in traversed_nodes:
-                                       continue
-                               traversed_nodes.add(pkg)
-
-                               # If this package was pulled in by a parent
-                               # package scheduled for merge, removing this
-                               # package may cause the the parent package's
-                               # dependency to become unsatisfied.
-                               for parent_node in graph.parent_nodes(pkg):
-                                       if not isinstance(parent_node, Package) \
-                                               or parent_node.operation not in ("merge", "nomerge"):
-                                               continue
-                                       unsatisfied = \
-                                               graph.child_nodes(parent_node,
-                                               ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
-                                       if pkg in unsatisfied:
-                                               unsatisfied_parents[parent_node] = parent_node
-                                               unsatisfied_stack.append(parent_node)
-
-                       pruned_mergelist = []
-                       for x in mergelist:
-                               if isinstance(x, list) and \
-                                       tuple(x) not in unsatisfied_parents:
-                                       pruned_mergelist.append(x)
-
-                       # If the mergelist doesn't shrink then this loop is infinite.
-                       if len(pruned_mergelist) == len(mergelist):
-                               # This happens if a package can't be dropped because
-                               # it's already installed, but it has unsatisfied PDEPEND.
-                               raise
-                       mergelist[:] = pruned_mergelist
-
-                       # Exclude installed packages that have been removed from the graph due
-                       # to failure to build/install runtime dependencies after the dependent
-                       # package has already been installed.
-                       dropped_tasks.update(pkg for pkg in \
-                               unsatisfied_parents if pkg.operation != "nomerge")
-                       mydepgraph.break_refs(unsatisfied_parents)
-
-                       del e, graph, traversed_nodes, \
-                               unsatisfied_parents, unsatisfied_stack
-                       continue
-               else:
-                       break
-       return (success, mydepgraph, dropped_tasks)
-
 def action_build(settings, trees, mtimedb,
        myopts, myaction, myfiles, spinner):
 
@@ -9847,16 +3165,6 @@ def validate_ebuild_environment(trees):
                settings = trees[myroot]["vartree"].settings
                settings.validate()
 
-def clear_caches(trees):
-       for d in trees.itervalues():
-               d["porttree"].dbapi.melt()
-               d["porttree"].dbapi._aux_cache.clear()
-               d["bintree"].dbapi._aux_cache.clear()
-               d["bintree"].dbapi._clear_cache()
-               d["vartree"].dbapi.linkmap._clear_cache()
-       portage.dircache.clear()
-       gc.collect()
-
 def load_emerge_config(trees=None):
        kwargs = {}
        for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
@@ -10255,28 +3563,6 @@ def config_protect_check(trees):
                                msg += " for '%s'" % root
                        writemsg_level(msg, level=logging.WARN, noiselevel=-1)
 
-def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
-
-       if "--quiet" in myopts:
-               print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
-               print "!!! one of the following fully-qualified ebuild names instead:\n"
-               for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
-                       print "    " + colorize("INFORM", cp)
-               return
-
-       s = search(root_config, spinner, "--searchdesc" in myopts,
-               "--quiet" not in myopts, "--usepkg" in myopts,
-               "--usepkgonly" in myopts)
-       null_cp = portage.dep_getkey(insert_category_into_atom(
-               arg, "null"))
-       cat, atom_pn = portage.catsplit(null_cp)
-       s.searchkey = atom_pn
-       for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
-               s.addCP(cp)
-       s.output()
-       print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
-       print "!!! one of the above fully-qualified ebuild names instead.\n"
-
 def profile_check(trees, myaction, myopts):
        if myaction in ("info", "sync"):
                return os.EX_OK
diff --git a/pym/_emerge/_find_deep_system_runtime_deps.py b/pym/_emerge/_find_deep_system_runtime_deps.py
new file mode 100644 (file)
index 0000000..3249168
--- /dev/null
@@ -0,0 +1,35 @@
+from _emerge.DepPriority import DepPriority
+from _emerge.Package import Package
+
+def _find_deep_system_runtime_deps(graph):
+       deep_system_deps = set()
+       node_stack = []
+       for node in graph:
+               if not isinstance(node, Package) or \
+                       node.operation == 'uninstall':
+                       continue
+               if node.root_config.sets['system'].findAtomForPackage(node):
+                       node_stack.append(node)
+
+       def ignore_priority(priority):
+               """
+               Ignore non-runtime priorities.
+               """
+               if isinstance(priority, DepPriority) and \
+                       (priority.runtime or priority.runtime_post):
+                       return False
+               return True
+
+       while node_stack:
+               node = node_stack.pop()
+               if node in deep_system_deps:
+                       continue
+               deep_system_deps.add(node)
+               for child in graph.child_nodes(node, ignore_priority=ignore_priority):
+                       if not isinstance(child, Package) or \
+                               child.operation == 'uninstall':
+                               continue
+                       node_stack.append(child)
+
+       return deep_system_deps
+
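A rough usage sketch for the new helper (a populated depgraph instance, here called mydepgraph, is assumed and is not part of this patch):

	from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps

	# Walk the dependency graph and collect every package that the system
	# set reaches through runtime edges (priority.runtime / runtime_post) only.
	deep_system_deps = _find_deep_system_runtime_deps(mydepgraph.digraph)
	for node in sorted(str(pkg) for pkg in deep_system_deps):
		print node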
diff --git a/pym/_emerge/_flush_elog_mod_echo.py b/pym/_emerge/_flush_elog_mod_echo.py
new file mode 100644 (file)
index 0000000..61eeb29
--- /dev/null
@@ -0,0 +1,17 @@
+def _flush_elog_mod_echo():
+       """
+       Dump the mod_echo output now so that our other
+       notifications are shown last.
+       @rtype: bool
+       @returns: True if messages were shown, False otherwise.
+       """
+       messages_shown = False
+       try:
+               from portage.elog import mod_echo
+       except ImportError:
+               pass # happens during downgrade to a version without the module
+       else:
+               messages_shown = bool(mod_echo._items)
+               mod_echo.finalize()
+       return messages_shown
+
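A minimal sketch of the intended call pattern (the surrounding output is illustrative, not from this patch):

	from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo

	# Flush any queued mod_echo messages first so that whatever is
	# printed next appears after them on the terminal.
	if _flush_elog_mod_echo():
		print "(elog messages shown above)"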
diff --git a/pym/_emerge/clear_caches.py b/pym/_emerge/clear_caches.py
new file mode 100644 (file)
index 0000000..ce903ba
--- /dev/null
@@ -0,0 +1,19 @@
+import gc
+
+try:
+       import portage
+except ImportError:
+       from os import path as osp
+       import sys
+       sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+       import portage
+
+def clear_caches(trees):
+       for d in trees.itervalues():
+               d["porttree"].dbapi.melt()
+               d["porttree"].dbapi._aux_cache.clear()
+               d["bintree"].dbapi._aux_cache.clear()
+               d["bintree"].dbapi._clear_cache()
+               d["vartree"].dbapi.linkmap._clear_cache()
+       portage.dircache.clear()
+       gc.collect()
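A rough sketch of how the helper is meant to be called (the trees mapping is assumed to come from load_emerge_config(), as elsewhere in emerge):

	from _emerge.clear_caches import clear_caches

	# Drop the porttree/bintree aux caches, the vartree linkmap cache and
	# portage.dircache, then force a garbage collection pass.
	clear_caches(trees)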
diff --git a/pym/_emerge/create_depgraph_params.py b/pym/_emerge/create_depgraph_params.py
new file mode 100644 (file)
index 0000000..fab741b
--- /dev/null
@@ -0,0 +1,33 @@
+def create_depgraph_params(myopts, myaction):
+       #configure emerge engine parameters
+       #
+       # self:      include _this_ package regardless of whether it is merged.
+       # selective: exclude the package if it is merged
+       # recurse:   go into the dependencies
+       # deep:      go into the dependencies of already merged packages
+       # empty:     pretend nothing is merged
+       # complete:  completely account for all known dependencies
+       # remove:    build graph for use in removing packages
+       myparams = set(["recurse"])
+
+       if myaction == "remove":
+               myparams.add("remove")
+               myparams.add("complete")
+               return myparams
+
+       if "--update" in myopts or \
+               "--newuse" in myopts or \
+               "--reinstall" in myopts or \
+               "--noreplace" in myopts:
+               myparams.add("selective")
+       if "--emptytree" in myopts:
+               myparams.add("empty")
+               myparams.discard("selective")
+       if "--nodeps" in myopts:
+               myparams.discard("recurse")
+       if "--deep" in myopts:
+               myparams.add("deep")
+       if "--complete-graph" in myopts:
+               myparams.add("complete")
+       return myparams
+
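A small worked example of the option-to-parameter mapping above (the option values are illustrative):

	from _emerge.create_depgraph_params import create_depgraph_params

	myopts = {"--update": True, "--deep": True, "--emptytree": True}
	# "--update" adds "selective", but "--emptytree" discards it again,
	# so the resulting parameter set is: deep, empty, recurse.
	print sorted(create_depgraph_params(myopts, None))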
diff --git a/pym/_emerge/create_world_atom.py b/pym/_emerge/create_world_atom.py
new file mode 100644 (file)
index 0000000..1b13c40
--- /dev/null
@@ -0,0 +1,91 @@
+try:
+       import portage
+except ImportError:
+       from os import path as osp
+       import sys
+       sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+       import portage
+
+def create_world_atom(pkg, args_set, root_config):
+       """Create a new atom for the world file if one does not exist.  If the
+       argument atom is precise enough to identify a specific slot then a slot
+       atom will be returned. Atoms that are in the system set may also be stored
+       in world since system atoms can only match one slot while world atoms can
+       be greedy with respect to slots.  Unslotted system packages will not be
+       stored in world."""
+
+       arg_atom = args_set.findAtomForPackage(pkg)
+       if not arg_atom:
+               return None
+       cp = portage.dep_getkey(arg_atom)
+       new_world_atom = cp
+       sets = root_config.sets
+       portdb = root_config.trees["porttree"].dbapi
+       vardb = root_config.trees["vartree"].dbapi
+       available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
+               for cpv in portdb.match(cp))
+       slotted = len(available_slots) > 1 or \
+               (len(available_slots) == 1 and "0" not in available_slots)
+       if not slotted:
+               # check the vdb in case this is multislot
+               available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
+                       for cpv in vardb.match(cp))
+               slotted = len(available_slots) > 1 or \
+                       (len(available_slots) == 1 and "0" not in available_slots)
+       if slotted and arg_atom != cp:
+               # If the user gave a specific atom, store it as a
+               # slot atom in the world file.
+               slot_atom = pkg.slot_atom
+
+               # For USE=multislot, there are a couple of cases to
+               # handle here:
+               #
+               # 1) SLOT="0", but the real SLOT spontaneously changed to some
+               #    unknown value, so just record an unslotted atom.
+               #
+               # 2) SLOT comes from an installed package and there is no
+               #    matching SLOT in the portage tree.
+               #
+               # Make sure that the slot atom is available in either the
+               # portdb or the vardb, since otherwise the user certainly
+               # doesn't want the SLOT atom recorded in the world file
+               # (case 1 above).  If it's only available in the vardb,
+               # the user may be trying to prevent a USE=multislot
+               # package from being removed by --depclean (case 2 above).
+
+               mydb = portdb
+               if not portdb.match(slot_atom):
+                       # SLOT seems to come from an installed multislot package
+                       mydb = vardb
+               # If there is no installed package matching the SLOT atom,
+               # it probably changed SLOT spontaneously due to USE=multislot,
+               # so just record an unslotted atom.
+               if vardb.match(slot_atom):
+                       # Now verify that the argument is precise
+                       # enough to identify a specific slot.
+                       matches = mydb.match(arg_atom)
+                       matched_slots = set()
+                       for cpv in matches:
+                               matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
+                       if len(matched_slots) == 1:
+                               new_world_atom = slot_atom
+
+       if new_world_atom == sets["world"].findAtomForPackage(pkg):
+               # Both atoms would be identical, so there's nothing to add.
+               return None
+       if not slotted:
+               # Unlike world atoms, system atoms are not greedy for slots, so they
+               # can't be safely excluded from world if they are slotted.
+               system_atom = sets["system"].findAtomForPackage(pkg)
+               if system_atom:
+                       if not portage.dep_getkey(system_atom).startswith("virtual/"):
+                               return None
+                       # System virtuals aren't safe to exclude from world since they can
+                       # match multiple old-style virtuals but only one of them will be
+                       # pulled in by update or depclean.
+                       providers = portdb.mysettings.getvirtuals().get(
+                               portage.dep_getkey(system_atom))
+                       if providers and len(providers) == 1 and providers[0] == cp:
+                               return None
+       return new_world_atom
+
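A hedged sketch of the caller's side (pkg, args_set and root_config are assumed to be set up as in Scheduler._world_atom; the locking and hasattr checks done there are omitted):

	from _emerge.create_world_atom import create_world_atom

	# Returns None when nothing should be recorded (e.g. an unslotted
	# system package), otherwise a plain or slot-qualified atom.
	atom = create_world_atom(pkg, args_set, root_config)
	if atom is not None:
		root_config.sets["world"].add(atom)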
diff --git a/pym/_emerge/depgraph.py b/pym/_emerge/depgraph.py
new file mode 100644 (file)
index 0000000..b590128
--- /dev/null
@@ -0,0 +1,4973 @@
+import gc
+import os
+import re
+import sys
+import textwrap
+from itertools import chain, izip
+
+try:
+       import portage
+except ImportError:
+       from os import path as osp
+       sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+       import portage
+
+from portage import digraph
+from portage.output import bold, blue, colorize, create_color_func, darkblue, \
+       darkgreen, green, nc_len, red, teal, turquoise, yellow
+bad = create_color_func("BAD")
+from portage.sets import SETPREFIX
+from portage.sets.base import InternalPackageSet
+from portage.util import cmp_sort_key, writemsg
+
+from _emerge.AtomArg import AtomArg
+from _emerge.Blocker import Blocker
+from _emerge.BlockerCache import BlockerCache
+from _emerge.BlockerDepPriority import BlockerDepPriority
+from _emerge.countdown import countdown
+from _emerge.create_world_atom import create_world_atom
+from _emerge.Dependency import Dependency
+from _emerge.DependencyArg import DependencyArg
+from _emerge.DepPriority import DepPriority
+from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
+from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.FakeVartree import FakeVartree
+from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
+from _emerge.format_size import format_size
+from _emerge.is_valid_package_atom import is_valid_package_atom
+from _emerge.Package import Package
+from _emerge.PackageArg import PackageArg
+from _emerge.PackageCounters import PackageCounters
+from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
+from _emerge.RepoDisplay import RepoDisplay
+from _emerge.RootConfig import RootConfig
+from _emerge.search import search
+from _emerge.SetArg import SetArg
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+from _emerge.UnmergeDepPriority import UnmergeDepPriority
+from _emerge.visible import visible
+
+import portage.proxy.lazyimport
+import portage.proxy as proxy
+proxy.lazyimport.lazyimport(globals(),
+       '_emerge.Scheduler:Scheduler',
+)
+#from _emerge.Scheduler import Scheduler
+class depgraph(object):
+
+       pkg_tree_map = RootConfig.pkg_tree_map
+
+       _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
+
+       def __init__(self, settings, trees, myopts, myparams, spinner):
+               self.settings = settings
+               self.target_root = settings["ROOT"]
+               self.myopts = myopts
+               self.myparams = myparams
+               self.edebug = 0
+               if settings.get("PORTAGE_DEBUG", "") == "1":
+                       self.edebug = 1
+               self.spinner = spinner
+               self._running_root = trees["/"]["root_config"]
+               self._opts_no_restart = Scheduler._opts_no_restart
+               self.pkgsettings = {}
+               # Maps slot atom to package for each Package added to the graph.
+               self._slot_pkg_map = {}
+               # Maps nodes to the reasons they were selected for reinstallation.
+               self._reinstall_nodes = {}
+               self.mydbapi = {}
+               self.trees = {}
+               self._trees_orig = trees
+               self.roots = {}
+               # Contains a filtered view of preferred packages that are selected
+               # from available repositories.
+               self._filtered_trees = {}
+               # Contains installed packages and new packages that have been added
+               # to the graph.
+               self._graph_trees = {}
+               # All Package instances
+               self._pkg_cache = {}
+               for myroot in trees:
+                       self.trees[myroot] = {}
+                       # Create a RootConfig instance that references
+                       # the FakeVartree instead of the real one.
+                       self.roots[myroot] = RootConfig(
+                               trees[myroot]["vartree"].settings,
+                               self.trees[myroot],
+                               trees[myroot]["root_config"].setconfig)
+                       for tree in ("porttree", "bintree"):
+                               self.trees[myroot][tree] = trees[myroot][tree]
+                       self.trees[myroot]["vartree"] = \
+                               FakeVartree(trees[myroot]["root_config"],
+                                       pkg_cache=self._pkg_cache)
+                       self.pkgsettings[myroot] = portage.config(
+                               clone=self.trees[myroot]["vartree"].settings)
+                       self._slot_pkg_map[myroot] = {}
+                       vardb = self.trees[myroot]["vartree"].dbapi
+                       preload_installed_pkgs = "--nodeps" not in self.myopts and \
+                               "--buildpkgonly" not in self.myopts
+                       # This fakedbapi instance will model the state that the vdb will
+                       # have after new packages have been installed.
+                       fakedb = PackageVirtualDbapi(vardb.settings)
+                       if preload_installed_pkgs:
+                               for pkg in vardb:
+                                       self.spinner.update()
+                                       # This triggers metadata updates via FakeVartree.
+                                       vardb.aux_get(pkg.cpv, [])
+                                       fakedb.cpv_inject(pkg)
+
+                       # Now that the vardb state is cached in our FakeVartree,
+                       # we won't be needing the real vartree cache for a while.
+                       # To make some room on the heap, clear the vardbapi
+                       # caches.
+                       trees[myroot]["vartree"].dbapi._clear_cache()
+                       gc.collect()
+
+                       self.mydbapi[myroot] = fakedb
+                       def graph_tree():
+                               pass
+                       graph_tree.dbapi = fakedb
+                       self._graph_trees[myroot] = {}
+                       self._filtered_trees[myroot] = {}
+                       # Substitute the graph tree for the vartree in dep_check() since we
+                       # want atom selections to be consistent with package selections
+                       # that have already been made.
+                       self._graph_trees[myroot]["porttree"]   = graph_tree
+                       self._graph_trees[myroot]["vartree"]    = graph_tree
+                       def filtered_tree():
+                               pass
+                       filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
+                       self._filtered_trees[myroot]["porttree"] = filtered_tree
+
+                       # Passing in graph_tree as the vartree here could lead to better
+                       # atom selections in some cases by causing atoms for packages that
+                       # have been added to the graph to be preferred over other choices.
+                       # However, it can trigger atom selections that result in
+                       # unresolvable direct circular dependencies. For example, this
+                       # happens with gwydion-dylan which depends on either itself or
+                       # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
+                       # gwydion-dylan-bin needs to be selected in order to avoid
+                       # an unresolvable direct circular dependency.
+                       #
+                       # To solve the problem described above, pass in "graph_db" so that
+                       # packages that have been added to the graph are distinguishable
+                       # from other available packages and installed packages. Also, pass
+                       # the parent package into self._select_atoms() calls so that
+                       # unresolvable direct circular dependencies can be detected and
+                       # avoided when possible.
+                       self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
+                       self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
+
+                       dbs = []
+                       portdb = self.trees[myroot]["porttree"].dbapi
+                       bindb  = self.trees[myroot]["bintree"].dbapi
+                       vardb  = self.trees[myroot]["vartree"].dbapi
+                       #               (db, pkg_type, built, installed, db_keys)
+                       if "--usepkgonly" not in self.myopts:
+                               db_keys = list(portdb._aux_cache_keys)
+                               dbs.append((portdb, "ebuild", False, False, db_keys))
+                       if "--usepkg" in self.myopts:
+                               db_keys = list(bindb._aux_cache_keys)
+                               dbs.append((bindb,  "binary", True, False, db_keys))
+                       db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
+                       dbs.append((vardb, "installed", True, True, db_keys))
+                       self._filtered_trees[myroot]["dbs"] = dbs
+                       if "--usepkg" in self.myopts:
+                               self.trees[myroot]["bintree"].populate(
+                                       "--getbinpkg" in self.myopts,
+                                       "--getbinpkgonly" in self.myopts)
+               del trees
+
+               self.digraph=portage.digraph()
+               # contains all sets added to the graph
+               self._sets = {}
+               # contains atoms given as arguments
+               self._sets["args"] = InternalPackageSet()
+               # contains all atoms from all sets added to the graph, including
+               # atoms given as arguments
+               self._set_atoms = InternalPackageSet()
+               self._atom_arg_map = {}
+               # contains all nodes pulled in by self._set_atoms
+               self._set_nodes = set()
+               # Contains only Blocker -> Uninstall edges
+               self._blocker_uninstalls = digraph()
+               # Contains only Package -> Blocker edges
+               self._blocker_parents = digraph()
+               # Contains only irrelevant Package -> Blocker edges
+               self._irrelevant_blockers = digraph()
+               # Contains only unsolvable Package -> Blocker edges
+               self._unsolvable_blockers = digraph()
+               # Contains all Blocker -> Blocked Package edges
+               self._blocked_pkgs = digraph()
+               # Contains world packages that have been protected from
+               # uninstallation but may not have been added to the graph
+               # if the graph is not complete yet.
+               self._blocked_world_pkgs = {}
+               self._slot_collision_info = {}
+               # Slot collision nodes are not allowed to block other packages since
+               # blocker validation is only able to account for one package per slot.
+               self._slot_collision_nodes = set()
+               self._parent_atoms = {}
+               self._slot_conflict_parent_atoms = set()
+               self._serialized_tasks_cache = None
+               self._scheduler_graph = None
+               self._displayed_list = None
+               self._pprovided_args = []
+               self._missing_args = []
+               self._masked_installed = set()
+               self._unsatisfied_deps_for_display = []
+               self._unsatisfied_blockers_for_display = None
+               self._circular_deps_for_display = None
+               self._dep_stack = []
+               self._dep_disjunctive_stack = []
+               self._unsatisfied_deps = []
+               self._initially_unsatisfied_deps = []
+               self._ignored_deps = []
+               self._required_set_names = set(["system", "world"])
+               self._select_atoms = self._select_atoms_highest_available
+               self._select_package = self._select_pkg_highest_available
+               self._highest_pkg_cache = {}
+
+       def _show_slot_collision_notice(self):
+               """Show an informational message advising the user to mask one of
+               the packages. In some cases it may be possible to resolve this
+               automatically, but support for backtracking (removal of nodes that have
+               already been selected) will be required in order to handle all possible
+               cases.
+               """
+
+               if not self._slot_collision_info:
+                       return
+
+               self._show_merge_list()
+
+               msg = []
+               msg.append("\n!!! Multiple package instances within a single " + \
+                       "package slot have been pulled\n")
+               msg.append("!!! into the dependency graph, resulting" + \
+                       " in a slot conflict:\n\n")
+               indent = "  "
+               # Max number of parents shown, to avoid flooding the display.
+               max_parents = 3
+               explanation_columns = 70
+               explanations = 0
+               for (slot_atom, root), slot_nodes \
+                       in self._slot_collision_info.iteritems():
+                       msg.append(str(slot_atom))
+                       msg.append("\n\n")
+
+                       for node in slot_nodes:
+                               msg.append(indent)
+                               msg.append(str(node))
+                               parent_atoms = self._parent_atoms.get(node)
+                               if parent_atoms:
+                                       pruned_list = set()
+                                       # Prefer conflict atoms over others.
+                                       for parent_atom in parent_atoms:
+                                               if len(pruned_list) >= max_parents:
+                                                       break
+                                               if parent_atom in self._slot_conflict_parent_atoms:
+                                                       pruned_list.add(parent_atom)
+
+                                       # If this package was pulled in by conflict atoms then
+                                       # show those alone since those are the most interesting.
+                                       if not pruned_list:
+                                               # When generating the pruned list, prefer instances
+                                               # of DependencyArg over instances of Package.
+                                               for parent_atom in parent_atoms:
+                                                       if len(pruned_list) >= max_parents:
+                                                               break
+                                                       parent, atom = parent_atom
+                                                       if isinstance(parent, DependencyArg):
+                                                               pruned_list.add(parent_atom)
+                                               # Prefer Package instances that themselves have been
+                                               # pulled into collision slots.
+                                               for parent_atom in parent_atoms:
+                                                       if len(pruned_list) >= max_parents:
+                                                               break
+                                                       parent, atom = parent_atom
+                                                       if isinstance(parent, Package) and \
+                                                               (parent.slot_atom, parent.root) \
+                                                               in self._slot_collision_info:
+                                                               pruned_list.add(parent_atom)
+                                               for parent_atom in parent_atoms:
+                                                       if len(pruned_list) >= max_parents:
+                                                               break
+                                                       pruned_list.add(parent_atom)
+                                       omitted_parents = len(parent_atoms) - len(pruned_list)
+                                       parent_atoms = pruned_list
+                                       msg.append(" pulled in by\n")
+                                       for parent_atom in parent_atoms:
+                                               parent, atom = parent_atom
+                                               msg.append(2*indent)
+                                               if isinstance(parent,
+                                                       (PackageArg, AtomArg)):
+                                                       # For PackageArg and AtomArg types, it's
+                                                       # redundant to display the atom attribute.
+                                                       msg.append(str(parent))
+                                               else:
+                                                       # Display the specific atom from SetArg or
+                                                       # Package types.
+                                                       msg.append("%s required by %s" % (atom, parent))
+                                               msg.append("\n")
+                                       if omitted_parents:
+                                               msg.append(2*indent)
+                                               msg.append("(and %d more)\n" % omitted_parents)
+                               else:
+                                       msg.append(" (no parents)\n")
+                               msg.append("\n")
+                       explanation = self._slot_conflict_explanation(slot_nodes)
+                       if explanation:
+                               explanations += 1
+                               msg.append(indent + "Explanation:\n\n")
+                               for line in textwrap.wrap(explanation, explanation_columns):
+                                       msg.append(2*indent + line + "\n")
+                               msg.append("\n")
+               msg.append("\n")
+               sys.stderr.write("".join(msg))
+               sys.stderr.flush()
+
+               explanations_for_all = explanations == len(self._slot_collision_info)
+
+               if explanations_for_all or "--quiet" in self.myopts:
+                       return
+
+               msg = []
+               msg.append("It may be possible to solve this problem ")
+               msg.append("by using package.mask to prevent one of ")
+               msg.append("those packages from being selected. ")
+               msg.append("However, it is also possible that conflicting ")
+               msg.append("dependencies exist such that they are impossible to ")
+               msg.append("satisfy simultaneously.  If such a conflict exists in ")
+               msg.append("the dependencies of two different packages, then those ")
+               msg.append("packages can not be installed simultaneously.")
+
+               from formatter import AbstractFormatter, DumbWriter
+               f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
+               for x in msg:
+                       f.add_flowing_data(x)
+               f.end_paragraph(1)
+
+               msg = []
+               msg.append("For more information, see MASKED PACKAGES ")
+               msg.append("section in the emerge man page or refer ")
+               msg.append("to the Gentoo Handbook.")
+               for x in msg:
+                       f.add_flowing_data(x)
+               f.end_paragraph(1)
+               f.writer.flush()
+
+       def _slot_conflict_explanation(self, slot_nodes):
+               """
+               When a slot conflict occurs due to USE deps, there are a few
+               different cases to consider:
+
+               1) New USE are correctly set but --newuse wasn't requested so an
+                  installed package with incorrect USE happened to get pulled
+                  into the graph before the new one.
+
+               2) New USE are incorrectly set but an installed package has correct
+                  USE so it got pulled into the graph, and a new instance also got
+                  pulled in due to --newuse or an upgrade.
+
+               3) Multiple USE deps exist that can't be satisfied simultaneously,
+                  and multiple package instances got pulled into the same slot to
+                  satisfy the conflicting deps.
+
+               Currently, explanations and suggested courses of action are generated
+               for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
+               """
+
+               if len(slot_nodes) != 2:
+                       # Suggestions are only implemented for
+                       # conflicts between two packages.
+                       return None
+
+               all_conflict_atoms = self._slot_conflict_parent_atoms
+               matched_node = None
+               matched_atoms = None
+               unmatched_node = None
+               for node in slot_nodes:
+                       parent_atoms = self._parent_atoms.get(node)
+                       if not parent_atoms:
+                               # Normally, there are always parent atoms. If there are
+                               # none then something unexpected is happening and there's
+                               # currently no suggestion for this case.
+                               return None
+                       conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
+                       for parent_atom in conflict_atoms:
+                               parent, atom = parent_atom
+                               if not atom.use:
+                                       # Suggestions are currently only implemented for cases
+                                       # in which all conflict atoms have USE deps.
+                                       return None
+                       if conflict_atoms:
+                               if matched_node is not None:
+                                       # If conflict atoms match multiple nodes
+                                       # then there's no suggestion.
+                                       return None
+                               matched_node = node
+                               matched_atoms = conflict_atoms
+                       else:
+                               if unmatched_node is not None:
+                                       # Neither node is matched by conflict atoms, and
+                                       # there is no suggestion for this case.
+                                       return None
+                               unmatched_node = node
+
+               if matched_node is None or unmatched_node is None:
+                       # This shouldn't happen.
+                       return None
+
+               if unmatched_node.installed and not matched_node.installed and \
+                       unmatched_node.cpv == matched_node.cpv:
+                       # If the conflicting packages are the same version then
+                       # --newuse should be all that's needed. If they are different
+                       # versions then there's some other problem.
+                       return "New USE are correctly set, but --newuse wasn't" + \
+                               " requested, so an installed package with incorrect USE " + \
+                               "happened to get pulled into the dependency graph. " + \
+                               "In order to solve " + \
+                               "this, either specify the --newuse option or explicitly " + \
+                               "reinstall '%s'." % matched_node.slot_atom
+
+               if matched_node.installed and not unmatched_node.installed:
+                       atoms = sorted(set(atom for parent, atom in matched_atoms))
+                       explanation = ("New USE for '%s' are incorrectly set. " + \
+                               "In order to solve this, adjust USE to satisfy '%s'") % \
+                               (matched_node.slot_atom, atoms[0])
+                       if len(atoms) > 1:
+                               for atom in atoms[1:-1]:
+                                       explanation += ", '%s'" % (atom,)
+                               if len(atoms) > 2:
+                                       explanation += ","
+                               explanation += " and '%s'" % (atoms[-1],)
+                       explanation += "."
+                       return explanation
+
+               return None
+
+       def _process_slot_conflicts(self):
+               """
+               Process slot conflict data to identify specific atoms which
+               lead to conflict. These atoms only match a subset of the
+               packages that have been pulled into a given slot.
+               """
+               for (slot_atom, root), slot_nodes \
+                       in self._slot_collision_info.iteritems():
+
+                       all_parent_atoms = set()
+                       for pkg in slot_nodes:
+                               parent_atoms = self._parent_atoms.get(pkg)
+                               if not parent_atoms:
+                                       continue
+                               all_parent_atoms.update(parent_atoms)
+
+                       for pkg in slot_nodes:
+                               parent_atoms = self._parent_atoms.get(pkg)
+                               if parent_atoms is None:
+                                       parent_atoms = set()
+                                       self._parent_atoms[pkg] = parent_atoms
+                               for parent_atom in all_parent_atoms:
+                                       if parent_atom in parent_atoms:
+                                               continue
+                                       # Use package set for matching since it will match via
+                                       # PROVIDE when necessary, while match_from_list does not.
+                                       parent, atom = parent_atom
+                                       atom_set = InternalPackageSet(
+                                               initial_atoms=(atom,))
+                                       if atom_set.findAtomForPackage(pkg):
+                                               parent_atoms.add(parent_atom)
+                                       else:
+                                               self._slot_conflict_parent_atoms.add(parent_atom)
+
+       def _reinstall_for_flags(self, forced_flags,
+               orig_use, orig_iuse, cur_use, cur_iuse):
+               """Return a set of flags that trigger reinstallation, or None if there
+               are no such flags."""
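+               # Illustrative (hypothetical) values: under --newuse, if "gtk" is in
+               # both orig_iuse and cur_iuse but enabled only in cur_use, the
+               # symmetric difference of the enabled flag sets yields set(["gtk"]),
+               # which triggers a reinstall.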
+               if "--newuse" in self.myopts:
+                       flags = set(orig_iuse.symmetric_difference(
+                               cur_iuse).difference(forced_flags))
+                       flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
+                               cur_iuse.intersection(cur_use)))
+                       if flags:
+                               return flags
+               elif "changed-use" == self.myopts.get("--reinstall"):
+                       flags = orig_iuse.intersection(orig_use).symmetric_difference(
+                               cur_iuse.intersection(cur_use))
+                       if flags:
+                               return flags
+               return None
+
+       def _create_graph(self, allow_unsatisfied=False):
+               dep_stack = self._dep_stack
+               dep_disjunctive_stack = self._dep_disjunctive_stack
+               while dep_stack or dep_disjunctive_stack:
+                       self.spinner.update()
+                       while dep_stack:
+                               dep = dep_stack.pop()
+                               if isinstance(dep, Package):
+                                       if not self._add_pkg_deps(dep,
+                                               allow_unsatisfied=allow_unsatisfied):
+                                               return 0
+                                       continue
+                               if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
+                                       return 0
+                       if dep_disjunctive_stack:
+                               if not self._pop_disjunction(allow_unsatisfied):
+                                       return 0
+               return 1
+
+       def _add_dep(self, dep, allow_unsatisfied=False):
+               debug = "--debug" in self.myopts
+               buildpkgonly = "--buildpkgonly" in self.myopts
+               nodeps = "--nodeps" in self.myopts
+               empty = "empty" in self.myparams
+               deep = "deep" in self.myparams
+               update = "--update" in self.myopts and dep.depth <= 1
+               if dep.blocker:
+                       if not buildpkgonly and \
+                               not nodeps and \
+                               dep.parent not in self._slot_collision_nodes:
+                               if dep.parent.onlydeps:
+                                       # It's safe to ignore blockers if the
+                                       # parent is an --onlydeps node.
+                                       return 1
+                               # The blocker applies to the root where
+                               # the parent is or will be installed.
+                               blocker = Blocker(atom=dep.atom,
+                                       eapi=dep.parent.metadata["EAPI"],
+                                       root=dep.parent.root)
+                               self._blocker_parents.add(blocker, dep.parent)
+                       return 1
+               dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
+                       onlydeps=dep.onlydeps)
+               if not dep_pkg:
+                       if dep.priority.optional:
+                               # This could be an unnecessary build-time dep
+                               # pulled in by --with-bdeps=y.
+                               return 1
+                       if allow_unsatisfied:
+                               self._unsatisfied_deps.append(dep)
+                               return 1
+                       self._unsatisfied_deps_for_display.append(
+                               ((dep.root, dep.atom), {"myparent":dep.parent}))
+                       return 0
+               # In some cases, dep_check will return deps that shouldn't
+               # be processed any further, so they are identified and
+               # discarded here. Try to discard as few as possible since
+               # discarded dependencies reduce the amount of information
+               # available for optimization of merge order.
+               if dep.priority.satisfied and \
+                       not dep_pkg.installed and \
+                       not (existing_node or empty or deep or update):
+                       myarg = None
+                       if dep.root == self.target_root:
+                               try:
+                                       myarg = self._iter_atoms_for_pkg(dep_pkg).next()
+                               except StopIteration:
+                                       pass
+                               except portage.exception.InvalidDependString:
+                                       if not dep_pkg.installed:
+                                               # This shouldn't happen since the package
+                                               # should have been masked.
+                                               raise
+                       if not myarg:
+                               self._ignored_deps.append(dep)
+                               return 1
+
+               if not self._add_pkg(dep_pkg, dep):
+                       return 0
+               return 1
+
+       def _add_pkg(self, pkg, dep):
+               myparent = None
+               priority = None
+               depth = 0
+               if dep is None:
+                       dep = Dependency()
+               else:
+                       myparent = dep.parent
+                       priority = dep.priority
+                       depth = dep.depth
+               if priority is None:
+                       priority = DepPriority()
+               """
+               Fills the digraph with nodes comprised of packages to merge.
+               mybigkey is the package spec of the package to merge.
+               myparent is the package depending on mybigkey ( or None )
+               addme = Should we add this package to the digraph or are we just looking at its deps?
+                       Think --onlydeps, we need to ignore packages in that case.
+               #stuff to add:
+               #SLOT-aware emerge
+               #IUSE-aware emerge -> USE DEP aware depgraph
+               #"no downgrade" emerge
+               """
+               # Ensure that the dependencies of the same package
+               # are never processed more than once.
+               previously_added = pkg in self.digraph
+
+               # select the correct /var database that we'll be checking against
+               vardbapi = self.trees[pkg.root]["vartree"].dbapi
+               pkgsettings = self.pkgsettings[pkg.root]
+
+               arg_atoms = None
+               try:
+                       arg_atoms = list(self._iter_atoms_for_pkg(pkg))
+               except portage.exception.InvalidDependString, e:
+                       if not pkg.installed:
+                               show_invalid_depstring_notice(
+                                       pkg, pkg.metadata["PROVIDE"], str(e))
+                               return 0
+                       del e
+
+               if not pkg.onlydeps:
+                       if not pkg.installed and \
+                               "empty" not in self.myparams and \
+                               vardbapi.match(pkg.slot_atom):
+                               # Increase the priority of dependencies on packages that
+                               # are being rebuilt. This optimizes merge order so that
+                               # dependencies are rebuilt/updated as soon as possible,
+                               # which is needed especially when emerge is called by
+                               # revdep-rebuild since dependencies may be affected by ABI
+                               # breakage that has rendered them useless. Don't adjust
+                               # priority here when in "empty" mode since all packages
+                               # are being merged in that case.
+                               priority.rebuild = True
+
+                       existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
+                       slot_collision = False
+                       if existing_node:
+                               existing_node_matches = pkg.cpv == existing_node.cpv
+                               if existing_node_matches and \
+                                       pkg != existing_node and \
+                                       dep.atom is not None:
+                                       # Use package set for matching since it will match via
+                                       # PROVIDE when necessary, while match_from_list does not.
+                                       atom_set = InternalPackageSet(initial_atoms=[dep.atom])
+                                       if not atom_set.findAtomForPackage(existing_node):
+                                               existing_node_matches = False
+                               if existing_node_matches:
+                                       # The existing node can be reused.
+                                       if arg_atoms:
+                                               for parent_atom in arg_atoms:
+                                                       parent, atom = parent_atom
+                                                       self.digraph.add(existing_node, parent,
+                                                               priority=priority)
+                                                       self._add_parent_atom(existing_node, parent_atom)
+                                       # If a direct circular dependency is not an unsatisfied
+                                       # buildtime dependency then drop it here since otherwise
+                                       # it can skew the merge order calculation in an unwanted
+                                       # way.
+                                       if existing_node != myparent or \
+                                               (priority.buildtime and not priority.satisfied):
+                                               self.digraph.addnode(existing_node, myparent,
+                                                       priority=priority)
+                                               if dep.atom is not None and dep.parent is not None:
+                                                       self._add_parent_atom(existing_node,
+                                                               (dep.parent, dep.atom))
+                                       return 1
+                               else:
+
+                                       # A slot collision has occurred.  Sometimes this coincides
+                                       # with unresolvable blockers, so the slot collision will be
+                                       # shown later if there are no unresolvable blockers.
+                                       self._add_slot_conflict(pkg)
+                                       slot_collision = True
+
+                       if slot_collision:
+                               # Now add this node to the graph so that self.display()
+                               # can show use flags and --tree output.  This node is
+                               # only being partially added to the graph.  It must not be
+                               # allowed to interfere with the other nodes that have been
+                               # added.  Do not overwrite data for existing nodes in
+                               # self.mydbapi since that data will be used for blocker
+                               # validation.
+                               # Even though the graph is now invalid, continue to process
+                               # dependencies so that things like --fetchonly can still
+                               # function despite collisions.
+                               pass
+                       elif not previously_added:
+                               self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
+                               self.mydbapi[pkg.root].cpv_inject(pkg)
+                               self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
+
+                       if not pkg.installed:
+                               # Allow this package to satisfy old-style virtuals in case it
+                               # doesn't already. Any pre-existing providers will be preferred
+                               # over this one.
+                               try:
+                                       pkgsettings.setinst(pkg.cpv, pkg.metadata)
+                                       # For consistency, also update the global virtuals.
+                                       settings = self.roots[pkg.root].settings
+                                       settings.unlock()
+                                       settings.setinst(pkg.cpv, pkg.metadata)
+                                       settings.lock()
+                               except portage.exception.InvalidDependString, e:
+                                       show_invalid_depstring_notice(
+                                               pkg, pkg.metadata["PROVIDE"], str(e))
+                                       del e
+                                       return 0
+
+               if arg_atoms:
+                       self._set_nodes.add(pkg)
+
+               # Do this even for --onlydeps, when the package itself will not
+               # be merged, so that the parent/child relationship is always
+               # known in case self._show_slot_collision_notice() needs to be
+               # called later.
+               self.digraph.add(pkg, myparent, priority=priority)
+               if dep.atom is not None and dep.parent is not None:
+                       self._add_parent_atom(pkg, (dep.parent, dep.atom))
+
+               if arg_atoms:
+                       for parent_atom in arg_atoms:
+                               parent, atom = parent_atom
+                               self.digraph.add(pkg, parent, priority=priority)
+                               self._add_parent_atom(pkg, parent_atom)
+
+               # This section determines whether we recurse into this package's
+               # dependencies.  We want to go deeper in a few cases:
+               #   - when installing a package, its dependencies must be met;
+               #   - with emerge --deep <pkgspec>, the dependencies of pkgspec
+               #     are checked recursively.
+               # In --nodeps (no recursion) mode, only one level of
+               # dependencies is checked.
+               dep_stack = self._dep_stack
+               if "recurse" not in self.myparams:
+                       return 1
+               elif pkg.installed and \
+                       "deep" not in self.myparams:
+                       dep_stack = self._ignored_deps
+
+               self.spinner.update()
+
+               if arg_atoms:
+                       depth = 0
+               pkg.depth = depth
+               if not previously_added:
+                       dep_stack.append(pkg)
+               return 1
+
+       def _add_parent_atom(self, pkg, parent_atom):
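+               # Remember which (parent, atom) pairs pulled this package in,
+               # for later use in conflict messages and debugging output.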
+               parent_atoms = self._parent_atoms.get(pkg)
+               if parent_atoms is None:
+                       parent_atoms = set()
+                       self._parent_atoms[pkg] = parent_atoms
+               parent_atoms.add(parent_atom)
+
+       def _add_slot_conflict(self, pkg):
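+               # Record a slot conflict for this package's (slot_atom, root),
+               # remembering both the package already selected for that slot
+               # and the newly conflicting one so they can be displayed later.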
+               self._slot_collision_nodes.add(pkg)
+               slot_key = (pkg.slot_atom, pkg.root)
+               slot_nodes = self._slot_collision_info.get(slot_key)
+               if slot_nodes is None:
+                       slot_nodes = set()
+                       slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
+                       self._slot_collision_info[slot_key] = slot_nodes
+               slot_nodes.add(pkg)
+
+       def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
+
+               mytype = pkg.type_name
+               myroot = pkg.root
+               mykey = pkg.cpv
+               metadata = pkg.metadata
+               myuse = pkg.use.enabled
+               jbigkey = pkg
+               depth = pkg.depth + 1
+               removal_action = "remove" in self.myparams
+
+               edepend = {}
+               depkeys = ["DEPEND", "RDEPEND", "PDEPEND"]
+               for k in depkeys:
+                       edepend[k] = metadata[k]
+
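+               # With --buildpkgonly, runtime and post dependencies of packages
+               # that still need to be built are irrelevant unless --deep or
+               # --empty forces a complete graph, so drop them here.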
+               if not pkg.built and \
+                       "--buildpkgonly" in self.myopts and \
+                       "deep" not in self.myparams and \
+                       "empty" not in self.myparams:
+                       edepend["RDEPEND"] = ""
+                       edepend["PDEPEND"] = ""
+               bdeps_optional = False
+
+               if pkg.built and not removal_action:
+                       if self.myopts.get("--with-bdeps", "n") == "y":
+                               # Pull in build-time deps as requested, but mark them as
+                               # "optional" since they are not strictly required. This allows
+                               # more freedom in the merge order calculation for solving
+                               # circular dependencies. Don't convert to PDEPEND since that
+                               # could make --with-bdeps=y less effective if it is used to
+                               # adjust merge order to prevent built_with_use() calls from
+                               # failing.
+                               bdeps_optional = True
+                       else:
+                               # Built packages do not have build-time dependencies.
+                               edepend["DEPEND"] = ""
+
+               if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
+                       edepend["DEPEND"] = ""
+
+               bdeps_root = "/"
+               root_deps = self.myopts.get("--root-deps")
+               if root_deps is not None:
+                       if root_deps is True:
+                               bdeps_root = myroot
+                       elif root_deps == "rdeps":
+                               edepend["DEPEND"] = ""
+
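+               # Each entry pairs a target root with a dependency string and
+               # the priority used for the resulting graph edges: DEPEND is
+               # resolved against bdeps_root, while RDEPEND and PDEPEND are
+               # resolved against the package's own root.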
+               deps = (
+                       (bdeps_root, edepend["DEPEND"],
+                               self._priority(buildtime=(not bdeps_optional),
+                               optional=bdeps_optional)),
+                       (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
+                       (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
+               )
+
+               debug = "--debug" in self.myopts
+               strict = mytype != "installed"
+               try:
+                       if not strict:
+                               portage.dep._dep_check_strict = False
+
+                       for dep_root, dep_string, dep_priority in deps:
+                               if not dep_string:
+                                       continue
+                               if debug:
+                                       print
+                                       print "Parent:   ", jbigkey
+                                       print "Depstring:", dep_string
+                                       print "Priority:", dep_priority
+
+                               try:
+
+                                       dep_string = portage.dep.paren_normalize(
+                                               portage.dep.use_reduce(
+                                               portage.dep.paren_reduce(dep_string),
+                                               uselist=pkg.use.enabled))
+
+                                       dep_string = list(self._queue_disjunctive_deps(
+                                               pkg, dep_root, dep_priority, dep_string))
+
+                               except portage.exception.InvalidDependString, e:
+                                       if pkg.installed:
+                                               del e
+                                               continue
+                                       show_invalid_depstring_notice(pkg, dep_string, str(e))
+                                       return 0
+
+                               if not dep_string:
+                                       continue
+
+                               dep_string = portage.dep.paren_enclose(dep_string)
+
+                               if not self._add_pkg_dep_string(
+                                       pkg, dep_root, dep_priority, dep_string,
+                                       allow_unsatisfied):
+                                       return 0
+
+               except portage.exception.AmbiguousPackageName, e:
+                       pkgs = e.args[0]
+                       portage.writemsg("\n\n!!! An atom in the dependencies " + \
+                               "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
+                       for cpv in pkgs:
+                               portage.writemsg("    %s\n" % cpv, noiselevel=-1)
+                       portage.writemsg("\n", noiselevel=-1)
+                       if mytype == "binary":
+                               portage.writemsg(
+                                       "!!! This binary package cannot be installed: '%s'\n" % \
+                                       mykey, noiselevel=-1)
+                       elif mytype == "ebuild":
+                               portdb = self.roots[myroot].trees["porttree"].dbapi
+                               myebuild, mylocation = portdb.findname2(mykey)
+                               portage.writemsg("!!! This ebuild cannot be installed: " + \
+                                       "'%s'\n" % myebuild, noiselevel=-1)
+                       portage.writemsg("!!! Please notify the package maintainer " + \
+                               "that atoms must be fully-qualified.\n", noiselevel=-1)
+                       return 0
+               finally:
+                       portage.dep._dep_check_strict = True
+               return 1
+
+       def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
+               allow_unsatisfied):
+               depth = pkg.depth + 1
+               debug = "--debug" in self.myopts
+               strict = pkg.type_name != "installed"
+
+               if debug:
+                       print
+                       print "Parent:   ", pkg
+                       print "Depstring:", dep_string
+                       print "Priority:", dep_priority
+
+               try:
+                       selected_atoms = self._select_atoms(dep_root,
+                               dep_string, myuse=pkg.use.enabled, parent=pkg,
+                               strict=strict, priority=dep_priority)
+               except portage.exception.InvalidDependString, e:
+                       show_invalid_depstring_notice(pkg, dep_string, str(e))
+                       del e
+                       if pkg.installed:
+                               return 1
+                       return 0
+
+               if debug:
+                       print "Candidates:", selected_atoms
+
+               vardb = self.roots[dep_root].trees["vartree"].dbapi
+
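+               # For each atom chosen by dep_check, mark the dependency as
+               # already satisfied when an installed package matches it, then
+               # recurse via _add_dep().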
+               for atom in selected_atoms:
+                       try:
+
+                               atom = portage.dep.Atom(atom)
+
+                               mypriority = dep_priority.copy()
+                               if not atom.blocker and vardb.match(atom):
+                                       mypriority.satisfied = True
+
+                               if not self._add_dep(Dependency(atom=atom,
+                                       blocker=atom.blocker, depth=depth, parent=pkg,
+                                       priority=mypriority, root=dep_root),
+                                       allow_unsatisfied=allow_unsatisfied):
+                                       return 0
+
+                       except portage.exception.InvalidAtom, e:
+                               show_invalid_depstring_notice(
+                                       pkg, dep_string, str(e))
+                               del e
+                               if not pkg.installed:
+                                       return 0
+
+               if debug:
+                       print "Exiting...", pkg
+
+               return 1
+
+       def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
+               """
+               Queue disjunctive (virtual and ||) deps in self._dep_disjunctive_stack.
+               Yields non-disjunctive deps. Raises InvalidDependString when 
+               necessary.
+               """
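+               # dep_struct is a paren_reduce()d dependency structure: nested
+               # lists are traversed recursively, "||" choice groups and
+               # virtual/ atoms are pushed onto the disjunctive stack for
+               # later resolution, and all other atoms are yielded unchanged.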
+               i = 0
+               while i < len(dep_struct):
+                       x = dep_struct[i]
+                       if isinstance(x, list):
+                               for y in self._queue_disjunctive_deps(
+                                       pkg, dep_root, dep_priority, x):
+                                       yield y
+                       elif x == "||":
+                               self._queue_disjunction(pkg, dep_root, dep_priority,
+                                       [ x, dep_struct[ i + 1 ] ] )
+                               i += 1
+                       else:
+                               try:
+                                       x = portage.dep.Atom(x)
+                               except portage.exception.InvalidAtom:
+                                       if not pkg.installed:
+                                               raise portage.exception.InvalidDependString(
+                                                       "invalid atom: '%s'" % x)
+                               else:
+                                       # Note: Eventually this will check for PROPERTIES=virtual
+                                       # or whatever other metadata gets implemented for this
+                                       # purpose.
+                                       if x.cp.startswith('virtual/'):
+                                               self._queue_disjunction( pkg, dep_root,
+                                                       dep_priority, [ str(x) ] )
+                                       else:
+                                               yield str(x)
+                       i += 1
+
+       def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
+               self._dep_disjunctive_stack.append(
+                       (pkg, dep_root, dep_priority, dep_struct))
+
+       def _pop_disjunction(self, allow_unsatisfied):
+               """
+               Pop one disjunctive dep from self._dep_disjunctive_stack, and use it to
+               populate self._dep_stack.
+               """
+               pkg, dep_root, dep_priority, dep_struct = \
+                       self._dep_disjunctive_stack.pop()
+               dep_string = portage.dep.paren_enclose(dep_struct)
+               if not self._add_pkg_dep_string(
+                       pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
+                       return 0
+               return 1
+
+       def _priority(self, **kwargs):
+               if "remove" in self.myparams:
+                       priority_constructor = UnmergeDepPriority
+               else:
+                       priority_constructor = DepPriority
+               return priority_constructor(**kwargs)
+
+       def _dep_expand(self, root_config, atom_without_category):
+               """
+               @param root_config: a root config instance
+               @type root_config: RootConfig
+               @param atom_without_category: an atom without a category component
+               @type atom_without_category: String
+               @rtype: list
+               @returns: a list of atoms containing categories (possibly empty)
+               """
+               null_cp = portage.dep_getkey(insert_category_into_atom(
+                       atom_without_category, "null"))
+               cat, atom_pn = portage.catsplit(null_cp)
+
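+               # Search every configured database for this root and collect
+               # the categories that contain a package named atom_pn.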
+               dbs = self._filtered_trees[root_config.root]["dbs"]
+               categories = set()
+               for db, pkg_type, built, installed, db_keys in dbs:
+                       for cat in db.categories:
+                               if db.cp_list("%s/%s" % (cat, atom_pn)):
+                                       categories.add(cat)
+
+               deps = []
+               for cat in categories:
+                       deps.append(insert_category_into_atom(
+                               atom_without_category, cat))
+               return deps
+
+       def _have_new_virt(self, root, atom_cp):
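+               # True if any configured database for this root contains a
+               # package (e.g. a new-style virtual) with the given cp.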
+               ret = False
+               for db, pkg_type, built, installed, db_keys in \
+                       self._filtered_trees[root]["dbs"]:
+                       if db.cp_list(atom_cp):
+                               ret = True
+                               break
+               return ret
+
+       def _iter_atoms_for_pkg(self, pkg):
+               # TODO: add multiple $ROOT support
+               if pkg.root != self.target_root:
+                       return
+               atom_arg_map = self._atom_arg_map
+               root_config = self.roots[pkg.root]
+               for atom in self._set_atoms.iterAtomsForPackage(pkg):
+                       atom_cp = portage.dep_getkey(atom)
+                       if atom_cp != pkg.cp and \
+                               self._have_new_virt(pkg.root, atom_cp):
+                               continue
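+                       # Skip this atom when a higher version matching it is
+                       # visible in a different slot, since that slot is the
+                       # one the argument should pull in.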
+                       visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
+                       visible_pkgs.reverse() # descending order
+                       higher_slot = None
+                       for visible_pkg in visible_pkgs:
+                               if visible_pkg.cp != atom_cp:
+                                       continue
+                               if pkg >= visible_pkg:
+                                       # This is descending order, and we're not
+                                       # interested in any versions <= pkg given.
+                                       break
+                               if pkg.slot_atom != visible_pkg.slot_atom:
+                                       higher_slot = visible_pkg
+                                       break
+                       if higher_slot is not None:
+                               continue
+                       for arg in atom_arg_map[(atom, pkg.root)]:
+                               if isinstance(arg, PackageArg) and \
+                                       arg.package != pkg:
+                                       continue
+                               yield arg, atom
+
+       def select_files(self, myfiles):
+               """Given a list of .tbz2s, .ebuilds, sets, and deps, create the
+               appropriate depgraph and return a favorite list."""
+               debug = "--debug" in self.myopts
+               root_config = self.roots[self.target_root]
+               sets = root_config.sets
+               getSetAtoms = root_config.setconfig.getSetAtoms
+               myfavorites=[]
+               myroot = self.target_root
+               dbs = self._filtered_trees[myroot]["dbs"]
+               vardb = self.trees[myroot]["vartree"].dbapi
+               real_vardb = self._trees_orig[myroot]["vartree"].dbapi
+               portdb = self.trees[myroot]["porttree"].dbapi
+               bindb = self.trees[myroot]["bintree"].dbapi
+               pkgsettings = self.pkgsettings[myroot]
+               args = []
+               onlydeps = "--onlydeps" in self.myopts
+               lookup_owners = []
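+               # Classify each argument: binary packages (.tbz2), ebuild
+               # paths, absolute paths owned by installed packages, package
+               # sets, and plain package atoms.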
+               for x in myfiles:
+                       ext = os.path.splitext(x)[1]
+                       if ext==".tbz2":
+                               if not os.path.exists(x):
+                                       if os.path.exists(
+                                               os.path.join(pkgsettings["PKGDIR"], "All", x)):
+                                               x = os.path.join(pkgsettings["PKGDIR"], "All", x)
+                                       elif os.path.exists(
+                                               os.path.join(pkgsettings["PKGDIR"], x)):
+                                               x = os.path.join(pkgsettings["PKGDIR"], x)
+                                       else:
+                                               print "\n\n!!! Binary package '"+str(x)+"' does not exist."
+                                               print "!!! Please ensure the tbz2 exists as specified.\n"
+                                               return 0, myfavorites
+                               mytbz2=portage.xpak.tbz2(x)
+                               mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
+                               if os.path.realpath(x) != \
+                                       os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
+                                       print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
+                                       return 0, myfavorites
+                               db_keys = list(bindb._aux_cache_keys)
+                               metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
+                               pkg = Package(type_name="binary", root_config=root_config,
+                                       cpv=mykey, built=True, metadata=metadata,
+                                       onlydeps=onlydeps)
+                               self._pkg_cache[pkg] = pkg
+                               args.append(PackageArg(arg=x, package=pkg,
+                                       root_config=root_config))
+                       elif ext==".ebuild":
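+                               # Derive the category/PN from the ebuild's
+                               # location in the tree and make sure the same
+                               # ebuild would be used by the configured
+                               # repositories.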
+                               ebuild_path = portage.util.normalize_path(os.path.abspath(x))
+                               pkgdir = os.path.dirname(ebuild_path)
+                               tree_root = os.path.dirname(os.path.dirname(pkgdir))
+                               cp = pkgdir[len(tree_root)+1:]
+                               e = portage.exception.PackageNotFound(
+                                       ("%s is not in a valid portage tree " + \
+                                       "hierarchy or does not exist") % x)
+                               if not portage.isvalidatom(cp):
+                                       raise e
+                               cat = portage.catsplit(cp)[0]
+                               mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
+                               if not portage.isvalidatom("="+mykey):
+                                       raise e
+                               ebuild_path = portdb.findname(mykey)
+                               if ebuild_path:
+                                       if ebuild_path != os.path.join(os.path.realpath(tree_root),
+                                               cp, os.path.basename(ebuild_path)):
+                                               print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
+                                               return 0, myfavorites
+                                       if mykey not in portdb.xmatch(
+                                               "match-visible", portage.dep_getkey(mykey)):
+                                               print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
+                                               print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
+                                               print colorize("BAD", "*** page for details.")
+                                               countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
+                                                       "Continuing...")
+                               else:
+                                       raise portage.exception.PackageNotFound(
+                                               "%s is not in a valid portage tree hierarchy or does not exist" % x)
+                               db_keys = list(portdb._aux_cache_keys)
+                               metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
+                               pkg = Package(type_name="ebuild", root_config=root_config,
+                                       cpv=mykey, metadata=metadata, onlydeps=onlydeps)
+                               pkgsettings.setcpv(pkg)
+                               pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
+                               pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
+                               self._pkg_cache[pkg] = pkg
+                               args.append(PackageArg(arg=x, package=pkg,
+                                       root_config=root_config))
+                       elif x.startswith(os.path.sep):
+                               if not x.startswith(myroot):
+                                       portage.writemsg(("\n\n!!! '%s' does not start with" + \
+                                               " $ROOT.\n") % x, noiselevel=-1)
+                                       return 0, []
+                               # Queue these up since it's most efficient to handle
+                               # multiple files in a single iter_owners() call.
+                               lookup_owners.append(x)
+                       else:
+                               if x in ("system", "world"):
+                                       x = SETPREFIX + x
+                               if x.startswith(SETPREFIX):
+                                       s = x[len(SETPREFIX):]
+                                       if s not in sets:
+                                               raise portage.exception.PackageSetNotFound(s)
+                                       if s in self._sets:
+                                               continue
+                                       # Recursively expand sets so that containment tests in
+                                       # self._get_parent_sets() properly match atoms in nested
+                                       # sets (like if world contains system).
+                                       expanded_set = InternalPackageSet(
+                                               initial_atoms=getSetAtoms(s))
+                                       self._sets[s] = expanded_set
+                                       args.append(SetArg(arg=x, set=expanded_set,
+                                               root_config=root_config))
+                                       continue
+                               if not is_valid_package_atom(x):
+                                       portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
+                                               noiselevel=-1)
+                                       portage.writemsg("!!! Please check ebuild(5) for full details.\n")
+                                       portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
+                                       return (0,[])
+                               # Don't expand categories or old-style virtuals here unless
+                               # necessary. Expansion of old-style virtuals here causes at
+                               # least the following problems:
+                               #   1) It's more difficult to determine which set(s) an atom
+                               #      came from, if any.
+                               #   2) It takes away freedom from the resolver to choose other
+                               #      possible expansions when necessary.
+                               if "/" in x:
+                                       args.append(AtomArg(arg=x, atom=x,
+                                               root_config=root_config))
+                                       continue
+                               expanded_atoms = self._dep_expand(root_config, x)
+                               installed_cp_set = set()
+                               for atom in expanded_atoms:
+                                       atom_cp = portage.dep_getkey(atom)
+                                       if vardb.cp_list(atom_cp):
+                                               installed_cp_set.add(atom_cp)
+
+                               if len(installed_cp_set) > 1:
+                                       non_virtual_cps = set()
+                                       for atom_cp in installed_cp_set:
+                                               if not atom_cp.startswith("virtual/"):
+                                                       non_virtual_cps.add(atom_cp)
+                                       if len(non_virtual_cps) == 1:
+                                               installed_cp_set = non_virtual_cps
+
+                               if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
+                                       installed_cp = iter(installed_cp_set).next()
+                                       expanded_atoms = [atom for atom in expanded_atoms \
+                                               if portage.dep_getkey(atom) == installed_cp]
+
+                               if len(expanded_atoms) > 1:
+                                       print
+                                       print
+                                       ambiguous_package_name(x, expanded_atoms, root_config,
+                                               self.spinner, self.myopts)
+                                       return False, myfavorites
+                               if expanded_atoms:
+                                       atom = expanded_atoms[0]
+                               else:
+                                       null_atom = insert_category_into_atom(x, "null")
+                                       null_cp = portage.dep_getkey(null_atom)
+                                       cat, atom_pn = portage.catsplit(null_cp)
+                                       virts_p = root_config.settings.get_virts_p().get(atom_pn)
+                                       if virts_p:
+                                               # Allow the depgraph to choose which virtual.
+                                               atom = insert_category_into_atom(x, "virtual")
+                                       else:
+                                               atom = insert_category_into_atom(x, "null")
+
+                               args.append(AtomArg(arg=x, atom=atom,
+                                       root_config=root_config))
+
+               if lookup_owners:
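+                       # Map the queued filesystem paths to the installed
+                       # packages that own them and emerge those packages by
+                       # slot atom (or plain cp when SLOT is missing).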
+                       relative_paths = []
+                       search_for_multiple = False
+                       if len(lookup_owners) > 1:
+                               search_for_multiple = True
+
+                       for x in lookup_owners:
+                               if not search_for_multiple and os.path.isdir(x):
+                                       search_for_multiple = True
+                               relative_paths.append(x[len(myroot):])
+
+                       owners = set()
+                       for pkg, relative_path in \
+                               real_vardb._owners.iter_owners(relative_paths):
+                               owners.add(pkg.mycpv)
+                               if not search_for_multiple:
+                                       break
+
+                       if not owners:
+                               portage.writemsg(("\n\n!!! '%s' is not claimed " + \
+                                       "by any package.\n") % lookup_owners[0], noiselevel=-1)
+                               return 0, []
+
+                       for cpv in owners:
+                               slot = vardb.aux_get(cpv, ["SLOT"])[0]
+                               if not slot:
+                                       # portage now masks packages with missing slot, but it's
+                                       # possible that one was installed by an older version
+                                       atom = portage.cpv_getkey(cpv)
+                               else:
+                                       atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
+                               args.append(AtomArg(arg=atom, atom=atom,
+                                       root_config=root_config))
+
+               if "--update" in self.myopts:
+                       # In some cases, the greedy slots behavior can pull in a slot that
+                       # the user would want to uninstall due to it being blocked by a
+                       # newer version in a different slot. Therefore, it's necessary to
+                       # detect and discard any that should be uninstalled. Each time
+                       # that arguments are updated, package selections are repeated in
+                       # order to ensure consistency with the current arguments:
+                       #
+                       #  1) Initialize args
+                       #  2) Select packages and generate initial greedy atoms
+                       #  3) Update args with greedy atoms
+                       #  4) Select packages and generate greedy atoms again, while
+                       #     accounting for any blockers between selected packages
+                       #  5) Update args with revised greedy atoms
+
+                       self._set_args(args)
+                       greedy_args = []
+                       for arg in args:
+                               greedy_args.append(arg)
+                               if not isinstance(arg, AtomArg):
+                                       continue
+                               for atom in self._greedy_slots(arg.root_config, arg.atom):
+                                       greedy_args.append(
+                                               AtomArg(arg=arg.arg, atom=atom,
+                                                       root_config=arg.root_config))
+
+                       self._set_args(greedy_args)
+                       del greedy_args
+
+                       # Revise greedy atoms, accounting for any blockers
+                       # between selected packages.
+                       revised_greedy_args = []
+                       for arg in args:
+                               revised_greedy_args.append(arg)
+                               if not isinstance(arg, AtomArg):
+                                       continue
+                               for atom in self._greedy_slots(arg.root_config, arg.atom,
+                                       blocker_lookahead=True):
+                                       revised_greedy_args.append(
+                                               AtomArg(arg=arg.arg, atom=atom,
+                                                       root_config=arg.root_config))
+                       args = revised_greedy_args
+                       del revised_greedy_args
+
+               self._set_args(args)
+
+               myfavorites = set(myfavorites)
+               for arg in args:
+                       if isinstance(arg, (AtomArg, PackageArg)):
+                               myfavorites.add(arg.atom)
+                       elif isinstance(arg, SetArg):
+                               myfavorites.add(arg.arg)
+               myfavorites = list(myfavorites)
+
+               pprovideddict = pkgsettings.pprovideddict
+               if debug:
+                       portage.writemsg("\n", noiselevel=-1)
+               # Order needs to be preserved since a feature of --nodeps
+               # is to allow the user to force a specific merge order.
+               args.reverse()
+               while args:
+                       arg = args.pop()
+                       for atom in arg.set:
+                               self.spinner.update()
+                               dep = Dependency(atom=atom, onlydeps=onlydeps,
+                                       root=myroot, parent=arg)
+                               atom_cp = portage.dep_getkey(atom)
+                               try:
+                                       pprovided = pprovideddict.get(portage.dep_getkey(atom))
+                                       if pprovided and portage.match_from_list(atom, pprovided):
+                                               # A provided package has been specified on the command line.
+                                               self._pprovided_args.append((arg, atom))
+                                               continue
+                                       if isinstance(arg, PackageArg):
+                                               if not self._add_pkg(arg.package, dep) or \
+                                                       not self._create_graph():
+                                                       sys.stderr.write(("\n\n!!! Problem resolving " + \
+                                                               "dependencies for %s\n") % arg.arg)
+                                                       return 0, myfavorites
+                                               continue
+                                       if debug:
+                                               portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
+                                                       (arg, atom), noiselevel=-1)
+                                       pkg, existing_node = self._select_package(
+                                               myroot, atom, onlydeps=onlydeps)
+                                       if not pkg:
+                                               if not (isinstance(arg, SetArg) and \
+                                                       arg.name in ("system", "world")):
+                                                       self._unsatisfied_deps_for_display.append(
+                                                               ((myroot, atom), {}))
+                                                       return 0, myfavorites
+                                               self._missing_args.append((arg, atom))
+                                               continue
+                                       if atom_cp != pkg.cp:
+                                               # For old-style virtuals, we need to repeat the
+                                               # package.provided check against the selected package.
+                                               expanded_atom = atom.replace(atom_cp, pkg.cp)
+                                               pprovided = pprovideddict.get(pkg.cp)
+                                               if pprovided and \
+                                                       portage.match_from_list(expanded_atom, pprovided):
+                                                       # A provided package has been
+                                                       # specified on the command line.
+                                                       self._pprovided_args.append((arg, atom))
+                                                       continue
+                                       if pkg.installed and "selective" not in self.myparams:
+                                               self._unsatisfied_deps_for_display.append(
+                                                       ((myroot, atom), {}))
+                                               # Previous behavior was to bail out in this case, but
+                                               # since the dep is satisfied by the installed package,
+                                               # it's more friendly to continue building the graph
+                                               # and just show a warning message. Therefore, only bail
+                                               # out here if the atom is not from either the system or
+                                               # world set.
+                                               if not (isinstance(arg, SetArg) and \
+                                                       arg.name in ("system", "world")):
+                                                       return 0, myfavorites
+
+                                       # Add the selected package to the graph as soon as possible
+                                       # so that later dep_check() calls can use it as feedback
+                                       # for making more consistent atom selections.
+                                       if not self._add_pkg(pkg, dep):
+                                               if isinstance(arg, SetArg):
+                                                       sys.stderr.write(("\n\n!!! Problem resolving " + \
+                                                               "dependencies for %s from %s\n") % \
+                                                               (atom, arg.arg))
+                                               else:
+                                                       sys.stderr.write(("\n\n!!! Problem resolving " + \
+                                                               "dependencies for %s\n") % atom)
+                                               return 0, myfavorites
+
+                               except portage.exception.MissingSignature, e:
+                                       portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
+                                       portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
+                                       portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
+                                       portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
+                                       portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
+                                       return 0, myfavorites
+                               except portage.exception.InvalidSignature, e:
+                                       portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
+                                       portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
+                                       portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
+                                       portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
+                                       portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
+                                       return 0, myfavorites
+                               except SystemExit, e:
+                                       raise # Needed else can't exit
+                               except Exception, e:
+                                       print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
+                                       print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
+                                       raise
+
+               # Now that the root packages have been added to the graph,
+               # process the dependencies.
+               if not self._create_graph():
+                       return 0, myfavorites
+
+               missing=0
+               if "--usepkgonly" in self.myopts:
+                       for xs in self.digraph.all_nodes():
+                               if not isinstance(xs, Package):
+                                       continue
+                               if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
+                                       if missing == 0:
+                                               print
+                                       missing += 1
+                                       print "Missing binary for:",xs[2]
+
+               try:
+                       self.altlist()
+               except self._unknown_internal_error:
+                       return False, myfavorites
+
+               # We're true here unless we are missing binaries.
+               return (not missing,myfavorites)
+
+       def _set_args(self, args):
+               """
+               Create the "args" package set from atoms and packages given as
+               arguments. This method can be called multiple times if necessary.
+               The package selection cache is automatically invalidated, since
+               arguments influence package selections.
+               """
+               args_set = self._sets["args"]
+               args_set.clear()
+               for arg in args:
+                       if not isinstance(arg, (AtomArg, PackageArg)):
+                               continue
+                       atom = arg.atom
+                       if atom in args_set:
+                               continue
+                       args_set.add(atom)
+
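+               # Rebuild the union of atoms from all sets, and the atom to
+               # argument map used by _iter_atoms_for_pkg().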
+               self._set_atoms.clear()
+               self._set_atoms.update(chain(*self._sets.itervalues()))
+               atom_arg_map = self._atom_arg_map
+               atom_arg_map.clear()
+               for arg in args:
+                       for atom in arg.set:
+                               atom_key = (atom, arg.root_config.root)
+                               refs = atom_arg_map.get(atom_key)
+                               if refs is None:
+                                       refs = []
+                                       atom_arg_map[atom_key] = refs
+                               if arg not in refs:
+                                       refs.append(arg)
+
+               # Invalidate the package selection cache, since
+               # arguments influence package selections.
+               self._highest_pkg_cache.clear()
+               for trees in self._filtered_trees.itervalues():
+                       trees["porttree"].dbapi._clear_cache()
+
+       def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
+               """
+               Return a list of slot atoms corresponding to installed slots that
+               differ from the slot of the highest visible match. When
+               blocker_lookahead is True, slot atoms that would trigger a blocker
+               conflict are automatically discarded, potentially allowing automatic
+               uninstallation of older slots when appropriate.
+               """
+               highest_pkg, in_graph = self._select_package(root_config.root, atom)
+               if highest_pkg is None:
+                       return []
+               vardb = root_config.trees["vartree"].dbapi
+               slots = set()
+               for cpv in vardb.match(atom):
+                       # don't mix new virtuals with old virtuals
+                       if portage.cpv_getkey(cpv) == highest_pkg.cp:
+                               slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
+
+               slots.add(highest_pkg.metadata["SLOT"])
+               if len(slots) == 1:
+                       return []
+               greedy_pkgs = []
+               slots.remove(highest_pkg.metadata["SLOT"])
+               while slots:
+                       slot = slots.pop()
+                       slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
+                       pkg, in_graph = self._select_package(root_config.root, slot_atom)
+                       if pkg is not None and \
+                               pkg.cp == highest_pkg.cp and pkg < highest_pkg:
+                               greedy_pkgs.append(pkg)
+               if not greedy_pkgs:
+                       return []
+               if not blocker_lookahead:
+                       return [pkg.slot_atom for pkg in greedy_pkgs]
+
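+               # Collect the blocker atoms declared by each candidate and by
+               # highest_pkg, so that slots which would conflict can be
+               # filtered out below.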
+               blockers = {}
+               blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
+               for pkg in greedy_pkgs + [highest_pkg]:
+                       dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
+                       try:
+                               atoms = self._select_atoms(
+                                       pkg.root, dep_str, pkg.use.enabled,
+                                       parent=pkg, strict=True)
+                       except portage.exception.InvalidDependString:
+                               continue
+                       blocker_atoms = (x for x in atoms if x.blocker)
+                       blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
+
+               if highest_pkg not in blockers:
+                       return []
+
+               # filter packages with invalid deps
+               greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
+
+               # filter packages that conflict with highest_pkg
+               greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
+                       (blockers[highest_pkg].findAtomForPackage(pkg) or \
+                       blockers[pkg].findAtomForPackage(highest_pkg))]
+
+               if not greedy_pkgs:
+                       return []
+
+               # If two packages conflict, discard the lower version.
+               discard_pkgs = set()
+               greedy_pkgs.sort(reverse=True)
+               for i in xrange(len(greedy_pkgs) - 1):
+                       pkg1 = greedy_pkgs[i]
+                       if pkg1 in discard_pkgs:
+                               continue
+                       for j in xrange(i + 1, len(greedy_pkgs)):
+                               pkg2 = greedy_pkgs[j]
+                               if pkg2 in discard_pkgs:
+                                       continue
+                               if blockers[pkg1].findAtomForPackage(pkg2) or \
+                                       blockers[pkg2].findAtomForPackage(pkg1):
+                                       # pkg1 > pkg2
+                                       discard_pkgs.add(pkg2)
+
+               return [pkg.slot_atom for pkg in greedy_pkgs \
+                       if pkg not in discard_pkgs]
+
+       def _select_atoms_from_graph(self, *pargs, **kwargs):
+               """
+               Prefer atoms matching packages that have already been
+               added to the graph or those that are installed and have
+               not been scheduled for replacement.
+               """
+               kwargs["trees"] = self._graph_trees
+               return self._select_atoms_highest_available(*pargs, **kwargs)
+
+       def _select_atoms_highest_available(self, root, depstring,
+               myuse=None, parent=None, strict=True, trees=None, priority=None):
+               """This will raise InvalidDependString if necessary. If trees is
+               None then self._filtered_trees is used."""
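+               # Example call (a sketch; compare the greedy-slot code above):
+               #   atoms = self._select_atoms(pkg.root, dep_str,
+               #       pkg.use.enabled, parent=pkg, strict=True)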
+               pkgsettings = self.pkgsettings[root]
+               if trees is None:
+                       trees = self._filtered_trees
+               if not getattr(priority, "buildtime", False):
+                       # The parent should only be passed to dep_check() for buildtime
+                       # dependencies since that's the only case when it's appropriate
+                       # to trigger the circular dependency avoidance code which uses it.
+                       # It's important not to trigger the same circular dependency
+                       # avoidance code for runtime dependencies since it's not needed
+                       # and it can promote an incorrect package choice.
+                       parent = None
+               try:
+                       if parent is not None:
+                               trees[root]["parent"] = parent
+                       if not strict:
+                               portage.dep._dep_check_strict = False
+                       mycheck = portage.dep_check(depstring, None,
+                               pkgsettings, myuse=myuse,
+                               myroot=root, trees=trees)
+               finally:
+                       if parent is not None:
+                               trees[root].pop("parent")
+                       portage.dep._dep_check_strict = True
+               if not mycheck[0]:
+                       raise portage.exception.InvalidDependString(mycheck[1])
+               selected_atoms = mycheck[1]
+               return selected_atoms
+
+       def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
+               atom = portage.dep.Atom(atom)
+               atom_set = InternalPackageSet(initial_atoms=(atom,))
+               atom_without_use = atom
+               if atom.use:
+                       atom_without_use = portage.dep.remove_slot(atom)
+                       if atom.slot:
+                               atom_without_use += ":" + atom.slot
+                       atom_without_use = portage.dep.Atom(atom_without_use)
+               xinfo = '"%s"' % atom
+               if arg:
+                       xinfo = '"%s"' % arg
+               # Discard null/ from failed cpv_expand category expansion.
+               xinfo = xinfo.replace("null/", "")
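+               # e.g. a failed expansion like "null/foo" (hypothetical name) is
+               # displayed as just "foo".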
+               masked_packages = []
+               missing_use = []
+               masked_pkg_instances = set()
+               missing_licenses = []
+               have_eapi_mask = False
+               pkgsettings = self.pkgsettings[root]
+               implicit_iuse = pkgsettings._get_implicit_iuse()
+               root_config = self.roots[root]
+               portdb = self.roots[root].trees["porttree"].dbapi
+               dbs = self._filtered_trees[root]["dbs"]
+               for db, pkg_type, built, installed, db_keys in dbs:
+                       if installed:
+                               continue
+                       match = db.match
+                       if hasattr(db, "xmatch"):
+                               cpv_list = db.xmatch("match-all", atom_without_use)
+                       else:
+                               cpv_list = db.match(atom_without_use)
+                       # descending order
+                       cpv_list.reverse()
+                       for cpv in cpv_list:
+                               metadata, mreasons = get_mask_info(root_config, cpv,
+                                       pkgsettings, db, pkg_type, built, installed, db_keys)
+                               if metadata is not None:
+                                       pkg = Package(built=built, cpv=cpv,
+                                               installed=installed, metadata=metadata,
+                                               root_config=root_config)
+                                       if pkg.cp != atom.cp:
+                                               # A cpv can be returned from dbapi.match() as an
+                                               # old-style virtual match even in cases when the
+                                               # package does not actually PROVIDE the virtual.
+                                               # Filter out any such false matches here.
+                                               if not atom_set.findAtomForPackage(pkg):
+                                                       continue
+                                       if mreasons:
+                                               masked_pkg_instances.add(pkg)
+                                       if atom.use:
+                                               missing_use.append(pkg)
+                                               if not mreasons:
+                                                       continue
+                               masked_packages.append(
+                                       (root_config, pkgsettings, cpv, metadata, mreasons))
+
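+               # Work out, per candidate, which USE flags would have to change to
+               # satisfy the atom's USE deps; the result is shown later as e.g.
+               # (hypothetically) "Change USE: +ssl -gtk".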
+               missing_use_reasons = []
+               missing_iuse_reasons = []
+               for pkg in missing_use:
+                       use = pkg.use.enabled
+                       iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
+                       iuse_re = re.compile("^(%s)$" % "|".join(iuse))
+                       missing_iuse = []
+                       for x in atom.use.required:
+                               if iuse_re.match(x) is None:
+                                       missing_iuse.append(x)
+                       mreasons = []
+                       if missing_iuse:
+                               mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
+                               missing_iuse_reasons.append((pkg, mreasons))
+                       else:
+                               need_enable = sorted(atom.use.enabled.difference(use))
+                               need_disable = sorted(atom.use.disabled.intersection(use))
+                               if need_enable or need_disable:
+                                       changes = []
+                                       changes.extend(colorize("red", "+" + x) \
+                                               for x in need_enable)
+                                       changes.extend(colorize("blue", "-" + x) \
+                                               for x in need_disable)
+                                       mreasons.append("Change USE: %s" % " ".join(changes))
+                                       missing_use_reasons.append((pkg, mreasons))
+
+               unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
+                       in missing_use_reasons if pkg not in masked_pkg_instances]
+
+               unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
+                       in missing_iuse_reasons if pkg not in masked_pkg_instances]
+
+               show_missing_use = False
+               if unmasked_use_reasons:
+                       # Only show the latest version.
+                       show_missing_use = unmasked_use_reasons[:1]
+               elif unmasked_iuse_reasons:
+                       if missing_use_reasons:
+                               # All packages with required IUSE are masked,
+                               # so display a normal masking message.
+                               pass
+                       else:
+                               show_missing_use = unmasked_iuse_reasons
+
+               if show_missing_use:
+                       print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
+                       print "!!! One of the following packages is required to complete your request:"
+                       for pkg, mreasons in show_missing_use:
+                               print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
+
+               elif masked_packages:
+                       print "\n!!! " + \
+                               colorize("BAD", "All ebuilds that could satisfy ") + \
+                               colorize("INFORM", xinfo) + \
+                               colorize("BAD", " have been masked.")
+                       print "!!! One of the following masked packages is required to complete your request:"
+                       have_eapi_mask = show_masked_packages(masked_packages)
+                       if have_eapi_mask:
+                               print
+                               msg = ("The current version of portage supports " + \
+                                       "EAPI '%s'. You must upgrade to a newer version" + \
+                                       " of portage before EAPI masked packages can" + \
+                                       " be installed.") % portage.const.EAPI
+                               from textwrap import wrap
+                               for line in wrap(msg, 75):
+                                       print line
+                       print
+                       show_mask_docs()
+               else:
+                       print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
+
+               # Show parent nodes and the argument that pulled them in.
+               traversed_nodes = set()
+               node = myparent
+               msg = []
+               while node is not None:
+                       traversed_nodes.add(node)
+                       msg.append('(dependency required by "%s" [%s])' % \
+                               (colorize('INFORM', str(node.cpv)), node.type_name))
+                       # When traversing to parents, prefer arguments over packages
+                       # since arguments are root nodes. Never traverse the same
+                       # package twice, in order to prevent an infinite loop.
+                       selected_parent = None
+                       for parent in self.digraph.parent_nodes(node):
+                               if isinstance(parent, DependencyArg):
+                                       msg.append('(dependency required by "%s" [argument])' % \
+                                               (colorize('INFORM', str(parent))))
+                                       selected_parent = None
+                                       break
+                               if parent not in traversed_nodes:
+                                       selected_parent = parent
+                       node = selected_parent
+               for line in msg:
+                       print line
+
+               print
+
+       def _select_pkg_highest_available(self, root, atom, onlydeps=False):
+               cache_key = (root, atom, onlydeps)
+               ret = self._highest_pkg_cache.get(cache_key)
+               if ret is not None:
+                       pkg, existing = ret
+                       if pkg and not existing:
+                               existing = self._slot_pkg_map[root].get(pkg.slot_atom)
+                               if existing and existing == pkg:
+                                       # Update the cache to reflect that the
+                                       # package has been added to the graph.
+                                       ret = pkg, pkg
+                                       self._highest_pkg_cache[cache_key] = ret
+                       return ret
+               ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
+               self._highest_pkg_cache[cache_key] = ret
+               pkg, existing = ret
+               if pkg is not None:
+                       settings = pkg.root_config.settings
+                       if visible(settings, pkg) and not (pkg.installed and \
+                               settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
+                               pkg.root_config.visible_pkgs.cpv_inject(pkg)
+               return ret
+
+       def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
+               root_config = self.roots[root]
+               pkgsettings = self.pkgsettings[root]
+               dbs = self._filtered_trees[root]["dbs"]
+               vardb = self.roots[root].trees["vartree"].dbapi
+               portdb = self.roots[root].trees["porttree"].dbapi
+               # List of acceptable packages, ordered by type preference.
+               matched_packages = []
+               highest_version = None
+               if not isinstance(atom, portage.dep.Atom):
+                       atom = portage.dep.Atom(atom)
+               atom_cp = atom.cp
+               atom_set = InternalPackageSet(initial_atoms=(atom,))
+               existing_node = None
+               myeb = None
+               usepkgonly = "--usepkgonly" in self.myopts
+               empty = "empty" in self.myparams
+               selective = "selective" in self.myparams
+               reinstall = False
+               noreplace = "--noreplace" in self.myopts
+               # Behavior of the "selective" parameter depends on
+               # whether or not a package matches an argument atom.
+               # If an installed package provides an old-style
+               # virtual that is no longer provided by an available
+               # package, the installed package may match an argument
+               # atom even though none of the available packages do.
+               # Therefore, "selective" logic does not consider
+               # whether or not an installed package matches an
+               # argument atom. It only considers whether or not
+               # available packages match argument atoms, which is
+               # represented by the found_available_arg flag.
+               found_available_arg = False
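+               # Sketch: found_available_arg is set below only when a non-installed
+               # package matches one of the user's argument atoms.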
+               for find_existing_node in True, False:
+                       if existing_node:
+                               break
+                       for db, pkg_type, built, installed, db_keys in dbs:
+                               if existing_node:
+                                       break
+                               if installed and not find_existing_node:
+                                       want_reinstall = reinstall or empty or \
+                                               (found_available_arg and not selective)
+                                       if want_reinstall and matched_packages:
+                                               continue
+                               if hasattr(db, "xmatch"):
+                                       cpv_list = db.xmatch("match-all", atom)
+                               else:
+                                       cpv_list = db.match(atom)
+
+                               # USE=multislot can make an installed package appear as if
+                               # it doesn't satisfy a slot dependency. Rebuilding the ebuild
+                               # won't do any good as long as USE=multislot is enabled since
+                               # the newly built package still won't have the expected slot.
+                               # Therefore, assume that such SLOT dependencies are already
+                               # satisfied rather than forcing a rebuild.
+                               if installed and not cpv_list and atom.slot:
+                                       for cpv in db.match(atom.cp):
+                                               slot_available = False
+                                               for other_db, other_type, other_built, \
+                                                       other_installed, other_keys in dbs:
+                                                       try:
+                                                               if atom.slot == \
+                                                                       other_db.aux_get(cpv, ["SLOT"])[0]:
+                                                                       slot_available = True
+                                                                       break
+                                                       except KeyError:
+                                                               pass
+                                               if not slot_available:
+                                                       continue
+                                               inst_pkg = self._pkg(cpv, "installed",
+                                                       root_config, installed=installed)
+                                               # Remove the slot from the atom and verify that
+                                               # the package matches the resulting atom.
+                                               atom_without_slot = portage.dep.remove_slot(atom)
+                                               if atom.use:
+                                                       atom_without_slot += str(atom.use)
+                                               atom_without_slot = portage.dep.Atom(atom_without_slot)
+                                               if portage.match_from_list(
+                                                       atom_without_slot, [inst_pkg]):
+                                                       cpv_list = [inst_pkg.cpv]
+                                               break
+
+                               if not cpv_list:
+                                       continue
+                               pkg_status = "merge"
+                               if installed or onlydeps:
+                                       pkg_status = "nomerge"
+                               # descending order
+                               cpv_list.reverse()
+                               for cpv in cpv_list:
+                                       # Make --noreplace take precedence over --newuse.
+                                       if not installed and noreplace and \
+                                               cpv in vardb.match(atom):
+                                               # If the installed version is masked, it may
+                                               # be necessary to look at lower versions,
+                                               # in case there is a visible downgrade.
+                                               continue
+                                       reinstall_for_flags = None
+                                       cache_key = (pkg_type, root, cpv, pkg_status)
+                                       calculated_use = True
+                                       pkg = self._pkg_cache.get(cache_key)
+                                       if pkg is None:
+                                               calculated_use = False
+                                               try:
+                                                       metadata = izip(db_keys, db.aux_get(cpv, db_keys))
+                                               except KeyError:
+                                                       continue
+                                               pkg = Package(built=built, cpv=cpv,
+                                                       installed=installed, metadata=metadata,
+                                                       onlydeps=onlydeps, root_config=root_config,
+                                                       type_name=pkg_type)
+                                               metadata = pkg.metadata
+                                               if not built:
+                                                       metadata['CHOST'] = pkgsettings.get('CHOST', '')
+                                               if not built and ("?" in metadata["LICENSE"] or \
+                                                       "?" in metadata["PROVIDE"]):
+                                                       # This is avoided whenever possible because
+                                                       # it's expensive. It only needs to be done here
+                                                       # if it has an effect on visibility.
+                                                       pkgsettings.setcpv(pkg)
+                                                       metadata["USE"] = pkgsettings["PORTAGE_USE"]
+                                                       calculated_use = True
+                                               self._pkg_cache[pkg] = pkg
+
+                                       if not installed or (built and matched_packages):
+                                               # Only enforce visibility on installed packages
+                                               # if there is at least one other visible package
+                                               # available. By filtering installed masked packages
+                                               # here, packages that have been masked since they
+                                               # were installed can be automatically downgraded
+                                               # to an unmasked version.
+                                               try:
+                                                       if not visible(pkgsettings, pkg):
+                                                               continue
+                                               except portage.exception.InvalidDependString:
+                                                       if not installed:
+                                                               continue
+
+                                               # Enable upgrade or downgrade to a version
+                                               # with visible KEYWORDS when the installed
+                                               # version is masked by KEYWORDS, but never
+                                               # reinstall the same exact version only due
+                                               # to a KEYWORDS mask.
+                                               if built and matched_packages:
+
+                                                       different_version = None
+                                                       for avail_pkg in matched_packages:
+                                                               if not portage.dep.cpvequal(
+                                                                       pkg.cpv, avail_pkg.cpv):
+                                                                       different_version = avail_pkg
+                                                                       break
+                                                       if different_version is not None:
+
+                                                               if installed and \
+                                                                       pkgsettings._getMissingKeywords(
+                                                                       pkg.cpv, pkg.metadata):
+                                                                       continue
+
+                                                               # If the ebuild no longer exists or its
+                                                               # keywords have been dropped, reject built
+                                                               # instances (installed or binary).
+                                                               # If --usepkgonly is enabled, assume that
+                                                               # the ebuild status should be ignored.
+                                                               if not usepkgonly:
+                                                                       try:
+                                                                               pkg_eb = self._pkg(
+                                                                                       pkg.cpv, "ebuild", root_config)
+                                                                       except portage.exception.PackageNotFound:
+                                                                               continue
+                                                                       else:
+                                                                               if not visible(pkgsettings, pkg_eb):
+                                                                                       continue
+
+                                       if not pkg.built and not calculated_use:
+                                               # This is avoided whenever possible because
+                                               # it's expensive.
+                                               pkgsettings.setcpv(pkg)
+                                               pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
+
+                                       if pkg.cp != atom.cp:
+                                               # A cpv can be returned from dbapi.match() as an
+                                               # old-style virtual match even in cases when the
+                                               # package does not actually PROVIDE the virtual.
+                                               # Filter out any such false matches here.
+                                               if not atom_set.findAtomForPackage(pkg):
+                                                       continue
+
+                                       myarg = None
+                                       if root == self.target_root:
+                                               try:
+                                                       # Ebuild USE must have been calculated prior
+                                                       # to this point, in case atoms have USE deps.
+                                                       myarg = self._iter_atoms_for_pkg(pkg).next()
+                                               except StopIteration:
+                                                       pass
+                                               except portage.exception.InvalidDependString:
+                                                       if not installed:
+                                                               # masked by corruption
+                                                               continue
+                                       if not installed and myarg:
+                                               found_available_arg = True
+
+                                       if atom.use and not pkg.built:
+                                               use = pkg.use.enabled
+                                               if atom.use.enabled.difference(use):
+                                                       continue
+                                               if atom.use.disabled.intersection(use):
+                                                       continue
+                                       if pkg.cp == atom_cp:
+                                               if highest_version is None:
+                                                       highest_version = pkg
+                                               elif pkg > highest_version:
+                                                       highest_version = pkg
+                                       # At this point, we've found the highest visible
+                                       # match from the current repo. Any lower versions
+                                       # from this repo are ignored, so the loop will
+                                       # always end with a break statement below this
+                                       # point.
+                                       if find_existing_node:
+                                               e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
+                                               if not e_pkg:
+                                                       break
+                                               if portage.dep.match_from_list(atom, [e_pkg]):
+                                                       if highest_version and \
+                                                               e_pkg.cp == atom_cp and \
+                                                               e_pkg < highest_version and \
+                                                               e_pkg.slot_atom != highest_version.slot_atom:
+                                                               # There is a higher version available in a
+                                                               # different slot, so this existing node is
+                                                               # irrelevant.
+                                                               pass
+                                                       else:
+                                                               matched_packages.append(e_pkg)
+                                                               existing_node = e_pkg
+                                               break
+                                       # Compare built package to current config and
+                                       # reject the built package if necessary.
+                                       if built and not installed and \
+                                               ("--newuse" in self.myopts or \
+                                               "--reinstall" in self.myopts):
+                                               iuses = pkg.iuse.all
+                                               old_use = pkg.use.enabled
+                                               if myeb:
+                                                       pkgsettings.setcpv(myeb)
+                                               else:
+                                                       pkgsettings.setcpv(pkg)
+                                               now_use = pkgsettings["PORTAGE_USE"].split()
+                                               forced_flags = set()
+                                               forced_flags.update(pkgsettings.useforce)
+                                               forced_flags.update(pkgsettings.usemask)
+                                               cur_iuse = iuses
+                                               if myeb and not usepkgonly:
+                                                       cur_iuse = myeb.iuse.all
+                                               if self._reinstall_for_flags(forced_flags,
+                                                       old_use, iuses,
+                                                       now_use, cur_iuse):
+                                                       break
+                                       # Compare current config to installed package
+                                       # and do not reinstall if possible.
+                                       if not installed and \
+                                               ("--newuse" in self.myopts or \
+                                               "--reinstall" in self.myopts) and \
+                                               cpv in vardb.match(atom):
+                                               pkgsettings.setcpv(pkg)
+                                               forced_flags = set()
+                                               forced_flags.update(pkgsettings.useforce)
+                                               forced_flags.update(pkgsettings.usemask)
+                                               old_use = vardb.aux_get(cpv, ["USE"])[0].split()
+                                               old_iuse = set(filter_iuse_defaults(
+                                                       vardb.aux_get(cpv, ["IUSE"])[0].split()))
+                                               cur_use = pkg.use.enabled
+                                               cur_iuse = pkg.iuse.all
+                                               reinstall_for_flags = \
+                                                       self._reinstall_for_flags(
+                                                       forced_flags, old_use, old_iuse,
+                                                       cur_use, cur_iuse)
+                                               if reinstall_for_flags:
+                                                       reinstall = True
+                                       if not built:
+                                               myeb = pkg
+                                       matched_packages.append(pkg)
+                                       if reinstall_for_flags:
+                                               self._reinstall_nodes[pkg] = \
+                                                       reinstall_for_flags
+                                       break
+
+               if not matched_packages:
+                       return None, None
+
+               if "--debug" in self.myopts:
+                       for pkg in matched_packages:
+                               portage.writemsg("%s %s\n" % \
+                                       ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
+
+               # Filter out any old-style virtual matches if they are
+               # mixed with new-style virtual matches.
+               cp = portage.dep_getkey(atom)
+               if len(matched_packages) > 1 and \
+                       "virtual" == portage.catsplit(cp)[0]:
+                       for pkg in matched_packages:
+                               if pkg.cp != cp:
+                                       continue
+                               # Got a new-style virtual, so filter
+                               # out any old-style virtuals.
+                               matched_packages = [pkg for pkg in matched_packages \
+                                       if pkg.cp == cp]
+                               break
+
+               if len(matched_packages) > 1:
+                       bestmatch = portage.best(
+                               [pkg.cpv for pkg in matched_packages])
+                       matched_packages = [pkg for pkg in matched_packages \
+                               if portage.dep.cpvequal(pkg.cpv, bestmatch)]
+
+               # ordered by type preference ("ebuild" type is the last resort)
+               return matched_packages[-1], existing_node
+
+       def _select_pkg_from_graph(self, root, atom, onlydeps=False):
+               """
+               Select packages that have already been added to the graph or
+               those that are installed and have not been scheduled for
+               replacement.
+               """
+               graph_db = self._graph_trees[root]["porttree"].dbapi
+               matches = graph_db.match_pkgs(atom)
+               if not matches:
+                       return None, None
+               pkg = matches[-1] # highest match
+               in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
+               return pkg, in_graph
+
+       def _complete_graph(self):
+               """
+               Add any deep dependencies of required sets (args, system, world) that
+               have not been pulled into the graph yet. This ensures that the graph
+               is consistent such that initially satisfied deep dependencies are not
+               broken in the new graph. Initially unsatisfied dependencies are
+               irrelevant since we only want to avoid breaking dependencies that are
+               initially satisfied.
+
+               Since this method can consume enough time to disturb users, it is
+               currently only enabled by the --complete-graph option.
+               """
+               if "--buildpkgonly" in self.myopts or \
+                       "recurse" not in self.myparams:
+                       return 1
+
+               if "complete" not in self.myparams:
+                       # Skip this to avoid consuming enough time to disturb users.
+                       return 1
+
+               # Put the depgraph into a mode that causes it to only
+               # select packages that have already been added to the
+               # graph or those that are installed and have not been
+               # scheduled for replacement. Also, toggle the "deep"
+               # parameter so that all dependencies are traversed and
+               # accounted for.
+               self._select_atoms = self._select_atoms_from_graph
+               self._select_package = self._select_pkg_from_graph
+               already_deep = "deep" in self.myparams
+               if not already_deep:
+                       self.myparams.add("deep")
+
+               for root in self.roots:
+                       required_set_names = self._required_set_names.copy()
+                       if root == self.target_root and \
+                               (already_deep or "empty" in self.myparams):
+                               required_set_names.difference_update(self._sets)
+                       if not required_set_names and not self._ignored_deps:
+                               continue
+                       root_config = self.roots[root]
+                       setconfig = root_config.setconfig
+                       args = []
+                       # Reuse existing SetArg instances when available.
+                       for arg in self.digraph.root_nodes():
+                               if not isinstance(arg, SetArg):
+                                       continue
+                               if arg.root_config != root_config:
+                                       continue
+                               if arg.name in required_set_names:
+                                       args.append(arg)
+                                       required_set_names.remove(arg.name)
+                       # Create new SetArg instances only when necessary.
+                       for s in required_set_names:
+                               expanded_set = InternalPackageSet(
+                                       initial_atoms=setconfig.getSetAtoms(s))
+                               atom = SETPREFIX + s
+                               args.append(SetArg(arg=atom, set=expanded_set,
+                                       root_config=root_config))
+                       vardb = root_config.trees["vartree"].dbapi
+                       for arg in args:
+                               for atom in arg.set:
+                                       self._dep_stack.append(
+                                               Dependency(atom=atom, root=root, parent=arg))
+                       if self._ignored_deps:
+                               self._dep_stack.extend(self._ignored_deps)
+                               self._ignored_deps = []
+                       if not self._create_graph(allow_unsatisfied=True):
+                               return 0
+                       # Check the unsatisfied deps to see if any initially satisfied deps
+                       # will become unsatisfied due to an upgrade. Initially unsatisfied
+                       # deps are irrelevant since we only want to avoid breaking deps
+                       # that are initially satisfied.
+                       while self._unsatisfied_deps:
+                               dep = self._unsatisfied_deps.pop()
+                               matches = vardb.match_pkgs(dep.atom)
+                               if not matches:
+                                       self._initially_unsatisfied_deps.append(dep)
+                                       continue
+                               # A scheduled installation broke a deep dependency.
+                               # Add the installed package to the graph so that it
+                               # will be appropriately reported as a slot collision
+                               # (possibly solvable via backtracking).
+                               pkg = matches[-1] # highest match
+                               if not self._add_pkg(pkg, dep):
+                                       return 0
+                               if not self._create_graph(allow_unsatisfied=True):
+                                       return 0
+               return 1
+
+       def _pkg(self, cpv, type_name, root_config, installed=False):
+               """
+               Get a package instance from the cache, or create a new
+               one if necessary. Raises PackageNotFound if aux_get
+               fails for some reason (package does not exist or is
+               corrupt).
+               """
+               operation = "merge"
+               if installed:
+                       operation = "nomerge"
+               pkg = self._pkg_cache.get(
+                       (type_name, root_config.root, cpv, operation))
+               if pkg is None:
+                       tree_type = self.pkg_tree_map[type_name]
+                       db = root_config.trees[tree_type].dbapi
+                       db_keys = list(self._trees_orig[root_config.root][
+                               tree_type].dbapi._aux_cache_keys)
+                       try:
+                               metadata = izip(db_keys, db.aux_get(cpv, db_keys))
+                       except KeyError:
+                               raise portage.exception.PackageNotFound(cpv)
+                       pkg = Package(cpv=cpv, metadata=metadata,
+                               root_config=root_config, installed=installed)
+                       if type_name == "ebuild":
+                               settings = self.pkgsettings[root_config.root]
+                               settings.setcpv(pkg)
+                               pkg.metadata["USE"] = settings["PORTAGE_USE"]
+                               pkg.metadata['CHOST'] = settings.get('CHOST', '')
+                       self._pkg_cache[pkg] = pkg
+               return pkg
+
+       def validate_blockers(self):
+               """Remove any blockers from the digraph that do not match any of the
+               packages within the graph.  If necessary, create hard deps to ensure
+               correct merge order such that mutually blocking packages are never
+               installed simultaneously."""
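+               # Roughly: for each installed package, gather its blocker atoms
+               # (reusing the blocker cache or graph data when valid), then match
+               # them against both the pre-merge and post-merge package sets.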
+
+               if "--buildpkgonly" in self.myopts or \
+                       "--nodeps" in self.myopts:
+                       return True
+
+               #if "deep" in self.myparams:
+               if True:
+                       # Pull in blockers from all installed packages that haven't already
+                       # been pulled into the depgraph.  This is not enabled by default
+                       # due to the performance penalty that is incurred by all the
+                       # additional dep_check calls that are required.
+
+                       dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
+                       for myroot in self.trees:
+                               vardb = self.trees[myroot]["vartree"].dbapi
+                               portdb = self.trees[myroot]["porttree"].dbapi
+                               pkgsettings = self.pkgsettings[myroot]
+                               final_db = self.mydbapi[myroot]
+
+                               blocker_cache = BlockerCache(myroot, vardb)
+                               stale_cache = set(blocker_cache)
+                               for pkg in vardb:
+                                       cpv = pkg.cpv
+                                       stale_cache.discard(cpv)
+                                       pkg_in_graph = self.digraph.contains(pkg)
+
+                                       # Check for masked installed packages. Only warn about
+                                       # packages that are in the graph in order to avoid warning
+                                       # about those that will be automatically uninstalled during
+                                       # the merge process or by --depclean.
+                                       if pkg in final_db:
+                                               if pkg_in_graph and not visible(pkgsettings, pkg):
+                                                       self._masked_installed.add(pkg)
+
+                                       blocker_atoms = None
+                                       blockers = None
+                                       if pkg_in_graph:
+                                               blockers = []
+                                               try:
+                                                       blockers.extend(
+                                                               self._blocker_parents.child_nodes(pkg))
+                                               except KeyError:
+                                                       pass
+                                               try:
+                                                       blockers.extend(
+                                                               self._irrelevant_blockers.child_nodes(pkg))
+                                               except KeyError:
+                                                       pass
+                                       if blockers is not None:
+                                               blockers = set(str(blocker.atom) \
+                                                       for blocker in blockers)
+
+                                       # If this node has any blockers, create a "nomerge"
+                                       # node for it so that they can be enforced.
+                                       self.spinner.update()
+                                       blocker_data = blocker_cache.get(cpv)
+                                       if blocker_data is not None and \
+                                               blocker_data.counter != long(pkg.metadata["COUNTER"]):
+                                               blocker_data = None
+
+                                       # If blocker data from the graph is available, use
+                                       # it to validate the cache and update the cache if
+                                       # it seems invalid.
+                                       if blocker_data is not None and \
+                                               blockers is not None:
+                                               if not blockers.symmetric_difference(
+                                                       blocker_data.atoms):
+                                                       continue
+                                               blocker_data = None
+
+                                       if blocker_data is None and \
+                                               blockers is not None:
+                                               # Re-use the blockers from the graph.
+                                               blocker_atoms = sorted(blockers)
+                                               counter = long(pkg.metadata["COUNTER"])
+                                               blocker_data = \
+                                                       blocker_cache.BlockerData(counter, blocker_atoms)
+                                               blocker_cache[pkg.cpv] = blocker_data
+                                               continue
+
+                                       if blocker_data:
+                                               blocker_atoms = blocker_data.atoms
+                                       else:
+                                               # Use aux_get() to trigger FakeVartree global
+                                               # updates on *DEPEND when appropriate.
+                                               depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
+                                               # It is crucial to pass in final_db here in order to
+                                               # optimize dep_check calls by eliminating atoms via
+                                               # dep_wordreduce and dep_eval calls.
+                                               try:
+                                                       portage.dep._dep_check_strict = False
+                                                       try:
+                                                               success, atoms = portage.dep_check(depstr,
+                                                                       final_db, pkgsettings, myuse=pkg.use.enabled,
+                                                                       trees=self._graph_trees, myroot=myroot)
+                                                       except Exception, e:
+                                                               if isinstance(e, SystemExit):
+                                                                       raise
+                                                               # This is helpful, for example, if a ValueError
+                                                               # is thrown from cpv_expand due to multiple
+                                                               # matches (this can happen if an atom lacks a
+                                                               # category).
+                                                               show_invalid_depstring_notice(
+                                                                       pkg, depstr, str(e))
+                                                               del e
+                                                               raise
+                                               finally:
+                                                       portage.dep._dep_check_strict = True
+                                               if not success:
+                                                       replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
+                                                       if replacement_pkg and \
+                                                               replacement_pkg[0].operation == "merge":
+                                                               # This package is being replaced anyway, so
+                                                               # ignore invalid dependencies so as not to
+                                                               # annoy the user too much (otherwise they'd be
+                                                               # forced to manually unmerge it first).
+                                                               continue
+                                                       show_invalid_depstring_notice(pkg, depstr, atoms)
+                                                       return False
+                                               blocker_atoms = [myatom for myatom in atoms \
+                                                       if myatom.startswith("!")]
+                                               blocker_atoms.sort()
+                                               counter = long(pkg.metadata["COUNTER"])
+                                               blocker_cache[cpv] = \
+                                                       blocker_cache.BlockerData(counter, blocker_atoms)
+                                       if blocker_atoms:
+                                               try:
+                                                       for atom in blocker_atoms:
+                                                               blocker = Blocker(atom=portage.dep.Atom(atom),
+                                                                       eapi=pkg.metadata["EAPI"], root=myroot)
+                                                               self._blocker_parents.add(blocker, pkg)
+                                               except portage.exception.InvalidAtom, e:
+                                                       depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
+                                                       show_invalid_depstring_notice(
+                                                               pkg, depstr, "Invalid Atom: %s" % (e,))
+                                                       return False
+                               for cpv in stale_cache:
+                                       del blocker_cache[cpv]
+                               blocker_cache.flush()
+                               del blocker_cache
+
+               # Discard any "uninstall" tasks scheduled by previous calls
+               # to this method, since those tasks may not make sense given
+               # the current graph state.
+               previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
+               if previous_uninstall_tasks:
+                       self._blocker_uninstalls = digraph()
+                       self.digraph.difference_update(previous_uninstall_tasks)
+
+               for blocker in self._blocker_parents.leaf_nodes():
+                       self.spinner.update()
+                       root_config = self.roots[blocker.root]
+                       virtuals = root_config.settings.getvirtuals()
+                       myroot = blocker.root
+                       initial_db = self.trees[myroot]["vartree"].dbapi
+                       final_db = self.mydbapi[myroot]
+
+                       provider_virtual = False
+                       if blocker.cp in virtuals and \
+                               not self._have_new_virt(blocker.root, blocker.cp):
+                               provider_virtual = True
+
+                       # Use this to check PROVIDE for each matched package
+                       # when necessary.
+                       atom_set = InternalPackageSet(
+                               initial_atoms=[blocker.atom])
+
+                       if provider_virtual:
+                               atoms = []
+                               for provider_entry in virtuals[blocker.cp]:
+                                       provider_cp = \
+                                               portage.dep_getkey(provider_entry)
+                                       atoms.append(blocker.atom.replace(
+                                               blocker.cp, provider_cp))
+                       else:
+                               atoms = [blocker.atom]
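+                       # e.g. (hypothetical) a block on "!virtual/foo" from an old-style
+                       # virtual expands to a block on each provider, such as
+                       # "!app-misc/foo-impl".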
+
+                       blocked_initial = set()
+                       for atom in atoms:
+                               for pkg in initial_db.match_pkgs(atom):
+                                       if atom_set.findAtomForPackage(pkg):
+                                               blocked_initial.add(pkg)
+
+                       blocked_final = set()
+                       for atom in atoms:
+                               for pkg in final_db.match_pkgs(atom):
+                                       if atom_set.findAtomForPackage(pkg):
+                                               blocked_final.add(pkg)
+
+                       if not blocked_initial and not blocked_final:
+                               parent_pkgs = self._blocker_parents.parent_nodes(blocker)
+                               self._blocker_parents.remove(blocker)
+                               # Discard any parents that don't have any more blockers.
+                               for pkg in parent_pkgs:
+                                       self._irrelevant_blockers.add(blocker, pkg)
+                                       if not self._blocker_parents.child_nodes(pkg):
+                                               self._blocker_parents.remove(pkg)
+                               continue
+                       for parent in self._blocker_parents.parent_nodes(blocker):
+                               unresolved_blocks = False
+                               depends_on_order = set()
+                               for pkg in blocked_initial:
+                                       if pkg.slot_atom == parent.slot_atom:
+                                               # TODO: Support blocks within slots in cases where it
+                                               # might make sense.  For example, a new version might
+                                               # require that the old version be uninstalled at build
+                                               # time.
+                                               continue
+                                       if parent.installed:
+                                               # Two currently installed packages conflict with
+                                               # each other. Ignore this case since the damage
+                                               # is already done and it would likely confuse
+                                               # users if displayed like a normal blocker.
+                                               continue
+
+                                       self._blocked_pkgs.add(pkg, blocker)
+
+                                       if parent.operation == "merge":
+                                               # Maybe the blocked package can be replaced or simply
+                                               # unmerged to resolve this block.
+                                               depends_on_order.add((pkg, parent))
+                                               continue
+                                       # None of the above blocker resolution techniques apply,
+                                       # so apparently this one is unresolvable.
+                                       unresolved_blocks = True
+                               for pkg in blocked_final:
+                                       if pkg.slot_atom == parent.slot_atom:
+                                               # TODO: Support blocks within slots.
+                                               continue
+                                       if parent.operation == "nomerge" and \
+                                               pkg.operation == "nomerge":
+                                               # This blocker will be handled the next time that a
+                                               # merge of either package is triggered.
+                                               continue
+
+                                       self._blocked_pkgs.add(pkg, blocker)
+
+                                       # Maybe the blocking package can be
+                                       # unmerged to resolve this block.
+                                       if parent.operation == "merge" and pkg.installed:
+                                               depends_on_order.add((pkg, parent))
+                                               continue
+                                       elif parent.operation == "nomerge":
+                                               depends_on_order.add((parent, pkg))
+                                               continue
+                                       # None of the above blocker resolution techniques apply,
+                                       # so apparently this one is unresolvable.
+                                       unresolved_blocks = True
+
+                               # Make sure we don't unmerge any packages that have been
+                               # pulled into the graph.
+                               if not unresolved_blocks and depends_on_order:
+                                       for inst_pkg, inst_task in depends_on_order:
+                                               if self.digraph.contains(inst_pkg) and \
+                                                       self.digraph.parent_nodes(inst_pkg):
+                                                       unresolved_blocks = True
+                                                       break
+
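+                               # Create uninstall tasks for the blocked installed packages
+                               # and add hard dependencies to the graph so that the
+                               # uninstalls are ordered correctly relative to the
+                               # conflicting merges.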
+                               if not unresolved_blocks and depends_on_order:
+                                       for inst_pkg, inst_task in depends_on_order:
+                                               uninst_task = Package(built=inst_pkg.built,
+                                                       cpv=inst_pkg.cpv, installed=inst_pkg.installed,
+                                                       metadata=inst_pkg.metadata,
+                                                       operation="uninstall",
+                                                       root_config=inst_pkg.root_config,
+                                                       type_name=inst_pkg.type_name)
+                                               self._pkg_cache[uninst_task] = uninst_task
+                                               # Enforce correct merge order with a hard dep.
+                                               self.digraph.addnode(uninst_task, inst_task,
+                                                       priority=BlockerDepPriority.instance)
+                                               # Count references to this blocker so that it can be
+                                               # invalidated after nodes referencing it have been
+                                               # merged.
+                                               self._blocker_uninstalls.addnode(uninst_task, blocker)
+                               if not unresolved_blocks and not depends_on_order:
+                                       self._irrelevant_blockers.add(blocker, parent)
+                                       self._blocker_parents.remove_edge(blocker, parent)
+                                       if not self._blocker_parents.parent_nodes(blocker):
+                                               self._blocker_parents.remove(blocker)
+                                       if not self._blocker_parents.child_nodes(parent):
+                                               self._blocker_parents.remove(parent)
+                               if unresolved_blocks:
+                                       self._unsolvable_blockers.add(blocker, parent)
+
+               return True
+
+       def _accept_blocker_conflicts(self):
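+               """
+               Return True if blocker conflicts can be tolerated because one of
+               the options that bypasses normal conflict handling (such as
+               --fetchonly or --nodeps) is enabled.
+               """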
+               acceptable = False
+               for x in ("--buildpkgonly", "--fetchonly",
+                       "--fetch-all-uri", "--nodeps"):
+                       if x in self.myopts:
+                               acceptable = True
+                               break
+               return acceptable
+
+       def _merge_order_bias(self, mygraph):
+               """
+               For optimal leaf node selection, promote deep system runtime deps and
+               order nodes from highest to lowest overall reference count.
+               """
+
+               node_info = {}
+               for node in mygraph.order:
+                       node_info[node] = len(mygraph.parent_nodes(node))
+               deep_system_deps = _find_deep_system_runtime_deps(mygraph)
+
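+               # Comparison function: uninstalls sort last, deep system runtime
+               # deps sort first, and the remaining nodes are ordered from
+               # highest to lowest parent reference count.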
+               def cmp_merge_preference(node1, node2):
+
+                       if node1.operation == 'uninstall':
+                               if node2.operation == 'uninstall':
+                                       return 0
+                               return 1
+
+                       if node2.operation == 'uninstall':
+                               if node1.operation == 'uninstall':
+                                       return 0
+                               return -1
+
+                       node1_sys = node1 in deep_system_deps
+                       node2_sys = node2 in deep_system_deps
+                       if node1_sys != node2_sys:
+                               if node1_sys:
+                                       return -1
+                               return 1
+
+                       return node_info[node2] - node_info[node1]
+
+               mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
+
+       def altlist(self, reversed=False):
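+               """
+               Return the serialized merge list, resolving conflicts and
+               serializing tasks on the first call and caching the result for
+               subsequent calls. If reversed is True, return the list in
+               reverse order.
+               """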
+
+               while self._serialized_tasks_cache is None:
+                       self._resolve_conflicts()
+                       try:
+                               self._serialized_tasks_cache, self._scheduler_graph = \
+                                       self._serialize_tasks()
+                       except self._serialize_tasks_retry:
+                               pass
+
+               retlist = self._serialized_tasks_cache[:]
+               if reversed:
+                       retlist.reverse()
+               return retlist
+
+       def schedulerGraph(self):
+               """
+               The scheduler graph is identical to the normal one except that
+               uninstall edges are reversed in specific cases that require
+               conflicting packages to be temporarily installed simultaneously.
+               This is intended for use by the Scheduler in its parallelization
+               logic. It ensures that temporary simultaneous installation of
+               conflicting packages is avoided when appropriate (especially for
+               !!atom blockers), but allowed in specific cases that require it.
+
+               Note that this method calls break_refs() which alters the state of
+               internal Package instances such that this depgraph instance should
+               not be used to perform any more calculations.
+               """
+               if self._scheduler_graph is None:
+                       self.altlist()
+               self.break_refs(self._scheduler_graph.order)
+               return self._scheduler_graph
+
+       def break_refs(self, nodes):
+               """
+               Take a mergelist like that returned from self.altlist() and
+               break any references that lead back to the depgraph. This is
+               useful if you want to hold references to packages without
+               also holding the depgraph on the heap.
+               """
+               for node in nodes:
+                       if hasattr(node, "root_config"):
+                               # The FakeVartree references the _package_cache which
+                               # references the depgraph. So that Package instances don't
+                               # hold the depgraph and FakeVartree on the heap, replace
+                               # the RootConfig that references the FakeVartree with the
+                               # original RootConfig instance which references the actual
+                               # vartree.
+                               node.root_config = \
+                                       self._trees_orig[node.root_config.root]["root_config"]
+
+       def _resolve_conflicts(self):
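+               """
+               Complete the dependency graph, validate blockers, and process any
+               slot collisions, raising an internal error if graph completion or
+               blocker validation fails.
+               """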
+               if not self._complete_graph():
+                       raise self._unknown_internal_error()
+
+               if not self.validate_blockers():
+                       raise self._unknown_internal_error()
+
+               if self._slot_collision_info:
+                       self._process_slot_conflicts()
+
+       def _serialize_tasks(self):
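+               """
+               Serialize the dependency graph into an ordered merge list,
+               scheduling Uninstall tasks where needed to resolve blockers, and
+               return the list together with the scheduler graph.
+               """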
+
+               if "--debug" in self.myopts:
+                       writemsg("\ndigraph:\n\n", noiselevel=-1)
+                       self.digraph.debug_print()
+                       writemsg("\n", noiselevel=-1)
+
+               scheduler_graph = self.digraph.copy()
+
+               if '--nodeps' in self.myopts:
+                       # Preserve the package order given on the command line.
+                       return ([node for node in scheduler_graph \
+                               if isinstance(node, Package) \
+                               and node.operation == 'merge'], scheduler_graph)
+
+               mygraph = self.digraph.copy()
+               # Prune "nomerge" root nodes if nothing depends on them, since
+               # otherwise they slow down merge order calculation. Don't remove
+               # non-root nodes since they help optimize merge order in some cases
+               # such as revdep-rebuild.
+               removed_nodes = set()
+               while True:
+                       for node in mygraph.root_nodes():
+                               if not isinstance(node, Package) or \
+                                       node.installed or node.onlydeps:
+                                       removed_nodes.add(node)
+                       if removed_nodes:
+                               self.spinner.update()
+                               mygraph.difference_update(removed_nodes)
+                       if not removed_nodes:
+                               break
+                       removed_nodes.clear()
+               self._merge_order_bias(mygraph)
+               def cmp_circular_bias(n1, n2):
+                       """
+                       RDEPEND is stronger than PDEPEND and this function
+                       measures such a strength bias within a circular
+                       dependency relationship.
+                       """
+                       n1_n2_medium = n2 in mygraph.child_nodes(n1,
+                               ignore_priority=priority_range.ignore_medium_soft)
+                       n2_n1_medium = n1 in mygraph.child_nodes(n2,
+                               ignore_priority=priority_range.ignore_medium_soft)
+                       if n1_n2_medium == n2_n1_medium:
+                               return 0
+                       elif n1_n2_medium:
+                               return 1
+                       return -1
+               myblocker_uninstalls = self._blocker_uninstalls.copy()
+               retlist = []
+               # Contains uninstall tasks that have been scheduled to
+               # occur after overlapping blockers have been installed.
+               scheduled_uninstalls = set()
+               # Contains any Uninstall tasks that have been ignored
+               # in order to avoid the circular deps code path. These
+               # correspond to blocker conflicts that could not be
+               # resolved.
+               ignored_uninstall_tasks = set()
+               have_uninstall_task = False
+               complete = "complete" in self.myparams
+               asap_nodes = []
+
+               def get_nodes(**kwargs):
+                       """
+                       Returns leaf nodes excluding Uninstall instances
+                       since those should be executed as late as possible.
+                       """
+                       return [node for node in mygraph.leaf_nodes(**kwargs) \
+                               if isinstance(node, Package) and \
+                                       (node.operation != "uninstall" or \
+                                       node in scheduled_uninstalls)]
+
+               # sys-apps/portage needs special treatment if ROOT="/"
+               running_root = self._running_root.root
+               from portage.const import PORTAGE_PACKAGE_ATOM
+               runtime_deps = InternalPackageSet(
+                       initial_atoms=[PORTAGE_PACKAGE_ATOM])
+               running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
+                       PORTAGE_PACKAGE_ATOM)
+               replacement_portage = self.mydbapi[running_root].match_pkgs(
+                       PORTAGE_PACKAGE_ATOM)
+
+               if running_portage:
+                       running_portage = running_portage[0]
+               else:
+                       running_portage = None
+
+               if replacement_portage:
+                       replacement_portage = replacement_portage[0]
+               else:
+                       replacement_portage = None
+
+               if replacement_portage == running_portage:
+                       replacement_portage = None
+
+               if replacement_portage is not None:
+                       # update from running_portage to replacement_portage asap
+                       asap_nodes.append(replacement_portage)
+
+               if running_portage is not None:
+                       try:
+                               portage_rdepend = self._select_atoms_highest_available(
+                                       running_root, running_portage.metadata["RDEPEND"],
+                                       myuse=running_portage.use.enabled,
+                                       parent=running_portage, strict=False)
+                       except portage.exception.InvalidDependString, e:
+                               portage.writemsg("!!! Invalid RDEPEND in " + \
+                                       "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
+                                       (running_root, running_portage.cpv, e), noiselevel=-1)
+                               del e
+                               portage_rdepend = []
+                       runtime_deps.update(atom for atom in portage_rdepend \
+                               if not atom.startswith("!"))
+
+               def gather_deps(ignore_priority, mergeable_nodes,
+                       selected_nodes, node):
+                       """
+                       Recursively gather a group of nodes that RDEPEND on
+                       each other. This ensures that they are merged as a group
+                       and get their RDEPENDs satisfied as soon as possible.
+                       """
+                       if node in selected_nodes:
+                               return True
+                       if node not in mergeable_nodes:
+                               return False
+                       if node == replacement_portage and \
+                               mygraph.child_nodes(node,
+                               ignore_priority=priority_range.ignore_medium_soft):
+                               # Make sure that portage always has all of its
+                               # RDEPENDs installed first.
+                               return False
+                       selected_nodes.add(node)
+                       for child in mygraph.child_nodes(node,
+                               ignore_priority=ignore_priority):
+                               if not gather_deps(ignore_priority,
+                                       mergeable_nodes, selected_nodes, child):
+                                       return False
+                       return True
+
+               def ignore_uninst_or_med(priority):
+                       if priority is BlockerDepPriority.instance:
+                               return True
+                       return priority_range.ignore_medium(priority)
+
+               def ignore_uninst_or_med_soft(priority):
+                       if priority is BlockerDepPriority.instance:
+                               return True
+                       return priority_range.ignore_medium_soft(priority)
+
+               tree_mode = "--tree" in self.myopts
+               # Tracks whether or not the current iteration should prefer asap_nodes
+               # if available.  This is set to False when the previous iteration
+               # failed to select any nodes.  It is reset whenever nodes are
+               # successfully selected.
+               prefer_asap = True
+
+               # Controls whether or not the current iteration should drop edges that
+               # are "satisfied" by installed packages, in order to solve circular
+               # dependencies. The deep runtime dependencies of installed packages are
+               # not checked in this case (bug #199856), so it must be avoided
+               # whenever possible.
+               drop_satisfied = False
+
+               # State of variables for successive iterations that loosen the
+               # criteria for node selection.
+               #
+               # iteration   prefer_asap   drop_satisfied
+               # 1           True          False
+               # 2           False         False
+               # 3           False         True
+               #
+               # If no nodes are selected on the last iteration, it is due to
+               # unresolved blockers or circular dependencies.
+
+               while not mygraph.empty():
+                       self.spinner.update()
+                       selected_nodes = None
+                       ignore_priority = None
+                       if drop_satisfied or (prefer_asap and asap_nodes):
+                               priority_range = DepPrioritySatisfiedRange
+                       else:
+                               priority_range = DepPriorityNormalRange
+                       if prefer_asap and asap_nodes:
+                               # ASAP nodes are merged before their soft deps. Go ahead and
+                               # select root nodes here if necessary, since it's typical for
+                               # the parent to have been removed from the graph already.
+                               asap_nodes = [node for node in asap_nodes \
+                                       if mygraph.contains(node)]
+                               for node in asap_nodes:
+                                       if not mygraph.child_nodes(node,
+                                               ignore_priority=priority_range.ignore_soft):
+                                               selected_nodes = [node]
+                                               asap_nodes.remove(node)
+                                               break
+                       if not selected_nodes and \
+                               not (prefer_asap and asap_nodes):
+                               for i in xrange(priority_range.NONE,
+                                       priority_range.MEDIUM_SOFT + 1):
+                                       ignore_priority = priority_range.ignore_priority[i]
+                                       nodes = get_nodes(ignore_priority=ignore_priority)
+                                       if nodes:
+                                               # If there is a mix of uninstall nodes with other
+                                               # types, save the uninstall nodes for later since
+                                               # sometimes a merge node will render an uninstall
+                                               # node unnecessary (due to occupying the same slot),
+                                               # and we want to avoid executing a separate uninstall
+                                               # task in that case.
+                                               if len(nodes) > 1:
+                                                       good_uninstalls = []
+                                                       with_some_uninstalls_excluded = []
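+                                                       # good_uninstalls keeps uninstall nodes whose slot
+                                                       # is not about to be occupied by a merge, while
+                                                       # with_some_uninstalls_excluded keeps everything
+                                                       # except those soon-to-be-redundant uninstalls.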
+                                                       for node in nodes:
+                                                               if node.operation == "uninstall":
+                                                                       slot_node = self.mydbapi[node.root
+                                                                               ].match_pkgs(node.slot_atom)
+                                                                       if slot_node and \
+                                                                               slot_node[0].operation == "merge":
+                                                                               continue
+                                                                       good_uninstalls.append(node)
+                                                               with_some_uninstalls_excluded.append(node)
+                                                       if good_uninstalls:
+                                                               nodes = good_uninstalls
+                                                       elif with_some_uninstalls_excluded:
+                                                               nodes = with_some_uninstalls_excluded
+
+                                               if ignore_priority is None and not tree_mode:
+                                                       # Greedily pop all of these nodes since no
+                                                       # relationship has been ignored. This optimization
+                                                       # destroys --tree output, so it's disabled in tree
+                                                       # mode.
+                                                       selected_nodes = nodes
+                                               else:
+                                                       # For optimal merge order:
+                                                       #  * Only pop one node.
+                                                       #  * Removing a root node (node without a parent)
+                                                       #    will not produce a leaf node, so avoid it.
+                                                       #  * It's normal for a selected uninstall to be a
+                                                       #    root node, so don't check them for parents.
+                                                       for node in nodes:
+                                                               if node.operation == "uninstall" or \
+                                                                       mygraph.parent_nodes(node):
+                                                                       selected_nodes = [node]
+                                                                       break
+
+                                               if selected_nodes:
+                                                       break
+
+                       if not selected_nodes:
+                               nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
+                               if nodes:
+                                       mergeable_nodes = set(nodes)
+                                       if prefer_asap and asap_nodes:
+                                               nodes = asap_nodes
+                                       for i in xrange(priority_range.SOFT,
+                                               priority_range.MEDIUM_SOFT + 1):
+                                               ignore_priority = priority_range.ignore_priority[i]
+                                               for node in nodes:
+                                                       if not mygraph.parent_nodes(node):
+                                                               continue
+                                                       selected_nodes = set()
+                                                       if gather_deps(ignore_priority,
+                                                               mergeable_nodes, selected_nodes, node):
+                                                               break
+                                                       else:
+                                                               selected_nodes = None
+                                               if selected_nodes:
+                                                       break
+
+                                       if prefer_asap and asap_nodes and not selected_nodes:
+                                               # We failed to find any asap nodes to merge, so ignore
+                                               # them for the next iteration.
+                                               prefer_asap = False
+                                               continue
+
+                       if selected_nodes and ignore_priority is not None:
+                               # Try to merge ignored medium_soft deps as soon as possible
+                               # if they're not satisfied by installed packages.
+                               for node in selected_nodes:
+                                       children = set(mygraph.child_nodes(node))
+                                       soft = children.difference(
+                                               mygraph.child_nodes(node,
+                                               ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
+                                       medium_soft = children.difference(
+                                               mygraph.child_nodes(node,
+                                                       ignore_priority = \
+                                                       DepPrioritySatisfiedRange.ignore_medium_soft))
+                                       medium_soft.difference_update(soft)
+                                       for child in medium_soft:
+                                               if child in selected_nodes:
+                                                       continue
+                                               if child in asap_nodes:
+                                                       continue
+                                               asap_nodes.append(child)
+
+                       if selected_nodes and len(selected_nodes) > 1:
+                               if not isinstance(selected_nodes, list):
+                                       selected_nodes = list(selected_nodes)
+                               selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
+
+                       if not selected_nodes and not myblocker_uninstalls.is_empty():
+                               # An Uninstall task needs to be executed in order to
+                               # avoid conflict if possible.
+
+                               if drop_satisfied:
+                                       priority_range = DepPrioritySatisfiedRange
+                               else:
+                                       priority_range = DepPriorityNormalRange
+
+                               mergeable_nodes = get_nodes(
+                                       ignore_priority=ignore_uninst_or_med)
+
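+                               # Pick the uninstall task whose parents have the fewest
+                               # remaining dependencies, since executing it is most likely
+                               # to expose new leaf nodes.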
+                               min_parent_deps = None
+                               uninst_task = None
+                               for task in myblocker_uninstalls.leaf_nodes():
+                                       # Do some sanity checks so that system or world packages
+                                       # don't get uninstalled inappropriately here (only really
+                                       # necessary when --complete-graph has not been enabled).
+
+                                       if task in ignored_uninstall_tasks:
+                                               continue
+
+                                       if task in scheduled_uninstalls:
+                                               # It's been scheduled but it hasn't
+                                               # been executed yet due to dependence
+                                               # on installation of blocking packages.
+                                               continue
+
+                                       root_config = self.roots[task.root]
+                                       inst_pkg = self._pkg_cache[
+                                               ("installed", task.root, task.cpv, "nomerge")]
+
+                                       if self.digraph.contains(inst_pkg):
+                                               continue
+
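+                                       # Determine whether any blocker explicitly forbids
+                                       # temporary overlap, or whether overlap safety must be
+                                       # judged heuristically because the blocker comes from
+                                       # an EAPI 0 or 1 ebuild.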
+                                       forbid_overlap = False
+                                       heuristic_overlap = False
+                                       for blocker in myblocker_uninstalls.parent_nodes(task):
+                                               if blocker.eapi in ("0", "1"):
+                                                       heuristic_overlap = True
+                                               elif blocker.atom.blocker.overlap.forbid:
+                                                       forbid_overlap = True
+                                                       break
+                                       if forbid_overlap and running_root == task.root:
+                                               continue
+
+                                       if heuristic_overlap and running_root == task.root:
+                                               # Never uninstall sys-apps/portage or its essential
+                                               # dependencies, except through replacement.
+                                               try:
+                                                       runtime_dep_atoms = \
+                                                               list(runtime_deps.iterAtomsForPackage(task))
+                                               except portage.exception.InvalidDependString, e:
+                                                       portage.writemsg("!!! Invalid PROVIDE in " + \
+                                                               "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
+                                                               (task.root, task.cpv, e), noiselevel=-1)
+                                                       del e
+                                                       continue
+
+                                               # Don't uninstall a runtime dep if it appears
+                                               # to be the only suitable one installed.
+                                               skip = False
+                                               vardb = root_config.trees["vartree"].dbapi
+                                               for atom in runtime_dep_atoms:
+                                                       other_version = None
+                                                       for pkg in vardb.match_pkgs(atom):
+                                                               if pkg.cpv == task.cpv and \
+                                                                       pkg.metadata["COUNTER"] == \
+                                                                       task.metadata["COUNTER"]:
+                                                                       continue
+                                                               other_version = pkg
+                                                               break
+                                                       if other_version is None:
+                                                               skip = True
+                                                               break
+                                               if skip:
+                                                       continue
+
+                                               # For packages in the system set, don't take
+                                               # any chances. If the conflict can't be resolved
+                                               # by a normal replacement operation then abort.
+                                               skip = False
+                                               try:
+                                                       for atom in root_config.sets[
+                                                               "system"].iterAtomsForPackage(task):
+                                                               skip = True
+                                                               break
+                                               except portage.exception.InvalidDependString, e:
+                                                       portage.writemsg("!!! Invalid PROVIDE in " + \
+                                                               "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
+                                                               (task.root, task.cpv, e), noiselevel=-1)
+                                                       del e
+                                                       skip = True
+                                               if skip:
+                                                       continue
+
+                                       # Note that the world check isn't always
+                                       # necessary since self._complete_graph() will
+                                       # add all packages from the system and world sets to the
+                                       # graph. This just allows unresolved conflicts to be
+                                       # detected as early as possible, which makes it possible
+                                       # to avoid calling self._complete_graph() when it is
+                                       # unnecessary due to blockers triggering an abort.
+                                       if not complete:
+                                               # For packages in the world set, go ahead and uninstall
+                                               # when necessary, as long as the atom will be satisfied
+                                               # in the final state.
+                                               graph_db = self.mydbapi[task.root]
+                                               skip = False
+                                               try:
+                                                       for atom in root_config.sets[
+                                                               "world"].iterAtomsForPackage(task):
+                                                               satisfied = False
+                                                               for pkg in graph_db.match_pkgs(atom):
+                                                                       if pkg == inst_pkg:
+                                                                               continue
+                                                                       satisfied = True
+                                                                       break
+                                                               if not satisfied:
+                                                                       skip = True
+                                                                       self._blocked_world_pkgs[inst_pkg] = atom
+                                                                       break
+                                               except portage.exception.InvalidDependString, e:
+                                                       portage.writemsg("!!! Invalid PROVIDE in " + \
+                                                               "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
+                                                               (task.root, task.cpv, e), noiselevel=-1)
+                                                       del e
+                                                       skip = True
+                                               if skip:
+                                                       continue
+
+                                       # Check the deps of parent nodes to ensure that
+                                       # the chosen task produces a leaf node. Maybe
+                                       # this can be optimized some more to make the
+                                       # best possible choice, but the current algorithm
+                                       # is simple and should be near optimal for most
+                                       # common cases.
+                                       mergeable_parent = False
+                                       parent_deps = set()
+                                       for parent in mygraph.parent_nodes(task):
+                                               parent_deps.update(mygraph.child_nodes(parent,
+                                                       ignore_priority=priority_range.ignore_medium_soft))
+                                               if parent in mergeable_nodes and \
+                                                       gather_deps(ignore_uninst_or_med_soft,
+                                                       mergeable_nodes, set(), parent):
+                                                       mergeable_parent = True
+
+                                       if not mergeable_parent:
+                                               continue
+
+                                       parent_deps.remove(task)
+                                       if min_parent_deps is None or \
+                                               len(parent_deps) < min_parent_deps:
+                                               min_parent_deps = len(parent_deps)
+                                               uninst_task = task
+
+                               if uninst_task is not None:
+                                       # The uninstall is performed only after blocking
+                                       # packages have been merged on top of it. File
+                                       # collisions between blocking packages are detected
+                                       # and removed from the list of files to be uninstalled.
+                                       scheduled_uninstalls.add(uninst_task)
+                                       parent_nodes = mygraph.parent_nodes(uninst_task)
+
+                                       # Reverse the parent -> uninstall edges since we want
+                                       # to do the uninstall after blocking packages have
+                                       # been merged on top of it.
+                                       mygraph.remove(uninst_task)
+                                       for blocked_pkg in parent_nodes:
+                                               mygraph.add(blocked_pkg, uninst_task,
+                                                       priority=BlockerDepPriority.instance)
+                                               scheduler_graph.remove_edge(uninst_task, blocked_pkg)
+                                               scheduler_graph.add(blocked_pkg, uninst_task,
+                                                       priority=BlockerDepPriority.instance)
+
+                                       # Reset the state variables for leaf node selection and
+                                       # continue trying to select leaf nodes.
+                                       prefer_asap = True
+                                       drop_satisfied = False
+                                       continue
+
+                       if not selected_nodes:
+                               # Only select root nodes as a last resort. This case should
+                               # only trigger when the graph is nearly empty and the only
+                               # remaining nodes are isolated (no parents or children). Since
+                               # the nodes must be isolated, ignore_priority is not needed.
+                               selected_nodes = get_nodes()
+
+                       if not selected_nodes and not drop_satisfied:
+                               drop_satisfied = True
+                               continue
+
+                       if not selected_nodes and not myblocker_uninstalls.is_empty():
+                               # If possible, drop an uninstall task here in order to avoid
+                               # the circular deps code path. The corresponding blocker will
+                               # still be counted as an unresolved conflict.
+                               uninst_task = None
+                               for node in myblocker_uninstalls.leaf_nodes():
+                                       try:
+                                               mygraph.remove(node)
+                                       except KeyError:
+                                               pass
+                                       else:
+                                               uninst_task = node
+                                               ignored_uninstall_tasks.add(node)
+                                               break
+
+                               if uninst_task is not None:
+                                       # Reset the state variables for leaf node selection and
+                                       # continue trying to select leaf nodes.
+                                       prefer_asap = True
+                                       drop_satisfied = False
+                                       continue
+
+                       if not selected_nodes:
+                               self._circular_deps_for_display = mygraph
+                               raise self._unknown_internal_error()
+
+                       # At this point, we've succeeded in selecting one or more nodes, so
+                       # reset state variables for leaf node selection.
+                       prefer_asap = True
+                       drop_satisfied = False
+
+                       mygraph.difference_update(selected_nodes)
+
+                       for node in selected_nodes:
+                               if isinstance(node, Package) and \
+                                       node.operation == "nomerge":
+                                       continue
+
+                               # Handle interactions between blockers
+                               # and uninstallation tasks.
+                               solved_blockers = set()
+                               uninst_task = None
+                               if isinstance(node, Package) and \
+                                       "uninstall" == node.operation:
+                                       have_uninstall_task = True
+                                       uninst_task = node
+                               else:
+                                       vardb = self.trees[node.root]["vartree"].dbapi
+                                       previous_cpv = vardb.match(node.slot_atom)
+                                       if previous_cpv:
+                                               # The package will be replaced by this one, so remove
+                                               # the corresponding Uninstall task if necessary.
+                                               previous_cpv = previous_cpv[0]
+                                               uninst_task = \
+                                                       ("installed", node.root, previous_cpv, "uninstall")
+                                               try:
+                                                       mygraph.remove(uninst_task)
+                                               except KeyError:
+                                                       pass
+
+                               if uninst_task is not None and \
+                                       uninst_task not in ignored_uninstall_tasks and \
+                                       myblocker_uninstalls.contains(uninst_task):
+                                       blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
+                                       myblocker_uninstalls.remove(uninst_task)
+                                       # Discard any blockers that this Uninstall solves.
+                                       for blocker in blocker_nodes:
+                                               if not myblocker_uninstalls.child_nodes(blocker):
+                                                       myblocker_uninstalls.remove(blocker)
+                                                       solved_blockers.add(blocker)
+
+                               retlist.append(node)
+
+                               if (isinstance(node, Package) and \
+                                       "uninstall" == node.operation) or \
+                                       (uninst_task is not None and \
+                                       uninst_task in scheduled_uninstalls):
+                                       # Include satisfied blockers in the merge list
+                                       # since the user might be interested and also
+                                       # it serves as an indicator that blocking packages
+                                       # will be temporarily installed simultaneously.
+                                       for blocker in solved_blockers:
+                                               retlist.append(Blocker(atom=blocker.atom,
+                                                       root=blocker.root, eapi=blocker.eapi,
+                                                       satisfied=True))
+
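+               # Any remaining unresolved blockers are appended to the merge
+               # list so that they can be reported to the user.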
+               unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
+               for node in myblocker_uninstalls.root_nodes():
+                       unsolvable_blockers.add(node)
+
+               for blocker in unsolvable_blockers:
+                       retlist.append(blocker)
+
+               # If any Uninstall tasks need to be executed in order
+               # to avoid a conflict, complete the graph with any
+               # dependencies that may have been initially
+               # neglected (to ensure that unsafe Uninstall tasks
+               # are properly identified and blocked from execution).
+               if have_uninstall_task and \
+                       not complete and \
+                       not unsolvable_blockers:
+                       self.myparams.add("complete")
+                       raise self._serialize_tasks_retry("")
+
+               if unsolvable_blockers and \
+                       not self._accept_blocker_conflicts():
+                       self._unsatisfied_blockers_for_display = unsolvable_blockers
+                       self._serialized_tasks_cache = retlist[:]
+                       self._scheduler_graph = scheduler_graph
+                       raise self._unknown_internal_error()
+
+               if self._slot_collision_info and \
+                       not self._accept_blocker_conflicts():
+                       self._serialized_tasks_cache = retlist[:]
+                       self._scheduler_graph = scheduler_graph
+                       raise self._unknown_internal_error()
+
+               return retlist, scheduler_graph
+
+       def _show_circular_deps(self, mygraph):
+               # No leaf nodes are available, so we have a circular
+               # dependency panic situation.  Reduce the noise level to a
+               # minimum via repeated elimination of root nodes since they
+               # have no parents and thus cannot be part of a cycle.
+               while True:
+                       root_nodes = mygraph.root_nodes(
+                               ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
+                       if not root_nodes:
+                               break
+                       mygraph.difference_update(root_nodes)
+               # Display the USE flags that are enabled on nodes that are part
+               # of dependency cycles in case that helps the user decide to
+               # disable some of them.
+               display_order = []
+               tempgraph = mygraph.copy()
+               while not tempgraph.empty():
+                       nodes = tempgraph.leaf_nodes()
+                       if not nodes:
+                               node = tempgraph.order[0]
+                       else:
+                               node = nodes[0]
+                       display_order.append(node)
+                       tempgraph.remove(node)
+               display_order.reverse()
+               self.myopts.pop("--quiet", None)
+               self.myopts.pop("--verbose", None)
+               self.myopts["--tree"] = True
+               portage.writemsg("\n\n", noiselevel=-1)
+               self.display(display_order)
+               prefix = colorize("BAD", " * ")
+               portage.writemsg("\n", noiselevel=-1)
+               portage.writemsg(prefix + "Error: circular dependencies:\n",
+                       noiselevel=-1)
+               portage.writemsg("\n", noiselevel=-1)
+               mygraph.debug_print()
+               portage.writemsg("\n", noiselevel=-1)
+               portage.writemsg(prefix + "Note that circular dependencies " + \
+                       "can often be avoided by temporarily\n", noiselevel=-1)
+               portage.writemsg(prefix + "disabling USE flags that trigger " + \
+                       "optional dependencies.\n", noiselevel=-1)
+
+       def _show_merge_list(self):
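+               """
+               Display the cached merge list unless it (or its reverse) has
+               already been displayed.
+               """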
+               if self._serialized_tasks_cache is not None and \
+                       not (self._displayed_list and \
+                       (self._displayed_list == self._serialized_tasks_cache or \
+                       self._displayed_list == \
+                               list(reversed(self._serialized_tasks_cache)))):
+                       display_list = self._serialized_tasks_cache[:]
+                       if "--tree" in self.myopts:
+                               display_list.reverse()
+                       self.display(display_list)
+
+       def _show_unsatisfied_blockers(self, blockers):
+               self._show_merge_list()
+               msg = "Error: The above package list contains " + \
+                       "packages which cannot be installed " + \
+                       "at the same time on the same system."
+               prefix = colorize("BAD", " * ")
+               from textwrap import wrap
+               portage.writemsg("\n", noiselevel=-1)
+               for line in wrap(msg, 70):
+                       portage.writemsg(prefix + line + "\n", noiselevel=-1)
+
+               # Display the conflicting packages along with the packages
+               # that pulled them in. This is helpful for troubleshooting
+               # cases in which blockers don't solve automatically and
+               # the reasons are not apparent from the normal merge list
+               # display.
+
+               conflict_pkgs = {}
+               for blocker in blockers:
+                       for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
+                               self._blocker_parents.parent_nodes(blocker)):
+                               parent_atoms = self._parent_atoms.get(pkg)
+                               if not parent_atoms:
+                                       atom = self._blocked_world_pkgs.get(pkg)
+                                       if atom is not None:
+                                               parent_atoms = set([("@world", atom)])
+                               if parent_atoms:
+                                       conflict_pkgs[pkg] = parent_atoms
+
+               if conflict_pkgs:
+                       # Reduce noise by pruning packages that are only
+                       # pulled in by other conflict packages.
+                       pruned_pkgs = set()
+                       for pkg, parent_atoms in conflict_pkgs.iteritems():
+                               relevant_parent = False
+                               for parent, atom in parent_atoms:
+                                       if parent not in conflict_pkgs:
+                                               relevant_parent = True
+                                               break
+                               if not relevant_parent:
+                                       pruned_pkgs.add(pkg)
+                       for pkg in pruned_pkgs:
+                               del conflict_pkgs[pkg]
+
+               if conflict_pkgs:
+                       msg = []
+                       msg.append("\n")
+                       indent = "  "
+                       # Max number of parents shown, to avoid flooding the display.
+                       max_parents = 3
+                       for pkg, parent_atoms in conflict_pkgs.iteritems():
+
+                               pruned_list = set()
+
+                               # Prefer packages that are not directly involved in a conflict.
+                               for parent_atom in parent_atoms:
+                                       if len(pruned_list) >= max_parents:
+                                               break
+                                       parent, atom = parent_atom
+                                       if parent not in conflict_pkgs:
+                                               pruned_list.add(parent_atom)
+
+                               for parent_atom in parent_atoms:
+                                       if len(pruned_list) >= max_parents:
+                                               break
+                                       pruned_list.add(parent_atom)
+
+                               omitted_parents = len(parent_atoms) - len(pruned_list)
+                               msg.append(indent + "%s pulled in by\n" % pkg)
+
+                               for parent_atom in pruned_list:
+                                       parent, atom = parent_atom
+                                       msg.append(2*indent)
+                                       if isinstance(parent,
+                                               (PackageArg, AtomArg)):
+                                               # For PackageArg and AtomArg types, it's
+                                               # redundant to display the atom attribute.
+                                               msg.append(str(parent))
+                                       else:
+                                               # Display the specific atom from SetArg or
+                                               # Package types.
+                                               msg.append("%s required by %s" % (atom, parent))
+                                       msg.append("\n")
+
+                               if omitted_parents:
+                                       msg.append(2*indent)
+                                       msg.append("(and %d more)\n" % omitted_parents)
+
+                               msg.append("\n")
+
+                       sys.stderr.write("".join(msg))
+                       sys.stderr.flush()
+
+               if "--quiet" not in self.myopts:
+                       show_blocker_docs_link()
+
+       def display(self, mylist, favorites=[], verbosity=None):
+
+               # This is used to prevent display_problems() from
+               # redundantly displaying this exact same merge list
+               # again via _show_merge_list().
+               self._displayed_list = mylist
+
+               if verbosity is None:
+                       verbosity = ("--quiet" in self.myopts and 1 or \
+                               "--verbose" in self.myopts and 3 or 2)
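+               # Effective verbosity: 1 for --quiet, 3 for --verbose, 2 otherwise.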
+               favorites_set = InternalPackageSet(favorites)
+               oneshot = "--oneshot" in self.myopts or \
+                       "--onlydeps" in self.myopts
+               columns = "--columns" in self.myopts
+               changelogs=[]
+               p=[]
+               blockers = []
+
+               counters = PackageCounters()
+
+               if verbosity == 1 and "--verbose" not in self.myopts:
+                       def create_use_string(*args):
+                               return ""
+               else:
+                       def create_use_string(name, cur_iuse, iuse_forced, cur_use,
+                               old_iuse, old_use,
+                               is_new, reinst_flags,
+                               all_flags=(verbosity == 3 or "--quiet" in self.myopts),
+                               alphabetical=("--alphabetical" in self.myopts)):
+                               enabled = []
+                               if alphabetical:
+                                       disabled = enabled
+                                       removed = enabled
+                               else:
+                                       disabled = []
+                                       removed = []
+                               cur_iuse = set(cur_iuse)
+                               enabled_flags = cur_iuse.intersection(cur_use)
+                               removed_iuse = set(old_iuse).difference(cur_iuse)
+                               any_iuse = cur_iuse.union(old_iuse)
+                               any_iuse = list(any_iuse)
+                               any_iuse.sort()
+                               for flag in any_iuse:
+                                       flag_str = None
+                                       isEnabled = False
+                                       reinst_flag = reinst_flags and flag in reinst_flags
+                                       if flag in enabled_flags:
+                                               isEnabled = True
+                                               if is_new or flag in old_use and \
+                                                       (all_flags or reinst_flag):
+                                                       flag_str = red(flag)
+                                               elif flag not in old_iuse:
+                                                       flag_str = yellow(flag) + "%*"
+                                               elif flag not in old_use:
+                                                       flag_str = green(flag) + "*"
+                                       elif flag in removed_iuse:
+                                               if all_flags or reinst_flag:
+                                                       flag_str = yellow("-" + flag) + "%"
+                                                       if flag in old_use:
+                                                               flag_str += "*"
+                                                       flag_str = "(" + flag_str + ")"
+                                                       removed.append(flag_str)
+                                               continue
+                                       else:
+                                               if is_new or flag in old_iuse and \
+                                                       flag not in old_use and \
+                                                       (all_flags or reinst_flag):
+                                                       flag_str = blue("-" + flag)
+                                               elif flag not in old_iuse:
+                                                       flag_str = yellow("-" + flag)
+                                                       if flag not in iuse_forced:
+                                                               flag_str += "%"
+                                               elif flag in old_use:
+                                                       flag_str = green("-" + flag) + "*"
+                                       if flag_str:
+                                               if flag in iuse_forced:
+                                                       flag_str = "(" + flag_str + ")"
+                                               if isEnabled:
+                                                       enabled.append(flag_str)
+                                               else:
+                                                       disabled.append(flag_str)
+
+                               if alphabetical:
+                                       ret = " ".join(enabled)
+                               else:
+                                       ret = " ".join(enabled + disabled + removed)
+                               if ret:
+                                       ret = '%s="%s" ' % (name, ret)
+                               return ret
+
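+               # create_use_string() returns fragments such as
+               # USE="berkdb -doc% (-selinux)" (flag names illustrative) for the
+               # verbose display, or an empty string when there is nothing to show.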
+               repo_display = RepoDisplay(self.roots)
+
+               tree_nodes = []
+               display_list = []
+               mygraph = self.digraph.copy()
+
+               # If there are any Uninstall instances, add the corresponding
+               # blockers to the digraph (useful for --tree display).
+
+               executed_uninstalls = set(node for node in mylist \
+                       if isinstance(node, Package) and node.operation == "unmerge")
+
+               for uninstall in self._blocker_uninstalls.leaf_nodes():
+                       uninstall_parents = \
+                               self._blocker_uninstalls.parent_nodes(uninstall)
+                       if not uninstall_parents:
+                               continue
+
+                       # Remove the corresponding "nomerge" node and substitute
+                       # the Uninstall node.
+                       inst_pkg = self._pkg_cache[
+                               ("installed", uninstall.root, uninstall.cpv, "nomerge")]
+                       try:
+                               mygraph.remove(inst_pkg)
+                       except KeyError:
+                               pass
+
+                       try:
+                               inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
+                       except KeyError:
+                               inst_pkg_blockers = []
+
+                       # Break the Package -> Uninstall edges.
+                       mygraph.remove(uninstall)
+
+                       # Resolution of a package's blockers
+                       # depends on its own uninstallation.
+                       for blocker in inst_pkg_blockers:
+                               mygraph.add(uninstall, blocker)
+
+                       # Expand Package -> Uninstall edges into
+                       # Package -> Blocker -> Uninstall edges.
+                       for blocker in uninstall_parents:
+                               mygraph.add(uninstall, blocker)
+                               for parent in self._blocker_parents.parent_nodes(blocker):
+                                       if parent != inst_pkg:
+                                               mygraph.add(blocker, parent)
+
+                       # If the uninstall task did not need to be executed because
+                       # of an upgrade, display Blocker -> Upgrade edges since the
+                       # corresponding Blocker -> Uninstall edges will not be shown.
+                       upgrade_node = \
+                               self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
+                       if upgrade_node is not None and \
+                               uninstall not in executed_uninstalls:
+                               for blocker in uninstall_parents:
+                                       mygraph.add(upgrade_node, blocker)
+
+               unsatisfied_blockers = []
+               i = 0
+               depth = 0
+               shown_edges = set()
+               for x in mylist:
+                       if isinstance(x, Blocker) and not x.satisfied:
+                               unsatisfied_blockers.append(x)
+                               continue
+                       graph_key = x
+                       if "--tree" in self.myopts:
+                               depth = len(tree_nodes)
+                               while depth and graph_key not in \
+                                       mygraph.child_nodes(tree_nodes[depth-1]):
+                                               depth -= 1
+                               if depth:
+                                       tree_nodes = tree_nodes[:depth]
+                                       tree_nodes.append(graph_key)
+                                       display_list.append((x, depth, True))
+                                       shown_edges.add((graph_key, tree_nodes[depth-1]))
+                               else:
+                                       traversed_nodes = set() # prevent endless cycles
+                                       traversed_nodes.add(graph_key)
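+                                       # add_parents() walks up the dependency graph to pick
+                                       # a chain of parents for the --tree display, preferring
+                                       # edges that have not been shown yet and avoiding cycles
+                                       # where possible.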
+                                       def add_parents(current_node, ordered):
+                                               parent_nodes = None
+                                               # Do not traverse to parents if this node is
+                                               # an argument or a direct member of a set that has
+                                               # been specified as an argument (system or world).
+                                               if current_node not in self._set_nodes:
+                                                       parent_nodes = mygraph.parent_nodes(current_node)
+                                               if parent_nodes:
+                                                       child_nodes = set(mygraph.child_nodes(current_node))
+                                                       selected_parent = None
+                                                       # First, try to avoid a direct cycle.
+                                                       for node in parent_nodes:
+                                                               if not isinstance(node, (Blocker, Package)):
+                                                                       continue
+                                                               if node not in traversed_nodes and \
+                                                                       node not in child_nodes:
+                                                                       edge = (current_node, node)
+                                                                       if edge in shown_edges:
+                                                                               continue
+                                                                       selected_parent = node
+                                                                       break
+                                                       if not selected_parent:
+                                                               # A direct cycle is unavoidable.
+                                                               for node in parent_nodes:
+                                                                       if not isinstance(node, (Blocker, Package)):
+                                                                               continue
+                                                                       if node not in traversed_nodes:
+                                                                               edge = (current_node, node)
+                                                                               if edge in shown_edges:
+                                                                                       continue
+                                                                               selected_parent = node
+                                                                               break
+                                                       if selected_parent:
+                                                               shown_edges.add((current_node, selected_parent))
+                                                               traversed_nodes.add(selected_parent)
+                                                               add_parents(selected_parent, False)
+                                               display_list.append((current_node,
+                                                       len(tree_nodes), ordered))
+                                               tree_nodes.append(current_node)
+                                       tree_nodes = []
+                                       add_parents(graph_key, True)
+                       else:
+                               display_list.append((x, depth, True))
+               mylist = display_list
+               for x in unsatisfied_blockers:
+                       mylist.append((x, 0, True))
+
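+               # Prune "nomerge" entries that are not needed to show the path to a
+               # package that will actually be merged, along with consecutive
+               # duplicates introduced while the tree was being filled in.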
+               last_merge_depth = 0
+               for i in xrange(len(mylist)-1,-1,-1):
+                       graph_key, depth, ordered = mylist[i]
+                       if not ordered and depth == 0 and i > 0 \
+                               and graph_key == mylist[i-1][0] and \
+                               mylist[i-1][1] == 0:
+                               # An ordered node got a consecutive duplicate when the tree was
+                               # being filled in.
+                               del mylist[i]
+                               continue
+                       if ordered and graph_key[-1] != "nomerge":
+                               last_merge_depth = depth
+                               continue
+                       if depth >= last_merge_depth or \
+                               i < len(mylist) - 1 and \
+                               depth >= mylist[i+1][1]:
+                                       del mylist[i]
+
+               from portage import flatten
+               from portage.dep import use_reduce, paren_reduce
+               # Files-to-fetch list - avoids counting the same file twice
+               # in the size display (verbose mode).
+               myfetchlist=[]
+
+               # Use this set to detect when all of the "repoadd" strings are "0"
+               # and disable the entire repo display in that case.
+               repoadd_set = set()
+
+               for mylist_index in xrange(len(mylist)):
+                       x, depth, ordered = mylist[mylist_index]
+                       pkg_type = x[0]
+                       myroot = x[1]
+                       pkg_key = x[2]
+                       portdb = self.trees[myroot]["porttree"].dbapi
+                       bindb  = self.trees[myroot]["bintree"].dbapi
+                       vardb = self.trees[myroot]["vartree"].dbapi
+                       vartree = self.trees[myroot]["vartree"]
+                       pkgsettings = self.pkgsettings[myroot]
+
+                       fetch=" "
+                       indent = " " * depth
+
+                       if isinstance(x, Blocker):
+                               if x.satisfied:
+                                       blocker_style = "PKG_BLOCKER_SATISFIED"
+                                       addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
+                               else:
+                                       blocker_style = "PKG_BLOCKER"
+                                       addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
+                               if ordered:
+                                       counters.blocks += 1
+                                       if x.satisfied:
+                                               counters.blocks_satisfied += 1
+                               resolved = portage.key_expand(
+                                       str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
+                               if "--columns" in self.myopts and "--quiet" in self.myopts:
+                                       addl += " " + colorize(blocker_style, resolved)
+                               else:
+                                       addl = "[%s %s] %s%s" % \
+                                               (colorize(blocker_style, "blocks"),
+                                               addl, indent, colorize(blocker_style, resolved))
+                               block_parents = self._blocker_parents.parent_nodes(x)
+                               block_parents = set([pnode[2] for pnode in block_parents])
+                               block_parents = ", ".join(block_parents)
+                               if resolved!=x[2]:
+                                       addl += colorize(blocker_style,
+                                               " (\"%s\" is blocking %s)") % \
+                                               (str(x.atom).lstrip("!"), block_parents)
+                               else:
+                                       addl += colorize(blocker_style,
+                                               " (is blocking %s)") % block_parents
+                               if isinstance(x, Blocker) and x.satisfied:
+                                       if columns:
+                                               continue
+                                       p.append(addl)
+                               else:
+                                       blockers.append(addl)
+                       else:
+                               pkg_status = x[3]
+                               pkg_merge = ordered and pkg_status == "merge"
+                               if not pkg_merge and pkg_status == "merge":
+                                       pkg_status = "nomerge"
+                               built = pkg_type != "ebuild"
+                               installed = pkg_type == "installed"
+                               pkg = x
+                               metadata = pkg.metadata
+                               ebuild_path = None
+                               repo_name = metadata["repository"]
+                               if pkg_type == "ebuild":
+                                       ebuild_path = portdb.findname(pkg_key)
+                                       if not ebuild_path: # shouldn't happen
+                                               raise portage.exception.PackageNotFound(pkg_key)
+                                       repo_path_real = os.path.dirname(os.path.dirname(
+                                               os.path.dirname(ebuild_path)))
+                               else:
+                                       repo_path_real = portdb.getRepositoryPath(repo_name)
+                               pkg_use = list(pkg.use.enabled)
+                               try:
+                                       restrict = flatten(use_reduce(paren_reduce(
+                                               pkg.metadata["RESTRICT"]), uselist=pkg_use))
+                               except portage.exception.InvalidDependString, e:
+                                       if not pkg.installed:
+                                               show_invalid_depstring_notice(x,
+                                                       pkg.metadata["RESTRICT"], str(e))
+                                               del e
+                                               return 1
+                                       restrict = []
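+                               # Fetch-restriction indicator: a red "F" means distfiles of a
+                               # fetch-restricted ebuild still have to be fetched manually,
+                               # while a green "f" means the restricted files are already
+                               # available.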
+                               if "ebuild" == pkg_type and x[3] != "nomerge" and \
+                                       "fetch" in restrict:
+                                       fetch = red("F")
+                                       if ordered:
+                                               counters.restrict_fetch += 1
+                                       if portdb.fetch_check(pkg_key, pkg_use):
+                                               fetch = green("f")
+                                               if ordered:
+                                                       counters.restrict_fetch_satisfied += 1
+
+                               # We need to use "--emptytree" testing here rather than "empty"
+                               # param testing because the "empty" param is used for -u, where
+                               # you still *do* want to see when something is being upgraded.
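+                               # The status letters assembled into "addl" below follow the
+                               # usual emerge legend: N = new install, NS = new slot,
+                               # U = upgrade, UD = downgrade within the slot, R = reinstall
+                               # of the same version, plus the F/f fetch flag computed above.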
+                               myoldbest = []
+                               myinslotlist = None
+                               installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
+                               if vardb.cpv_exists(pkg_key):
+                                       addl="  "+yellow("R")+fetch+"  "
+                                       if ordered:
+                                               if pkg_merge:
+                                                       counters.reinst += 1
+                                               elif pkg_status == "uninstall":
+                                                       counters.uninst += 1
+                               # filter out old-style virtual matches
+                               elif installed_versions and \
+                                       portage.cpv_getkey(installed_versions[0]) == \
+                                       portage.cpv_getkey(pkg_key):
+                                       myinslotlist = vardb.match(pkg.slot_atom)
+                                       # If this is the first install of a new-style virtual, we
+                                       # need to filter out old-style virtual matches.
+                                       if myinslotlist and \
+                                               portage.cpv_getkey(myinslotlist[0]) != \
+                                               portage.cpv_getkey(pkg_key):
+                                               myinslotlist = None
+                                       if myinslotlist:
+                                               myoldbest = myinslotlist[:]
+                                               addl = "   " + fetch
+                                               if not portage.dep.cpvequal(pkg_key,
+                                                       portage.best([pkg_key] + myoldbest)):
+                                                       # Downgrade in slot
+                                                       addl += turquoise("U")+blue("D")
+                                                       if ordered:
+                                                               counters.downgrades += 1
+                                               else:
+                                                       # Update in slot
+                                                       addl += turquoise("U") + " "
+                                                       if ordered:
+                                                               counters.upgrades += 1
+                                       else:
+                                               # New slot, mark it new.
+                                               addl = " " + green("NS") + fetch + "  "
+                                               myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
+                                               if ordered:
+                                                       counters.newslot += 1
+
+                                       if "--changelog" in self.myopts:
+                                               inst_matches = vardb.match(pkg.slot_atom)
+                                               if inst_matches:
+                                                       changelogs.extend(self.calc_changelog(
+                                                               portdb.findname(pkg_key),
+                                                               inst_matches[0], pkg_key))
+                               else:
+                                       addl = " " + green("N") + " " + fetch + "  "
+                                       if ordered:
+                                               counters.new += 1
+
+                               verboseadd = ""
+                               repoadd = None
+
+                               if True:
+                                       # USE flag display
+                                       forced_flags = set()
+                                       pkgsettings.setcpv(pkg) # for package.use.{mask,force}
+                                       forced_flags.update(pkgsettings.useforce)
+                                       forced_flags.update(pkgsettings.usemask)
+
+                                       cur_use = [flag for flag in pkg.use.enabled \
+                                               if flag in pkg.iuse.all]
+                                       cur_iuse = sorted(pkg.iuse.all)
+
+                                       if myoldbest and myinslotlist:
+                                               previous_cpv = myoldbest[0]
+                                       else:
+                                               previous_cpv = pkg.cpv
+                                       if vardb.cpv_exists(previous_cpv):
+                                               old_iuse, old_use = vardb.aux_get(
+                                                               previous_cpv, ["IUSE", "USE"])
+                                               old_iuse = list(set(
+                                                       filter_iuse_defaults(old_iuse.split())))
+                                               old_iuse.sort()
+                                               old_use = old_use.split()
+                                               is_new = False
+                                       else:
+                                               old_iuse = []
+                                               old_use = []
+                                               is_new = True
+
+                                       old_use = [flag for flag in old_use if flag in old_iuse]
+
+                                       use_expand = pkgsettings["USE_EXPAND"].lower().split()
+                                       use_expand.sort()
+                                       use_expand.reverse()
+                                       use_expand_hidden = \
+                                               pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
+
+                                       def map_to_use_expand(myvals, forcedFlags=False,
+                                               removeHidden=True):
+                                               ret = {}
+                                               forced = {}
+                                               for exp in use_expand:
+                                                       ret[exp] = []
+                                                       forced[exp] = set()
+                                                       for val in myvals[:]:
+                                                               if val.startswith(exp.lower()+"_"):
+                                                                       if val in forced_flags:
+                                                                               forced[exp].add(val[len(exp)+1:])
+                                                                       ret[exp].append(val[len(exp)+1:])
+                                                                       myvals.remove(val)
+                                               ret["USE"] = myvals
+                                               forced["USE"] = [val for val in myvals \
+                                                       if val in forced_flags]
+                                               if removeHidden:
+                                                       for exp in use_expand_hidden:
+                                                               ret.pop(exp, None)
+                                               if forcedFlags:
+                                                       return ret, forced
+                                               return ret
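+                                       # Illustrative result, assuming "linguas" is listed in
+                                       # USE_EXPAND:
+                                       #   map_to_use_expand(["linguas_de", "berkdb"])
+                                       #     -> {"linguas": ["de"], "USE": ["berkdb"]}
+                                       # (other USE_EXPAND variables map to empty lists, and
+                                       # hidden ones are dropped when removeHidden is True).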
+
+                                       # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
+                                       # are the only thing that triggered reinstallation.
+                                       reinst_flags_map = {}
+                                       reinstall_for_flags = self._reinstall_nodes.get(pkg)
+                                       reinst_expand_map = None
+                                       if reinstall_for_flags:
+                                               reinst_flags_map = map_to_use_expand(
+                                                       list(reinstall_for_flags), removeHidden=False)
+                                               for k in list(reinst_flags_map):
+                                                       if not reinst_flags_map[k]:
+                                                               del reinst_flags_map[k]
+                                               if not reinst_flags_map.get("USE"):
+                                                       reinst_expand_map = reinst_flags_map.copy()
+                                                       reinst_expand_map.pop("USE", None)
+                                       if reinst_expand_map and \
+                                               not set(reinst_expand_map).difference(
+                                               use_expand_hidden):
+                                               use_expand_hidden = \
+                                                       set(use_expand_hidden).difference(
+                                                       reinst_expand_map)
+
+                                       cur_iuse_map, iuse_forced = \
+                                               map_to_use_expand(cur_iuse, forcedFlags=True)
+                                       cur_use_map = map_to_use_expand(cur_use)
+                                       old_iuse_map = map_to_use_expand(old_iuse)
+                                       old_use_map = map_to_use_expand(old_use)
+
+                                       use_expand.sort()
+                                       use_expand.insert(0, "USE")
+
+                                       for key in use_expand:
+                                               if key in use_expand_hidden:
+                                                       continue
+                                               verboseadd += create_use_string(key.upper(),
+                                                       cur_iuse_map[key], iuse_forced[key],
+                                                       cur_use_map[key], old_iuse_map[key],
+                                                       old_use_map[key], is_new,
+                                                       reinst_flags_map.get(key))
+
+                               if verbosity == 3:
+                                       # size verbose
+                                       mysize=0
+                                       if pkg_type == "ebuild" and pkg_merge:
+                                               try:
+                                                       myfilesdict = portdb.getfetchsizes(pkg_key,
+                                                               useflags=pkg_use, debug=self.edebug)
+                                               except portage.exception.InvalidDependString, e:
+                                                       src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
+                                                       show_invalid_depstring_notice(x, src_uri, str(e))
+                                                       del e
+                                                       return 1
+                                               if myfilesdict is None:
+                                                       myfilesdict="[empty/missing/bad digest]"
+                                               else:
+                                                       for myfetchfile in myfilesdict:
+                                                               if myfetchfile not in myfetchlist:
+                                                                       mysize+=myfilesdict[myfetchfile]
+                                                                       myfetchlist.append(myfetchfile)
+                                                       if ordered:
+                                                               counters.totalsize += mysize
+                                               verboseadd += format_size(mysize)
+
+                                       # Overlay verbose: determine the repository of any
+                                       # previously installed version in the same slot.
+                                       has_previous = False
+                                       repo_name_prev = None
+                                       slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
+                                               metadata["SLOT"])
+                                       slot_matches = vardb.match(slot_atom)
+                                       if slot_matches:
+                                               has_previous = True
+                                               repo_name_prev = vardb.aux_get(slot_matches[0],
+                                                       ["repository"])[0]
+
+                                       # now use the data to generate output
+                                       if pkg.installed or not has_previous:
+                                               repoadd = repo_display.repoStr(repo_path_real)
+                                       else:
+                                               repo_path_prev = None
+                                               if repo_name_prev:
+                                                       repo_path_prev = portdb.getRepositoryPath(
+                                                               repo_name_prev)
+                                               if repo_path_prev == repo_path_real:
+                                                       repoadd = repo_display.repoStr(repo_path_real)
+                                               else:
+                                                       repoadd = "%s=>%s" % (
+                                                               repo_display.repoStr(repo_path_prev),
+                                                               repo_display.repoStr(repo_path_real))
+                                       if repoadd:
+                                               repoadd_set.add(repoadd)
+
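+                               # xs holds [category/pn, version, revision]; an "-r0"
+                               # revision is suppressed from the display below.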
+                               xs = [portage.cpv_getkey(pkg_key)] + \
+                                       list(portage.catpkgsplit(pkg_key)[2:])
+                               if xs[2] == "r0":
+                                       xs[2] = ""
+                               else:
+                                       xs[2] = "-" + xs[2]
+
+                               mywidth = 130
+                               if "COLUMNWIDTH" in self.settings:
+                                       try:
+                                               mywidth = int(self.settings["COLUMNWIDTH"])
+                                       except ValueError, e:
+                                               portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
+                                               portage.writemsg(
+                                                       "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
+                                                       self.settings["COLUMNWIDTH"], noiselevel=-1)
+                                               del e
+                               oldlp = mywidth - 30
+                               newlp = oldlp - 30
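+                               # newlp and oldlp are column stops (derived from COLUMNWIDTH)
+                               # used to align the new and old version fields in the
+                               # non-quiet --columns output below.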
+
+                               # Convert myoldbest from a list to a string.
+                               if not myoldbest:
+                                       myoldbest = ""
+                               else:
+                                       for pos, key in enumerate(myoldbest):
+                                               key = portage.catpkgsplit(key)[2] + \
+                                                       "-" + portage.catpkgsplit(key)[3]
+                                               if key[-3:] == "-r0":
+                                                       key = key[:-3]
+                                               myoldbest[pos] = key
+                                       myoldbest = blue("["+", ".join(myoldbest)+"]")
+
+                               pkg_cp = xs[0]
+                               root_config = self.roots[myroot]
+                               system_set = root_config.sets["system"]
+                               world_set  = root_config.sets["world"]
+
+                               pkg_system = False
+                               pkg_world = False
+                               try:
+                                       pkg_system = system_set.findAtomForPackage(pkg)
+                                       pkg_world  = world_set.findAtomForPackage(pkg)
+                                       if not (oneshot or pkg_world) and \
+                                               myroot == self.target_root and \
+                                               favorites_set.findAtomForPackage(pkg):
+                                               # Maybe it will be added to world now.
+                                               if create_world_atom(pkg, favorites_set, root_config):
+                                                       pkg_world = True
+                               except portage.exception.InvalidDependString:
+                                       # This is reported elsewhere if relevant.
+                                       pass
+
+                               def pkgprint(pkg_str):
+                                       if pkg_merge:
+                                               if pkg_system:
+                                                       return colorize("PKG_MERGE_SYSTEM", pkg_str)
+                                               elif pkg_world:
+                                                       return colorize("PKG_MERGE_WORLD", pkg_str)
+                                               else:
+                                                       return colorize("PKG_MERGE", pkg_str)
+                                       elif pkg_status == "uninstall":
+                                               return colorize("PKG_UNINSTALL", pkg_str)
+                                       else:
+                                               if pkg_system:
+                                                       return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
+                                               elif pkg_world:
+                                                       return colorize("PKG_NOMERGE_WORLD", pkg_str)
+                                               else:
+                                                       return colorize("PKG_NOMERGE", pkg_str)
+
+                               try:
+                                       properties = flatten(use_reduce(paren_reduce(
+                                               pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
+                               except portage.exception.InvalidDependString, e:
+                                       if not pkg.installed:
+                                               show_invalid_depstring_notice(pkg,
+                                                       pkg.metadata["PROPERTIES"], str(e))
+                                               del e
+                                               return 1
+                                       properties = []
+                               interactive = "interactive" in properties
+                               if interactive and pkg.operation == "merge":
+                                       addl = colorize("WARN", "I") + addl[1:]
+                                       if ordered:
+                                               counters.interactive += 1
+
+                               if x[1]!="/":
+                                       if myoldbest:
+                                               myoldbest +=" "
+                                       if "--columns" in self.myopts:
+                                               if "--quiet" in self.myopts:
+                                                       myprint=addl+" "+indent+pkgprint(pkg_cp)
+                                                       myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
+                                                       myprint=myprint+myoldbest
+                                                       myprint=myprint+darkgreen("to "+x[1])
+                                                       verboseadd = None
+                                               else:
+                                                       if not pkg_merge:
+                                                               myprint = "[%s] %s%s" % \
+                                                                       (pkgprint(pkg_status.ljust(13)),
+                                                                       indent, pkgprint(pkg.cp))
+                                                       else:
+                                                               myprint = "[%s %s] %s%s" % \
+                                                                       (pkgprint(pkg.type_name), addl,
+                                                                       indent, pkgprint(pkg.cp))
+                                                       if (newlp-nc_len(myprint)) > 0:
+                                                               myprint=myprint+(" "*(newlp-nc_len(myprint)))
+                                                       myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
+                                                       if (oldlp-nc_len(myprint)) > 0:
+                                                               myprint=myprint+" "*(oldlp-nc_len(myprint))
+                                                       myprint=myprint+myoldbest
+                                                       myprint += darkgreen("to " + pkg.root)
+                                       else:
+                                               if not pkg_merge:
+                                                       myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
+                                               else:
+                                                       myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
+                                               myprint += indent + pkgprint(pkg_key) + " " + \
+                                                       myoldbest + darkgreen("to " + myroot)
+                               else:
+                                       if "--columns" in self.myopts:
+                                               if "--quiet" in self.myopts:
+                                                       myprint=addl+" "+indent+pkgprint(pkg_cp)
+                                                       myprint=myprint+" "+green(xs[1]+xs[2])+" "
+                                                       myprint=myprint+myoldbest
+                                                       verboseadd = None
+                                               else:
+                                                       if not pkg_merge:
+                                                               myprint = "[%s] %s%s" % \
+                                                                       (pkgprint(pkg_status.ljust(13)),
+                                                                       indent, pkgprint(pkg.cp))
+                                                       else:
+                                                               myprint = "[%s %s] %s%s" % \
+                                                                       (pkgprint(pkg.type_name), addl,
+                                                                       indent, pkgprint(pkg.cp))
+                                                       if (newlp-nc_len(myprint)) > 0:
+                                                               myprint=myprint+(" "*(newlp-nc_len(myprint)))
+                                                       myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
+                                                       if (oldlp-nc_len(myprint)) > 0:
+                                                               myprint=myprint+(" "*(oldlp-nc_len(myprint)))
+                                                       myprint += myoldbest
+                                       else:
+                                               if not pkg_merge:
+                                                       myprint = "[%s] %s%s %s" % \
+                                                               (pkgprint(pkg_status.ljust(13)),
+                                                               indent, pkgprint(pkg.cpv),
+                                                               myoldbest)
+                                               else:
+                                                       myprint = "[%s %s] %s%s %s" % \
+                                                               (pkgprint(pkg_type), addl, indent,
+                                                               pkgprint(pkg.cpv), myoldbest)
+
+                               if columns and pkg.operation == "uninstall":
+                                       continue
+                               p.append((myprint, verboseadd, repoadd))
+
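+                               # If a new version of portage itself appears in the merge list
+                               # (and it is not the last item), warn that emerge will stop at
+                               # that point, reload itself, and then resume the merge.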
+                               if "--tree" not in self.myopts and \
+                                       "--quiet" not in self.myopts and \
+                                       not self._opts_no_restart.intersection(self.myopts) and \
+                                       pkg.root == self._running_root.root and \
+                                       portage.match_from_list(
+                                       portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
+                                       not vardb.cpv_exists(pkg.cpv) and \
+                                       "--quiet" not in self.myopts:
+                                               if mylist_index < len(mylist) - 1:
+                                                       p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
+                                                       p.append(colorize("WARN", "    then resume the merge."))
+
+               out = sys.stdout
+               show_repos = repoadd_set and repoadd_set != set(["0"])
+
+               for x in p:
+                       if isinstance(x, basestring):
+                               out.write("%s\n" % (x,))
+                               continue
+
+                       myprint, verboseadd, repoadd = x
+
+                       if verboseadd:
+                               myprint += " " + verboseadd
+
+                       if show_repos and repoadd:
+                               myprint += " " + teal("[%s]" % repoadd)
+
+                       out.write("%s\n" % (myprint,))
+
+               for x in blockers:
+                       print x
+
+               if verbosity == 3:
+                       print
+                       print counters
+                       if show_repos:
+                               sys.stdout.write(str(repo_display))
+
+               if "--changelog" in self.myopts:
+                       print
+                       for revision,text in changelogs:
+                               print bold('*'+revision)
+                               sys.stdout.write(text)
+
+               sys.stdout.flush()
+               return os.EX_OK
+
+       def display_problems(self):
+               """
+               Display problems with the dependency graph such as slot collisions.
+               This is called internally by display() to show the problems _after_
+               the merge list where it is most likely to be seen, but if display()
+               is not going to be called then this method should be called explicitly
+               to ensure that the user is notified of problems with the graph.
+
+               All output goes to stderr, except for unsatisfied dependencies which
+               go to stdout for parsing by programs such as autounmask.
+               """
+
+               # Note that show_masked_packages() sends its output to
+               # stdout, and some programs such as autounmask parse the
+               # output in cases when emerge bails out. However, when
+               # show_masked_packages() is called for installed packages
+               # here, the message is a warning that is more appropriate
+               # to send to stderr, so temporarily redirect stdout to
+               # stderr. TODO: Fix output code so there's a cleaner way
+               # to redirect everything to stderr.
+               sys.stdout.flush()
+               sys.stderr.flush()
+               stdout = sys.stdout
+               try:
+                       sys.stdout = sys.stderr
+                       self._display_problems()
+               finally:
+                       sys.stdout = stdout
+                       sys.stdout.flush()
+                       sys.stderr.flush()
+
+               # This goes to stdout for parsing by programs like autounmask.
+               for pargs, kwargs in self._unsatisfied_deps_for_display:
+                       self._show_unsatisfied_dep(*pargs, **kwargs)
+
+       def _display_problems(self):
+               if self._circular_deps_for_display is not None:
+                       self._show_circular_deps(
+                               self._circular_deps_for_display)
+
+               # The user is only notified of a slot conflict if
+               # there are no unresolvable blocker conflicts.
+               if self._unsatisfied_blockers_for_display is not None:
+                       self._show_unsatisfied_blockers(
+                               self._unsatisfied_blockers_for_display)
+               else:
+                       self._show_slot_collision_notice()
+
+               # TODO: Add generic support for "set problem" handlers so that
+               # the below warnings aren't special cases for world only.
+
+               if self._missing_args:
+                       world_problems = False
+                       if "world" in self._sets:
+                               # Filter out indirect members of world (from nested sets)
+                               # since only direct members of world are desired here.
+                               world_set = self.roots[self.target_root].sets["world"]
+                               for arg, atom in self._missing_args:
+                                       if arg.name == "world" and atom in world_set:
+                                               world_problems = True
+                                               break
+
+                       if world_problems:
+                               sys.stderr.write("\n!!! Problems have been " + \
+                                       "detected with your world file\n")
+                               sys.stderr.write("!!! Please run " + \
+                                       green("emaint --check world")+"\n\n")
+
+               if self._missing_args:
+                       sys.stderr.write("\n" + colorize("BAD", "!!!") + \
+                               " Ebuilds for the following packages are either all\n")
+                       sys.stderr.write(colorize("BAD", "!!!") + \
+                               " masked or don't exist:\n")
+                       sys.stderr.write(" ".join(str(atom) for arg, atom in \
+                               self._missing_args) + "\n")
+
+               if self._pprovided_args:
+                       arg_refs = {}
+                       for arg, atom in self._pprovided_args:
+                               if isinstance(arg, SetArg):
+                                       parent = arg.name
+                                       arg_atom = (atom, atom)
+                               else:
+                                       parent = "args"
+                                       arg_atom = (arg.arg, atom)
+                               refs = arg_refs.setdefault(arg_atom, [])
+                               if parent not in refs:
+                                       refs.append(parent)
+                       msg = []
+                       msg.append(bad("\nWARNING: "))
+                       if len(self._pprovided_args) > 1:
+                               msg.append("Requested packages will not be " + \
+                                       "merged because they are listed in\n")
+                       else:
+                               msg.append("A requested package will not be " + \
+                                       "merged because it is listed in\n")
+                       msg.append("package.provided:\n\n")
+                       problems_sets = set()
+                       for (arg, atom), refs in arg_refs.iteritems():
+                               ref_string = ""
+                               if refs:
+                                       problems_sets.update(refs)
+                                       refs.sort()
+                                       ref_string = ", ".join(["'%s'" % name for name in refs])
+                                       ref_string = " pulled in by " + ref_string
+                               msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
+                       msg.append("\n")
+                       if "world" in problems_sets:
+                               msg.append("This problem can be solved in one of the following ways:\n\n")
+                               msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
+                               msg.append("  B) Uninstall offending packages (cleans them from world).\n")
+                               msg.append("  C) Remove offending entries from package.provided.\n\n")
+                               msg.append("The best course of action depends on the reason that an offending\n")
+                               msg.append("package.provided entry exists.\n\n")
+                       sys.stderr.write("".join(msg))
+
+               masked_packages = []
+               for pkg in self._masked_installed:
+                       root_config = pkg.root_config
+                       pkgsettings = self.pkgsettings[pkg.root]
+                       mreasons = get_masking_status(pkg, pkgsettings, root_config)
+                       masked_packages.append((root_config, pkgsettings,
+                               pkg.cpv, pkg.metadata, mreasons))
+               if masked_packages:
+                       sys.stderr.write("\n" + colorize("BAD", "!!!") + \
+                               " The following installed packages are masked:\n")
+                       show_masked_packages(masked_packages)
+                       show_mask_docs()
+                       print
+
+       def calc_changelog(self, ebuildpath, current, next):
+               if ebuildpath is None or not os.path.exists(ebuildpath):
+                       return []
+               current = '-'.join(portage.catpkgsplit(current)[1:])
+               if current.endswith('-r0'):
+                       current = current[:-3]
+               next = '-'.join(portage.catpkgsplit(next)[1:])
+               if next.endswith('-r0'):
+                       next = next[:-3]
+               changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
+               try:
+                       changelog = open(changelogpath).read()
+               except SystemExit, e:
+                       raise # re-raise so SystemExit is not swallowed below
+               except:
+                       return []
+               divisions = self.find_changelog_tags(changelog)
+               #print 'XX from',current,'to',next
+               #for div,text in divisions: print 'XX',div
+               # skip entries for all revisions above the one we are about to emerge
+               for i in range(len(divisions)):
+                       if divisions[i][0]==next:
+                               divisions = divisions[i:]
+                               break
+               # find out how many entries we are going to display
+               for i in range(len(divisions)):
+                       if divisions[i][0]==current:
+                               divisions = divisions[:i]
+                               break
+               else:
+                       # couldn't find the current revision in the list; display nothing
+                       return []
+               return divisions
+
+       def find_changelog_tags(self, changelog):
+               divs = []
+               release = None
+               while 1:
+                       match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
+                       if match is None:
+                               if release is not None:
+                                       divs.append((release,changelog))
+                               return divs
+                       if release is not None:
+                               divs.append((release,changelog[:match.start()]))
+                       changelog = changelog[match.end():]
+                       release = match.group(1)
+                       if release.endswith('.ebuild'):
+                               release = release[:-7]
+                       if release.endswith('-r0'):
+                               release = release[:-3]
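+
+       # Illustrative sketch (not part of the original patch): given a ChangeLog
+       # such as
+       #
+       #     *foo-1.1 (01 Jan 2009)
+       #       newer entry text
+       #     *foo-1.0 (01 Dec 2008)
+       #       older entry text
+       #
+       # find_changelog_tags() returns [("foo-1.1", "  newer entry text\n"),
+       # ("foo-1.0", "  older entry text\n")], and calc_changelog() then keeps
+       # only the divisions newer than the installed version, up to and
+       # including the one about to be merged.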
+
+       def saveNomergeFavorites(self):
+               """Find atoms in favorites that are not in the mergelist and add them
+               to the world file if necessary."""
+               for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
+                       "--oneshot", "--onlydeps", "--pretend"):
+                       if x in self.myopts:
+                               return
+               root_config = self.roots[self.target_root]
+               world_set = root_config.sets["world"]
+
+               world_locked = False
+               if hasattr(world_set, "lock"):
+                       world_set.lock()
+                       world_locked = True
+
+               if hasattr(world_set, "load"):
+                       world_set.load() # maybe it's changed on disk
+
+               args_set = self._sets["args"]
+               portdb = self.trees[self.target_root]["porttree"].dbapi
+               added_favorites = set()
+               for x in self._set_nodes:
+                       pkg_type, root, pkg_key, pkg_status = x
+                       if pkg_status != "nomerge":
+                               continue
+
+                       try:
+                               myfavkey = create_world_atom(x, args_set, root_config)
+                               if myfavkey:
+                                       if myfavkey in added_favorites:
+                                               continue
+                                       added_favorites.add(myfavkey)
+                       except portage.exception.InvalidDependString, e:
+                               writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
+                                       (pkg_key, str(e)), noiselevel=-1)
+                               writemsg("!!! see '%s'\n\n" % os.path.join(
+                                       root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
+                               del e
+               all_added = []
+               for k in self._sets:
+                       if k in ("args", "world") or not root_config.sets[k].world_candidate:
+                               continue
+                       s = SETPREFIX + k
+                       if s in world_set:
+                               continue
+                       all_added.append(SETPREFIX + k)
+               all_added.extend(added_favorites)
+               all_added.sort()
+               for a in all_added:
+                       print ">>> Recording %s in \"world\" favorites file..." % \
+                               colorize("INFORM", str(a))
+               if all_added:
+                       world_set.update(all_added)
+
+               if world_locked:
+                       world_set.unlock()
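+
+       # Illustrative sketch (not part of the original patch): running e.g.
+       # "emerge app-misc/foo" when foo is already installed and up to date
+       # produces a "nomerge" node, and the loop above can still record
+       # "app-misc/foo" in the world file (unless an option such as --oneshot
+       # causes the early return at the top of this method).  Package sets
+       # given as arguments are recorded by name, as SETPREFIX + name, rather
+       # than being expanded into their member atoms.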
+
+       def loadResumeCommand(self, resume_data, skip_masked=True,
+               skip_missing=True):
+               """
+               Add a resume command to the graph and validate it in the process.  This
+               will raise a PackageNotFound exception if a package is not available.
+               """
+
+               if not isinstance(resume_data, dict):
+                       return False
+
+               mergelist = resume_data.get("mergelist")
+               if not isinstance(mergelist, list):
+                       mergelist = []
+
+               fakedb = self.mydbapi
+               trees = self.trees
+               serialized_tasks = []
+               masked_tasks = []
+               for x in mergelist:
+                       if not (isinstance(x, list) and len(x) == 4):
+                               continue
+                       pkg_type, myroot, pkg_key, action = x
+                       if pkg_type not in self.pkg_tree_map:
+                               continue
+                       if action != "merge":
+                               continue
+                       tree_type = self.pkg_tree_map[pkg_type]
+                       mydb = trees[myroot][tree_type].dbapi
+                       db_keys = list(self._trees_orig[myroot][
+                               tree_type].dbapi._aux_cache_keys)
+                       try:
+                               metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
+                       except KeyError:
+                               # It does not exist or it is corrupt.
+                               if action == "uninstall":
+                                       continue
+                               if skip_missing:
+                                       # TODO: log these somewhere
+                                       continue
+                               raise portage.exception.PackageNotFound(pkg_key)
+                       installed = action == "uninstall"
+                       built = pkg_type != "ebuild"
+                       root_config = self.roots[myroot]
+                       pkg = Package(built=built, cpv=pkg_key,
+                               installed=installed, metadata=metadata,
+                               operation=action, root_config=root_config,
+                               type_name=pkg_type)
+                       if pkg_type == "ebuild":
+                               pkgsettings = self.pkgsettings[myroot]
+                               pkgsettings.setcpv(pkg)
+                               pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
+                               pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
+                       self._pkg_cache[pkg] = pkg
+
+                       root_config = self.roots[pkg.root]
+                       if "merge" == pkg.operation and \
+                               not visible(root_config.settings, pkg):
+                               if skip_masked:
+                                       masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
+                               else:
+                                       self._unsatisfied_deps_for_display.append(
+                                               ((pkg.root, "="+pkg.cpv), {"myparent":None}))
+
+                       fakedb[myroot].cpv_inject(pkg)
+                       serialized_tasks.append(pkg)
+                       self.spinner.update()
+
+               if self._unsatisfied_deps_for_display:
+                       return False
+
+               if not serialized_tasks or "--nodeps" in self.myopts:
+                       self._serialized_tasks_cache = serialized_tasks
+                       self._scheduler_graph = self.digraph
+               else:
+                       self._select_package = self._select_pkg_from_graph
+                       self.myparams.add("selective")
+                       # Always traverse deep dependencies in order to account for
+                       # potentially unsatisfied dependencies of installed packages.
+                       # This is necessary for correct --keep-going or --resume operation
+                       # in case a package from a group of circularly dependent packages
+                       # fails. In this case, a package which has recently been installed
+                       # may have an unsatisfied circular dependency (pulled in by
+                       # PDEPEND, for example). So, even though a package is already
+                       # installed, it may not have all of its dependencies satisfied, so
+                       # it may not be usable. If such a package is in the subgraph of
+                       # deep dependencies of a scheduled build, that build needs to
+                       # be cancelled. In order for this type of situation to be
+                       # recognized, deep traversal of dependencies is required.
+                       self.myparams.add("deep")
+
+                       favorites = resume_data.get("favorites")
+                       args_set = self._sets["args"]
+                       if isinstance(favorites, list):
+                               args = self._load_favorites(favorites)
+                       else:
+                               args = []
+
+                       for task in serialized_tasks:
+                               if isinstance(task, Package) and \
+                                       task.operation == "merge":
+                                       if not self._add_pkg(task, None):
+                                               return False
+
+                       # Packages for argument atoms need to be explicitly
+                       # added via _add_pkg() so that they are included in the
+                       # digraph (needed at least for --tree display).
+                       for arg in args:
+                               for atom in arg.set:
+                                       pkg, existing_node = self._select_package(
+                                               arg.root_config.root, atom)
+                                       if existing_node is None and \
+                                               pkg is not None:
+                                               if not self._add_pkg(pkg, Dependency(atom=atom,
+                                                       root=pkg.root, parent=arg)):
+                                                       return False
+
+                       # Allow unsatisfied deps here to avoid showing a masking
+                       # message for an unsatisfied dep that isn't necessarily
+                       # masked.
+                       if not self._create_graph(allow_unsatisfied=True):
+                               return False
+
+                       unsatisfied_deps = []
+                       for dep in self._unsatisfied_deps:
+                               if not isinstance(dep.parent, Package):
+                                       continue
+                               if dep.parent.operation == "merge":
+                                       unsatisfied_deps.append(dep)
+                                       continue
+
+                               # For unsatisfied deps of installed packages, only account for
+                               # them if they are in the subgraph of dependencies of a package
+                               # which is scheduled to be installed.
+                               unsatisfied_install = False
+                               traversed = set()
+                               dep_stack = self.digraph.parent_nodes(dep.parent)
+                               while dep_stack:
+                                       node = dep_stack.pop()
+                                       if not isinstance(node, Package):
+                                               continue
+                                       if node.operation == "merge":
+                                               unsatisfied_install = True
+                                               break
+                                       if node in traversed:
+                                               continue
+                                       traversed.add(node)
+                                       dep_stack.extend(self.digraph.parent_nodes(node))
+
+                               if unsatisfied_install:
+                                       unsatisfied_deps.append(dep)
+
+                       if masked_tasks or unsatisfied_deps:
+                               # This probably means that a required package
+                               # was dropped via --skipfirst. It makes the
+                               # resume list invalid, so convert it to a
+                               # UnsatisfiedResumeDep exception.
+                               raise self.UnsatisfiedResumeDep(self,
+                                       masked_tasks + unsatisfied_deps)
+                       self._serialized_tasks_cache = None
+                       try:
+                               self.altlist()
+                       except self._unknown_internal_error:
+                               return False
+
+               return True
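+
+       # Illustrative sketch (not part of the original patch): the resume_data
+       # consumed above is the mtimedb["resume"] dict, roughly of the form
+       #
+       #     {"mergelist": [["ebuild", "/", "app-misc/foo-1.0", "merge"], ...],
+       #      "favorites": ["app-misc/foo", "@world"]}
+       #
+       # (values assumed; "@" stands for SETPREFIX).  Mergelist entries whose
+       # package type is unknown or whose action is not "merge" are silently
+       # skipped.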
+
+       def _load_favorites(self, favorites):
+               """
+               Use a list of favorites to resume state from a
+               previous select_files() call. This creates similar
+               DependencyArg instances to those that would have
+               been created by the original select_files() call.
+               This allows Package instances to be matched with
+               DependencyArg instances during graph creation.
+               """
+               root_config = self.roots[self.target_root]
+               getSetAtoms = root_config.setconfig.getSetAtoms
+               sets = root_config.sets
+               args = []
+               for x in favorites:
+                       if not isinstance(x, basestring):
+                               continue
+                       if x in ("system", "world"):
+                               x = SETPREFIX + x
+                       if x.startswith(SETPREFIX):
+                               s = x[len(SETPREFIX):]
+                               if s not in sets:
+                                       continue
+                               if s in self._sets:
+                                       continue
+                               # Recursively expand sets so that containment tests in
+                               # self._get_parent_sets() properly match atoms in nested
+                               # sets (like if world contains system).
+                               expanded_set = InternalPackageSet(
+                                       initial_atoms=getSetAtoms(s))
+                               self._sets[s] = expanded_set
+                               args.append(SetArg(arg=x, set=expanded_set,
+                                       root_config=root_config))
+                       else:
+                               if not portage.isvalidatom(x):
+                                       continue
+                               args.append(AtomArg(arg=x, atom=x,
+                                       root_config=root_config))
+
+               self._set_args(args)
+               return args
+
+       class UnsatisfiedResumeDep(portage.exception.PortageException):
+               """
+               A dependency of a resume list is not installed. This
+               can occur when a required package is dropped from the
+               merge list via --skipfirst.
+               """
+               def __init__(self, depgraph, value):
+                       portage.exception.PortageException.__init__(self, value)
+                       self.depgraph = depgraph
+
+       class _internal_exception(portage.exception.PortageException):
+               def __init__(self, value=""):
+                       portage.exception.PortageException.__init__(self, value)
+
+       class _unknown_internal_error(_internal_exception):
+               """
+               Used by the depgraph internally to terminate graph creation.
+               The specific reason for the failure should have been dumped
+               to stderr; unfortunately, the exact reason for the failure
+               may not be known.
+               """
+
+       class _serialize_tasks_retry(_internal_exception):
+               """
+               This is raised by the _serialize_tasks() method when it needs to
+               be called again for some reason. The only case that it's currently
+               used for is when neglected dependencies need to be added to the
+               graph in order to avoid making a potentially unsafe decision.
+               """
+
+       class _dep_check_composite_db(portage.dbapi):
+               """
+               A dbapi-like interface that is optimized for use in dep_check() calls.
+               This is built on top of the existing depgraph package selection logic.
+               Some packages that have been added to the graph may be masked from this
+               view in order to influence the atom preference selection that occurs
+               via dep_check().
+               """
+               def __init__(self, depgraph, root):
+                       portage.dbapi.__init__(self)
+                       self._depgraph = depgraph
+                       self._root = root
+                       self._match_cache = {}
+                       self._cpv_pkg_map = {}
+
+               def _clear_cache(self):
+                       self._match_cache.clear()
+                       self._cpv_pkg_map.clear()
+
+               def match(self, atom):
+                       ret = self._match_cache.get(atom)
+                       if ret is not None:
+                               return ret[:]
+                       orig_atom = atom
+                       if "/" not in atom:
+                               atom = self._dep_expand(atom)
+                       pkg, existing = self._depgraph._select_package(self._root, atom)
+                       if not pkg:
+                               ret = []
+                       else:
+                               # Return the highest available from select_package() as well as
+                               # any matching slots in the graph db.
+                               slots = set()
+                               slots.add(pkg.metadata["SLOT"])
+                               atom_cp = portage.dep_getkey(atom)
+                               if pkg.cp.startswith("virtual/"):
+                                       # For new-style virtual lookahead that occurs inside
+                                       # dep_check(), examine all slots. This is needed
+                                       # so that newer slots will not unnecessarily be pulled in
+                                       # when a satisfying lower slot is already installed. For
+                                       # example, if virtual/jdk-1.4 is satisfied via kaffe then
+                                       # there's no need to pull in a newer slot to satisfy a
+                                       # virtual/jdk dependency.
+                                       for db, pkg_type, built, installed, db_keys in \
+                                               self._depgraph._filtered_trees[self._root]["dbs"]:
+                                               for cpv in db.match(atom):
+                                                       if portage.cpv_getkey(cpv) != pkg.cp:
+                                                               continue
+                                                       slots.add(db.aux_get(cpv, ["SLOT"])[0])
+                               ret = []
+                               if self._visible(pkg):
+                                       self._cpv_pkg_map[pkg.cpv] = pkg
+                                       ret.append(pkg.cpv)
+                               slots.remove(pkg.metadata["SLOT"])
+                               while slots:
+                                       slot_atom = "%s:%s" % (atom_cp, slots.pop())
+                                       pkg, existing = self._depgraph._select_package(
+                                               self._root, slot_atom)
+                                       if not pkg:
+                                               continue
+                                       if not self._visible(pkg):
+                                               continue
+                                       self._cpv_pkg_map[pkg.cpv] = pkg
+                                       ret.append(pkg.cpv)
+                               if ret:
+                                       self._cpv_sort_ascending(ret)
+                       self._match_cache[orig_atom] = ret
+                       return ret[:]
+
+               def _visible(self, pkg):
+                       if pkg.installed and "selective" not in self._depgraph.myparams:
+                               try:
+                                       arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
+                               except (StopIteration, portage.exception.InvalidDependString):
+                                       arg = None
+                               if arg:
+                                       return False
+                       if pkg.installed:
+                               try:
+                                       if not visible(
+                                               self._depgraph.pkgsettings[pkg.root], pkg):
+                                               return False
+                               except portage.exception.InvalidDependString:
+                                       pass
+                       in_graph = self._depgraph._slot_pkg_map[
+                               self._root].get(pkg.slot_atom)
+                       if in_graph is None:
+                               # Mask choices for packages which are not the highest visible
+                               # version within their slot (since they usually trigger slot
+                               # conflicts).
+                               highest_visible, in_graph = self._depgraph._select_package(
+                                       self._root, pkg.slot_atom)
+                               if pkg != highest_visible:
+                                       return False
+                       elif in_graph != pkg:
+                               # Mask choices for packages that would trigger a slot
+                               # conflict with a previously selected package.
+                               return False
+                       return True
+
+               def _dep_expand(self, atom):
+                       """
+                       This is only needed for old installed packages that may
+                       contain atoms that are not fully qualified with a specific
+                       category. Emulate the cpv_expand() function that's used by
+                       dbapi.match() in cases like this. If there are multiple
+                       matches, it's often due to a new-style virtual that has
+                       been added, so try to filter those out to avoid raising
+                       a ValueError.
+                       """
+                       root_config = self._depgraph.roots[self._root]
+                       orig_atom = atom
+                       expanded_atoms = self._depgraph._dep_expand(root_config, atom)
+                       if len(expanded_atoms) > 1:
+                               non_virtual_atoms = []
+                               for x in expanded_atoms:
+                                       if not portage.dep_getkey(x).startswith("virtual/"):
+                                               non_virtual_atoms.append(x)
+                               if len(non_virtual_atoms) == 1:
+                                       expanded_atoms = non_virtual_atoms
+                       if len(expanded_atoms) > 1:
+                               # compatible with portage.cpv_expand()
+                               raise portage.exception.AmbiguousPackageName(
+                                       [portage.dep_getkey(x) for x in expanded_atoms])
+                       if expanded_atoms:
+                               atom = expanded_atoms[0]
+                       else:
+                               null_atom = insert_category_into_atom(atom, "null")
+                               null_cp = portage.dep_getkey(null_atom)
+                               cat, atom_pn = portage.catsplit(null_cp)
+                               virts_p = root_config.settings.get_virts_p().get(atom_pn)
+                               if virts_p:
+                                       # Allow the resolver to choose which virtual.
+                                       atom = insert_category_into_atom(atom, "virtual")
+                               else:
+                                       atom = insert_category_into_atom(atom, "null")
+                       return atom
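+
+               # Illustrative sketch (not part of the original patch): a bare
+               # dependency such as "foo" recorded by an old installed package
+               # might expand to both e.g. dev-libs/foo and virtual/foo, in
+               # which case the non-virtual candidate is preferred.  If nothing
+               # matches at all, the atom is given the "virtual" category when
+               # a PROVIDE-based virtual named "foo" exists, and the dummy
+               # "null" category otherwise, so that the subsequent match()
+               # simply finds nothing.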
+
+               def aux_get(self, cpv, wants):
+                       metadata = self._cpv_pkg_map[cpv].metadata
+                       return [metadata.get(x, "") for x in wants]
+
+
+def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
+
+       if "--quiet" in myopts:
+               print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
+               print "!!! one of the following fully-qualified ebuild names instead:\n"
+               for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
+                       print "    " + colorize("INFORM", cp)
+               return
+
+       s = search(root_config, spinner, "--searchdesc" in myopts,
+               "--quiet" not in myopts, "--usepkg" in myopts,
+               "--usepkgonly" in myopts)
+       null_cp = portage.dep_getkey(insert_category_into_atom(
+               arg, "null"))
+       cat, atom_pn = portage.catsplit(null_cp)
+       s.searchkey = atom_pn
+       for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
+               s.addCP(cp)
+       s.output()
+       print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
+       print "!!! one of the above fully-qualified ebuild names instead.\n"
+
+def insert_category_into_atom(atom, category):
+       alphanum = re.search(r'\w', atom)
+       if alphanum:
+               ret = atom[:alphanum.start()] + "%s/" % category + \
+                       atom[alphanum.start():]
+       else:
+               ret = None
+       return ret
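+
+# Illustrative sketch (not part of the original patch): the category is spliced
+# in just before the first word character, so version operators are preserved:
+#
+#     insert_category_into_atom(">=foo-1.0", "null")  ->  ">=null/foo-1.0"
+#     insert_category_into_atom("foo", "virtual")     ->  "virtual/foo"
+#
+# If the string contains no word character at all, None is returned.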
+
+def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
+       """
+       Construct a depgraph for the given resume list. This will raise
+       PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
+       @rtype: tuple
+       @returns: (success, depgraph, dropped_tasks)
+       """
+       skip_masked = True
+       skip_unsatisfied = True
+       mergelist = mtimedb["resume"]["mergelist"]
+       dropped_tasks = set()
+       while True:
+               mydepgraph = depgraph(settings, trees,
+                       myopts, myparams, spinner)
+               try:
+                       success = mydepgraph.loadResumeCommand(mtimedb["resume"],
+                               skip_masked=skip_masked)
+               except depgraph.UnsatisfiedResumeDep, e:
+                       if not skip_unsatisfied:
+                               raise
+
+                       graph = mydepgraph.digraph
+                       unsatisfied_parents = dict((dep.parent, dep.parent) \
+                               for dep in e.value)
+                       traversed_nodes = set()
+                       unsatisfied_stack = list(unsatisfied_parents)
+                       while unsatisfied_stack:
+                               pkg = unsatisfied_stack.pop()
+                               if pkg in traversed_nodes:
+                                       continue
+                               traversed_nodes.add(pkg)
+
+                               # If this package was pulled in by a parent
+                               # package scheduled for merge, removing this
+                               # package may cause the parent package's
+                               # dependency to become unsatisfied.
+                               for parent_node in graph.parent_nodes(pkg):
+                                       if not isinstance(parent_node, Package) \
+                                               or parent_node.operation not in ("merge", "nomerge"):
+                                               continue
+                                       unsatisfied = \
+                                               graph.child_nodes(parent_node,
+                                               ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
+                                       if pkg in unsatisfied:
+                                               unsatisfied_parents[parent_node] = parent_node
+                                               unsatisfied_stack.append(parent_node)
+
+                       pruned_mergelist = []
+                       for x in mergelist:
+                               if isinstance(x, list) and \
+                                       tuple(x) not in unsatisfied_parents:
+                                       pruned_mergelist.append(x)
+
+                       # If the mergelist doesn't shrink then this loop is infinite.
+                       if len(pruned_mergelist) == len(mergelist):
+                               # This happens if a package can't be dropped because
+                               # it's already installed, but it has unsatisfied PDEPEND.
+                               raise
+                       mergelist[:] = pruned_mergelist
+
+                       # Exclude installed packages that have been removed from the graph due
+                       # to failure to build/install runtime dependencies after the dependent
+                       # package has already been installed.
+                       dropped_tasks.update(pkg for pkg in \
+                               unsatisfied_parents if pkg.operation != "nomerge")
+                       mydepgraph.break_refs(unsatisfied_parents)
+
+                       del e, graph, traversed_nodes, \
+                               unsatisfied_parents, unsatisfied_stack
+                       continue
+               else:
+                       break
+       return (success, mydepgraph, dropped_tasks)
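+
+# Illustrative usage sketch (not part of the original patch):
+#
+#     success, mydepgraph, dropped_tasks = resume_depgraph(
+#             settings, trees, mtimedb, myopts, myparams, spinner)
+#
+# dropped_tasks holds the packages pruned from the resume list because their
+# dependencies could no longer be satisfied (typically after --skipfirst),
+# which the caller can report before proceeding.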
+
+def get_mask_info(root_config, cpv, pkgsettings,
+       db, pkg_type, built, installed, db_keys):
+       eapi_masked = False
+       try:
+               metadata = dict(izip(db_keys,
+                       db.aux_get(cpv, db_keys)))
+       except KeyError:
+               metadata = None
+       if metadata and not built:
+               pkgsettings.setcpv(cpv, mydb=metadata)
+               metadata["USE"] = pkgsettings["PORTAGE_USE"]
+               metadata['CHOST'] = pkgsettings.get('CHOST', '')
+       if metadata is None:
+               mreasons = ["corruption"]
+       else:
+               eapi = metadata['EAPI']
+               if eapi[:1] == '-':
+                       eapi = eapi[1:]
+               if not portage.eapi_is_supported(eapi):
+                       mreasons = ['EAPI %s' % eapi]
+               else:
+                       pkg = Package(type_name=pkg_type, root_config=root_config,
+                               cpv=cpv, built=built, installed=installed, metadata=metadata)
+                       mreasons = get_masking_status(pkg, pkgsettings, root_config)
+       return metadata, mreasons
+
+def show_masked_packages(masked_packages):
+       shown_licenses = set()
+       shown_comments = set()
+       # Maybe there is both an ebuild and a binary. Only
+       # show one of them to avoid redundant output.
+       shown_cpvs = set()
+       have_eapi_mask = False
+       for (root_config, pkgsettings, cpv,
+               metadata, mreasons) in masked_packages:
+               if cpv in shown_cpvs:
+                       continue
+               shown_cpvs.add(cpv)
+               comment, filename = None, None
+               if "package.mask" in mreasons:
+                       comment, filename = \
+                               portage.getmaskingreason(
+                               cpv, metadata=metadata,
+                               settings=pkgsettings,
+                               portdb=root_config.trees["porttree"].dbapi,
+                               return_location=True)
+               missing_licenses = []
+               if metadata:
+                       if not portage.eapi_is_supported(metadata["EAPI"]):
+                               have_eapi_mask = True
+                       try:
+                               missing_licenses = \
+                                       pkgsettings._getMissingLicenses(
+                                               cpv, metadata)
+                       except portage.exception.InvalidDependString:
+                               # This will have already been reported
+                               # above via mreasons.
+                               pass
+
+               print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
+               if comment and comment not in shown_comments:
+                       print filename+":"
+                       print comment
+                       shown_comments.add(comment)
+               portdb = root_config.trees["porttree"].dbapi
+               for l in missing_licenses:
+                       l_path = portdb.findLicensePath(l)
+                       if l in shown_licenses:
+                               continue
+                       msg = ("A copy of the '%s' license" + \
+                       " is located at '%s'.") % (l, l_path)
+                       print msg
+                       print
+                       shown_licenses.add(l)
+       return have_eapi_mask
+
+def show_mask_docs():
+       print "For more information, see the MASKED PACKAGES section in the emerge"
+       print "man page or refer to the Gentoo Handbook."
+
+def filter_iuse_defaults(iuse):
+       for flag in iuse:
+               if flag.startswith("+") or flag.startswith("-"):
+                       yield flag[1:]
+               else:
+                       yield flag
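+
+# Illustrative sketch (not part of the original patch): an IUSE list such as
+# ["+doc", "-gtk", "nls"] is yielded as "doc", "gtk", "nls", i.e. the leading
+# '+'/'-' default markers are stripped.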
+
+def show_blocker_docs_link():
+       print
+       print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
+       print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
+       print
+       print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
+       print
+
+def get_masking_status(pkg, pkgsettings, root_config):
+
+       mreasons = portage.getmaskingstatus(
+               pkg, settings=pkgsettings,
+               portdb=root_config.trees["porttree"].dbapi)
+
+       if not pkg.installed:
+               if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
+                       mreasons.append("CHOST: %s" % \
+                               pkg.metadata["CHOST"])
+
+       if not pkg.metadata["SLOT"]:
+               mreasons.append("invalid: SLOT is undefined")
+
+       return mreasons
diff --git a/pym/_emerge/is_valid_package_atom.py b/pym/_emerge/is_valid_package_atom.py
new file mode 100644 (file)
index 0000000..bdfedc9
--- /dev/null
@@ -0,0 +1,16 @@
+import re
+
+try:
+       import portage
+except ImportError:
+       from os import path as osp
+       import sys
+       sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+       import portage
+
+def is_valid_package_atom(x):
+       if "/" not in x:
+               alphanum = re.search(r'\w', x)
+               if alphanum:
+                       x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
+       return portage.isvalidatom(x)
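+
+# Illustrative sketch (not part of the original patch): an argument without a
+# category, e.g. ">=foo-1.0", is validated as ">=cat/foo-1.0", so operators and
+# versions are accepted even when the category is unknown, while something like
+# "not an atom" is rejected; atoms that already contain a category are passed
+# to portage.isvalidatom() unchanged.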