Scheduler: terminate _merge_wait_queue
diff --git a/pym/_emerge/Scheduler.py b/pym/_emerge/Scheduler.py
index e94b0461bb135238bb2e38ba4fc6afdf4e51c162..b961e83c0e98d7e1321c483e0042c046f99a31e5 100644
--- a/pym/_emerge/Scheduler.py
+++ b/pym/_emerge/Scheduler.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from __future__ import print_function
@@ -7,10 +7,12 @@ import gc
 import gzip
 import logging
 import shutil
+import signal
 import sys
 import tempfile
 import textwrap
 import time
+import warnings
 import weakref
 import zlib
 
@@ -18,7 +20,7 @@ import portage
 from portage import StringIO
 from portage import os
 from portage import _encodings
-from portage import _unicode_encode
+from portage import _unicode_decode, _unicode_encode
 from portage.cache.mappings import slot_dict_class
 from portage.const import LIBC_PACKAGE_ATOM
 from portage.elog.messages import eerror
@@ -32,7 +34,9 @@ from portage.package.ebuild.digestcheck import digestcheck
 from portage.package.ebuild.digestgen import digestgen
 from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
 
+from _emerge.BinpkgFetcher import BinpkgFetcher
 from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
+from _emerge.BinpkgVerifier import BinpkgVerifier
 from _emerge.Blocker import Blocker
 from _emerge.BlockerDB import BlockerDB
 from _emerge.clear_caches import clear_caches
@@ -61,6 +65,9 @@ if sys.hexversion >= 0x3000000:
 
 class Scheduler(PollScheduler):
 
+       # max time between display status updates (milliseconds)
+       _max_display_latency = 3000
+
        _opts_ignore_blockers = \
                frozenset(["--buildpkgonly",
                "--fetchonly", "--fetch-all-uri",
@@ -137,17 +144,23 @@ class Scheduler(PollScheduler):
                        portage.exception.PortageException.__init__(self, value)
 
        def __init__(self, settings, trees, mtimedb, myopts,
-               spinner, mergelist, favorites, digraph):
+               spinner, mergelist=None, favorites=None, graph_config=None):
                PollScheduler.__init__(self)
+
+               if mergelist is not None:
+                       warnings.warn("The mergelist parameter of the " + \
+                               "_emerge.Scheduler constructor is now unused. Use " + \
+                               "the graph_config parameter instead.",
+                               DeprecationWarning, stacklevel=2)
+
                self.settings = settings
                self.target_root = settings["ROOT"]
                self.trees = trees
                self.myopts = myopts
                self._spinner = spinner
                self._mtimedb = mtimedb
-               self._mergelist = mergelist
                self._favorites = favorites
-               self._args_set = InternalPackageSet(favorites)
+               self._args_set = InternalPackageSet(favorites, allow_repo=True)
                self._build_opts = self._build_opts_class()
                for k in self._build_opts.__slots__:
                        setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
@@ -203,8 +216,6 @@ class Scheduler(PollScheduler):
                for root in self.trees:
                        self._config_pool[root] = []
 
-               self._init_installed_graph()
-
                fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
                        schedule=self._schedule_fetch)
                self._sched_iface = self._iface_class(
@@ -230,7 +241,8 @@ class Scheduler(PollScheduler):
                self._failed_pkgs_die_msgs = []
                self._post_mod_echo_msgs = []
                self._parallel_fetch = False
-               merge_count = len([x for x in mergelist \
+               self._init_graph(graph_config)
+               merge_count = len([x for x in self._mergelist \
                        if isinstance(x, Package) and x.operation == "merge"])
                self._pkg_count = self._pkg_count_class(
                        curval=0, maxval=merge_count)
@@ -244,8 +256,6 @@ class Scheduler(PollScheduler):
                self._job_delay_exp = 1.5
                self._previous_job_start_time = None
 
-               self._set_digraph(digraph)
-
                # This is used to memoize the _choose_pkg() result when
                # no packages can be chosen until one of the existing
                # jobs completes.
@@ -283,7 +293,26 @@ class Scheduler(PollScheduler):
                        self._running_portage = self._pkg(cpv, "installed",
                                self._running_root, installed=True)
 
-       def _init_installed_graph(self):
+       def _terminate_tasks(self):
+               self._status_display.quiet = True
+               # Remove running_tasks that have been added to queues but
+               # haven't been started yet, since we're going to discard
+               # them and their start/exit handlers won't be called.
+               for build in self._task_queues.jobs._task_queue:
+                       self._running_tasks.remove(build.pkg)
+               if self._merge_wait_queue:
+                       for merge in self._merge_wait_queue:
+                               self._running_tasks.remove(merge.merge.pkg)
+                       del self._merge_wait_queue[:]
+               for merge in self._task_queues.merge._task_queue:
+                       # Setup phases may be scheduled in this queue, but
+                       # we're only interested in the PackageMerge instances.
+                       if isinstance(merge, PackageMerge):
+                               self._running_tasks.remove(merge.merge.pkg)
+               for q in self._task_queues.values():
+                       q.clear()
+
+       def _init_graph(self, graph_config):
                """
                Initialize structures used for dependency calculations
                involving currently installed packages.
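
_terminate_tasks() in the hunk above is the core of this commit: when the scheduler is terminated, builds that were queued but never started are dropped from _running_tasks, PackageMerge instances waiting in _merge_wait_queue (or already sitting in the merge queue) are discarded the same way, and every task queue is cleared. A minimal sketch of that drain-and-clear pattern, using simplified stand-in classes rather than the real _emerge types:

# Illustrative stand-ins; Build/Merge only mirror the .pkg and .merge.pkg
# attribute shapes that _terminate_tasks() relies on.
class Build(object):
    def __init__(self, pkg):
        self.pkg = pkg

class Merge(object):
    def __init__(self, build):
        self.merge = build

running_tasks = {"pkg-a", "pkg-b", "pkg-c"}
job_queue = [Build("pkg-a")]                # queued, never started
merge_wait_queue = [Merge(Build("pkg-b"))]  # merges waiting for exclusive access

for build in job_queue:
    running_tasks.remove(build.pkg)
for merge in merge_wait_queue:
    running_tasks.remove(merge.merge.pkg)
del merge_wait_queue[:]                     # clear the wait queue in place

# Only "pkg-c", the task that actually started, remains tracked.
print(running_tasks)
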
@@ -292,25 +321,72 @@ class Scheduler(PollScheduler):
                # that's updated incrementally with each upgrade/uninstall operation
                # This will be useful for making quick and safe decisions with respect
                # to aggressive parallelization discussed in bug #279623.
+               self._set_graph_config(graph_config)
                self._blocker_db = {}
                for root in self.trees:
-                       self._blocker_db[root] = \
-                               BlockerDB(FakeVartree(self.trees[root]["root_config"]))
+                       if graph_config is None:
+                               fake_vartree = FakeVartree(self.trees[root]["root_config"],
+                                       pkg_cache=self._pkg_cache)
+                       else:
+                               fake_vartree = graph_config.trees[root]['vartree']
+                       self._blocker_db[root] = BlockerDB(fake_vartree)
 
-       def _destroy_installed_graph(self):
+       def _destroy_graph(self):
                """
-               Use this to free memory before calling _calc_resume_list().
-               After _calc_resume_list(), the _init_installed_graph() and
-               _set_digraph() methods need to be called in order to
-               re-generate the structures that this method destroys. 
+               Use this to free memory at the beginning of _calc_resume_list().
+               After _calc_resume_list(), the _init_graph() method
+               must be called in order to re-generate the structures that
+               this method destroys. 
                """
                self._blocker_db = None
-               self._set_digraph(None)
+               self._set_graph_config(None)
                gc.collect()
 
        def _poll(self, timeout=None):
+
                self._schedule()
-               PollScheduler._poll(self, timeout=timeout)
+
+               if timeout is None:
+                       while True:
+                               if not self._poll_event_handlers:
+                                       self._schedule()
+                                       if not self._poll_event_handlers:
+                                               raise StopIteration(
+                                                       "timeout is None and there are no poll() event handlers")
+                               previous_count = len(self._poll_event_queue)
+                               PollScheduler._poll(self, timeout=self._max_display_latency)
+                               self._status_display.display()
+                               if previous_count != len(self._poll_event_queue):
+                                       break
+
+               elif timeout <= self._max_display_latency:
+                       PollScheduler._poll(self, timeout=timeout)
+                       if timeout == 0:
+                               # The display is updated by _schedule() above, so it would be
+                               # redundant to update it here when timeout is 0.
+                               pass
+                       else:
+                               self._status_display.display()
+
+               else:
+                       remaining_timeout = timeout
+                       start_time = time.time()
+                       while True:
+                               previous_count = len(self._poll_event_queue)
+                               PollScheduler._poll(self,
+                                       timeout=min(self._max_display_latency, remaining_timeout))
+                               self._status_display.display()
+                               if previous_count != len(self._poll_event_queue):
+                                       break
+                               elapsed_time = time.time() - start_time
+                               if elapsed_time < 0:
+                                       # The system clock has changed such that start_time
+                                       # is now in the future, so just assume that the
+                                       # timeout has already elapsed.
+                                       break
+                               remaining_timeout = timeout - 1000 * elapsed_time
+                               if remaining_timeout <= 0:
+                                       break
 
        def _set_max_jobs(self, max_jobs):
                self._max_jobs = max_jobs
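
The rewritten _poll() above caps the time between status display refreshes at _max_display_latency (3000 ms): long or unbounded timeouts are polled in display-latency sized slices, the display is refreshed after each slice, and a backwards jump of the system clock is treated as an expired timeout. A rough standalone sketch of the finite-timeout branches, with poll_once and display standing in for PollScheduler._poll and the status display object:

import time

MAX_DISPLAY_LATENCY = 3000  # milliseconds, as in the class attribute above

def poll_with_display(poll_once, display, timeout):
    """Poll in display-latency sized slices; timeout is in milliseconds."""
    if timeout <= MAX_DISPLAY_LATENCY:
        events = poll_once(timeout)
        if timeout != 0:
            # With timeout == 0 the display was just refreshed by the
            # scheduler, so skip the redundant update (as in the patch).
            display()
        return events

    start_time = time.time()
    remaining = timeout
    while True:
        events = poll_once(min(MAX_DISPLAY_LATENCY, remaining))
        display()
        if events:
            return events
        elapsed = time.time() - start_time
        if elapsed < 0:
            # The clock moved backwards; treat the timeout as expired.
            return events
        remaining = timeout - 1000 * elapsed
        if remaining <= 0:
            return events
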
@@ -374,15 +450,32 @@ class Scheduler(PollScheduler):
                                interactive_tasks.append(task)
                return interactive_tasks
 
-       def _set_digraph(self, digraph):
+       def _set_graph_config(self, graph_config):
+
+               if graph_config is None:
+                       self._graph_config = None
+                       self._pkg_cache = {}
+                       self._digraph = None
+                       self._mergelist = []
+                       self._deep_system_deps.clear()
+                       return
+
+               self._graph_config = graph_config
+               self._pkg_cache = graph_config.pkg_cache
+               self._digraph = graph_config.graph
+               self._mergelist = graph_config.mergelist
+
                if "--nodeps" in self.myopts or \
-                       digraph is None or \
                        (self._max_jobs is not True and self._max_jobs < 2):
                        # save some memory
                        self._digraph = None
+                       graph_config.graph = None
+                       graph_config.pkg_cache.clear()
+                       self._deep_system_deps.clear()
+                       for pkg in self._mergelist:
+                               self._pkg_cache[pkg] = pkg
                        return
 
-               self._digraph = digraph
                self._find_system_deps()
                self._prune_digraph()
                self._prevent_builddir_collisions()
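
_set_graph_config() replaces the old mergelist/digraph constructor arguments with a single graph_config object, and when --nodeps is in effect or fewer than two jobs may run, it drops the digraph and the shared package cache to save memory, keeping only the mergelist packages cached. A hedged sketch of the kind of container involved; the real object comes from depgraph.schedulerGraph(), and only the attributes touched by this patch are modeled here:

class GraphConfig(object):
    """Illustrative stand-in for the graph_config argument."""
    def __init__(self, trees, pkg_cache, graph, mergelist):
        self.trees = trees          # per-root trees, including FakeVartree instances
        self.pkg_cache = pkg_cache  # shared Package cache keyed by hash key
        self.graph = graph          # the dependency digraph
        self.mergelist = mergelist  # ordered list of merge/uninstall tasks
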
@@ -589,7 +682,9 @@ class Scheduler(PollScheduler):
                type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
                root_config = self.trees[pkg_dblink.myroot]["root_config"]
                installed = type_name == "installed"
-               return self._pkg(cpv, type_name, root_config, installed=installed)
+               repo = pkg_dblink.settings.get("PORTAGE_REPO_NAME")
+               return self._pkg(cpv, type_name, root_config,
+                       installed=installed, myrepo=repo)
 
        def _dblink_elog(self, pkg_dblink, phase, func, msgs):
 
@@ -627,7 +722,6 @@ class Scheduler(PollScheduler):
 
                scheduler = self._sched_iface
                settings = pkg_dblink.settings
-               pkg = self._dblink_pkg(pkg_dblink)
                background = self._background
                log_path = settings.get("PORTAGE_LOG_FILE")
 
@@ -682,7 +776,7 @@ class Scheduler(PollScheduler):
                                'digest' not in pkgsettings.features:
                                continue
                        portdb = x.root_config.trees['porttree'].dbapi
-                       ebuild_path = portdb.findname(x.cpv)
+                       ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
                        if ebuild_path is None:
                                raise AssertionError("ebuild not found for '%s'" % x.cpv)
                        pkgsettings['O'] = os.path.dirname(ebuild_path)
@@ -759,7 +853,7 @@ class Scheduler(PollScheduler):
                        root_config = x.root_config
                        portdb = root_config.trees["porttree"].dbapi
                        quiet_config = quiet_settings[root_config.root]
-                       ebuild_path = portdb.findname(x.cpv)
+                       ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
                        if ebuild_path is None:
                                raise AssertionError("ebuild not found for '%s'" % x.cpv)
                        quiet_config["O"] = os.path.dirname(ebuild_path)
@@ -853,7 +947,8 @@ class Scheduler(PollScheduler):
                                return True
                        elif pkg.cpv != self._running_portage.cpv or \
                                '9999' in pkg.cpv or \
-                               'git' in pkg.inherited:
+                               'git' in pkg.inherited or \
+                               'git-2' in pkg.inherited:
                                return True
                return False
 
@@ -902,17 +997,30 @@ class Scheduler(PollScheduler):
                        if myopt not in bad_resume_opts:
                                if myarg is True:
                                        mynewargv.append(myopt)
+                               elif isinstance(myarg, list):
+                                       # arguments like --exclude that use 'append' action
+                                       for x in myarg:
+                                               mynewargv.append("%s=%s" % (myopt, x))
                                else:
-                                       mynewargv.append(myopt +"="+ str(myarg))
+                                       mynewargv.append("%s=%s" % (myopt, myarg))
                # priority only needs to be adjusted on the first run
                os.environ["PORTAGE_NICENESS"] = "0"
                os.execv(mynewargv[0], mynewargv)
 
        def _run_pkg_pretend(self):
-               shown_verifying_msg = False
+               """
+               Since pkg_pretend output may be important, this method sends all
+               output directly to stdout (regardless of options like --quiet or
+               --jobs).
+               """
 
                failures = 0
 
+               # Use a local PollScheduler instance here, since we don't
+               # want tasks here to trigger the usual Scheduler callbacks
+               # that handle job scheduling and status display.
+               sched_iface = PollScheduler().sched_iface
+
                for x in self._mergelist:
                        if not isinstance(x, Package):
                                continue
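
The restart path in the previous hunk now copes with option values stored as lists (options such as --exclude that use optparse's "append" action) by emitting one "--opt=value" argument per element. A small self-contained sketch of the same argv reconstruction, using a made-up opts mapping:

# The opts mapping and its values are invented for illustration.
opts = {
    "--jobs": "4",                                          # plain valued option
    "--quiet": True,                                        # flag option
    "--exclude": ["sys-apps/portage", "app-shells/bash"],   # 'append' action
}

argv = ["emerge", "--resume"]
for opt, val in opts.items():
    if val is True:
        argv.append(opt)
    elif isinstance(val, list):
        argv.extend("%s=%s" % (opt, x) for x in val)
    else:
        argv.append("%s=%s" % (opt, val))

print(argv)
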
@@ -926,68 +1034,85 @@ class Scheduler(PollScheduler):
                        if "pretend" not in x.metadata.defined_phases:
                                continue
 
-                       if not shown_verifying_msg and self._background:
-                               shown_verifying_msg = True
-                               self._status_msg("Running pre-merge checks")
-
-                       if not self._background:
-                               out_str =">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
-                               portage.util.writemsg_stdout(out_str, noiselevel=-1)
+                       out_str = ">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
+                       portage.util.writemsg_stdout(out_str, noiselevel=-1)
 
                        root_config = x.root_config
                        settings = self.pkgsettings[root_config.root]
+                       settings.setcpv(x)
                        tmpdir = tempfile.mkdtemp()
                        tmpdir_orig = settings["PORTAGE_TMPDIR"]
                        settings["PORTAGE_TMPDIR"] = tmpdir
 
-                       if x.built:
-                               tree = "bintree"
-                               bintree = root_config.trees["bintree"].dbapi.bintree
-                               if bintree.isremote(x.cpv):
-                                       fetcher = BinpkgPrefetcher(background=self._background,
-                                               logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=x, scheduler=self._sched_iface)
-                                       fetcher.start()
-                                       fetcher.wait()
-
-                               tbz2_file = bintree.getname(x.cpv)
-                               infloc = os.path.join(tmpdir, x.category, x.pf, "build-info")
-                               os.makedirs(infloc)
-                               portage.xpak.tbz2(tbz2_file).unpackinfo(infloc)
-                               ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
-
-                       else:
-                               tree = "porttree"
-                               portdb = root_config.trees["porttree"].dbapi
-                               ebuild_path = portdb.findname(x.cpv)
-                               if ebuild_path is None:
-                                       raise AssertionError("ebuild not found for '%s'" % x.cpv)
-
-                       portage.package.ebuild.doebuild.doebuild_environment(ebuild_path,
-                               "pretend", root_config.root, settings,
-                               debug=(settings.get("PORTAGE_DEBUG", "") == 1),
-                               mydbapi=self.trees[settings["ROOT"]][tree].dbapi, use_cache=1)
-                       prepare_build_dirs(root_config.root, settings, cleanup=0)
-
-                       vardb = root_config.trees['vartree'].dbapi
-                       settings["REPLACING_VERSIONS"] = " ".join(
-                               set(portage.versions.cpv_getversion(match) \
-                                       for match in vardb.match(x.slot_atom) + \
-                                       vardb.match('='+x.cpv)))
-                       pretend_phase = EbuildPhase(background=self._background,
-                               phase="pretend", scheduler=self._sched_iface,
-                               settings=settings)
-
-                       pretend_phase.start()
-                       ret = pretend_phase.wait()
+                       try:
+                               if x.built:
+                                       tree = "bintree"
+                                       bintree = root_config.trees["bintree"].dbapi.bintree
+                                       fetched = False
+
+                                       # Display fetch on stdout, so that it's always clear what
+                                       # is consuming time here.
+                                       if bintree.isremote(x.cpv):
+                                               fetcher = BinpkgFetcher(pkg=x,
+                                                       scheduler=sched_iface)
+                                               fetcher.start()
+                                               if fetcher.wait() != os.EX_OK:
+                                                       failures += 1
+                                                       continue
+                                               fetched = fetcher.pkg_path
+
+                                       verifier = BinpkgVerifier(pkg=x,
+                                               scheduler=sched_iface)
+                                       verifier.start()
+                                       if verifier.wait() != os.EX_OK:
+                                               failures += 1
+                                               continue
 
-                       portage.elog.elog_process(x.cpv, settings)
+                                       if fetched:
+                                               bintree.inject(x.cpv, filename=fetched)
+                                       tbz2_file = bintree.getname(x.cpv)
+                                       infloc = os.path.join(tmpdir, x.category, x.pf, "build-info")
+                                       os.makedirs(infloc)
+                                       portage.xpak.tbz2(tbz2_file).unpackinfo(infloc)
+                                       ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
+                                       settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
+                                       settings.configdict["pkg"]["MERGE_TYPE"] = "binary"
 
-                       if ret == os.EX_OK:
+                               else:
+                                       tree = "porttree"
+                                       portdb = root_config.trees["porttree"].dbapi
+                                       ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+                                       if ebuild_path is None:
+                                               raise AssertionError("ebuild not found for '%s'" % x.cpv)
+                                       settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
+                                       if self._build_opts.buildpkgonly:
+                                               settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
+                                       else:
+                                               settings.configdict["pkg"]["MERGE_TYPE"] = "source"
+
+                               portage.package.ebuild.doebuild.doebuild_environment(ebuild_path,
+                                       "pretend", settings=settings,
+                                       db=self.trees[settings["ROOT"]][tree].dbapi)
+                               prepare_build_dirs(root_config.root, settings, cleanup=0)
+
+                               vardb = root_config.trees['vartree'].dbapi
+                               settings["REPLACING_VERSIONS"] = " ".join(
+                                       set(portage.versions.cpv_getversion(match) \
+                                               for match in vardb.match(x.slot_atom) + \
+                                               vardb.match('='+x.cpv)))
+                               pretend_phase = EbuildPhase(
+                                       phase="pretend", scheduler=sched_iface,
+                                       settings=settings)
+
+                               pretend_phase.start()
+                               ret = pretend_phase.wait()
+                               if ret != os.EX_OK:
+                                       failures += 1
+                               portage.elog.elog_process(x.cpv, settings)
+                       finally:
                                shutil.rmtree(tmpdir)
-                       settings["PORTAGE_TMPDIR"] = tmpdir_orig
+                               settings["PORTAGE_TMPDIR"] = tmpdir_orig
 
-                       if ret != os.EX_OK:
-                               failures += 1
                if failures:
                        return 1
                return os.EX_OK
@@ -1057,7 +1182,36 @@ class Scheduler(PollScheduler):
                        return rval
 
                while True:
-                       rval = self._merge()
+
+                       received_signal = []
+
+                       def sighandler(signum, frame):
+                               signal.signal(signal.SIGINT, signal.SIG_IGN)
+                               signal.signal(signal.SIGTERM, signal.SIG_IGN)
+                               portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
+                                       {"signal":signum})
+                               self.terminate()
+                               received_signal.append(128 + signum)
+
+                       earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
+                       earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)
+
+                       try:
+                               rval = self._merge()
+                       finally:
+                               # Restore previous handlers
+                               if earlier_sigint_handler is not None:
+                                       signal.signal(signal.SIGINT, earlier_sigint_handler)
+                               else:
+                                       signal.signal(signal.SIGINT, signal.SIG_DFL)
+                               if earlier_sigterm_handler is not None:
+                                       signal.signal(signal.SIGTERM, earlier_sigterm_handler)
+                               else:
+                                       signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+                       if received_signal:
+                               sys.exit(received_signal[0])
+
                        if rval == os.EX_OK or fetchonly or not keep_going:
                                break
                        if "resume" not in mtimedb:
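
The merge loop above now installs temporary SIGINT/SIGTERM handlers that silence further signals, call self.terminate() (which in turn triggers _terminate_tasks()), and record 128 + signum for the exit status; the previous handlers, or SIG_DFL when none were set, are restored in a finally block. A generic sketch of that save/handle/restore pattern around a blocking call, with terminate standing in for Scheduler.terminate():

import signal

def run_with_termination(blocking_call, terminate):
    """Run blocking_call(), turning SIGINT/SIGTERM into a clean shutdown."""
    received = []

    def handler(signum, frame):
        # Ignore further signals while shutting down.
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        terminate()
        received.append(128 + signum)

    prev_int = signal.signal(signal.SIGINT, handler)
    prev_term = signal.signal(signal.SIGTERM, handler)
    try:
        rval = blocking_call()
    finally:
        # Restore the earlier handlers, falling back to the default action.
        signal.signal(signal.SIGINT,
            prev_int if prev_int is not None else signal.SIG_DFL)
        signal.signal(signal.SIGTERM,
            prev_term if prev_term is not None else signal.SIG_DFL)

    if received:
        raise SystemExit(received[0])
    return rval
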
@@ -1078,10 +1232,6 @@ class Scheduler(PollScheduler):
                        if not mergelist:
                                break
 
-                       # free some memory before creating
-                       # the resume depgraph
-                       self._destroy_installed_graph()
-
                        if not self._calc_resume_list():
                                break
 
@@ -1089,10 +1239,6 @@ class Scheduler(PollScheduler):
                        if not self._mergelist:
                                break
 
-                       # Initialize the installed graph again
-                       # since it was destroyed above in order
-                       # to free memory.
-                       self._init_installed_graph()
                        self._save_resume_list()
                        self._pkg_count.curval = 0
                        self._pkg_count.maxval = len([x for x in self._mergelist \
@@ -1186,7 +1332,9 @@ class Scheduler(PollScheduler):
                                printer.eerror(line)
                        printer.eerror("")
                        for failed_pkg in self._failed_pkgs_all:
-                               msg = " %s" % (colorize('INFORM', failed_pkg.pkg.__str__()),)
+                               # Use _unicode_decode() to force unicode format string so
+                               # that Package.__unicode__() is called in python2.
+                               msg = _unicode_decode(" %s") % (failed_pkg.pkg,)
                                log_path = self._locate_failure_log(failed_pkg)
                                if log_path is not None:
                                        msg += ", Log file:"
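
The _unicode_decode(" %s") change makes the format string a unicode object so that, under Python 2, %-formatting calls Package.__unicode__() rather than __str__(), avoiding encoding problems with non-ASCII metadata. A tiny illustration of the Python 2 behaviour being relied on (Pkg is a stand-in, not _emerge.Package):

class Pkg(object):
    def __str__(self):
        return "sys-apps/sed-4.2"
    def __unicode__(self):
        return u"sys-apps/sed-4.2"

print(" %s" % Pkg())    # byte-string format string -> __str__()
print(u" %s" % Pkg())   # unicode format string     -> __unicode__()

Under Python 3 both calls go through __str__(), so the distinction only matters for the python2 case named in the comment.
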
@@ -1302,9 +1450,9 @@ class Scheduler(PollScheduler):
                                build_dir=build_dir, build_log=build_log,
                                pkg=pkg,
                                returncode=merge.returncode))
-                       self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
-
-                       self._status_display.failed = len(self._failed_pkgs)
+                       if not self._terminated_tasks:
+                               self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
+                               self._status_display.failed = len(self._failed_pkgs)
                        return
 
                self._task_complete(pkg)
@@ -1312,9 +1460,15 @@ class Scheduler(PollScheduler):
                if pkg_to_replace is not None:
                        # When a package is replaced, mark its uninstall
                        # task complete (if any).
-                       uninst_hash_key = \
-                               ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
-                       self._task_complete(uninst_hash_key)
+                       if self._digraph is not None and \
+                               pkg_to_replace in self._digraph:
+                               try:
+                                       self._pkg_queue.remove(pkg_to_replace)
+                               except ValueError:
+                                       pass
+                               self._task_complete(pkg_to_replace)
+                       else:
+                               self._pkg_cache.pop(pkg_to_replace, None)
 
                if pkg.installed:
                        return
@@ -1331,7 +1485,13 @@ class Scheduler(PollScheduler):
                mtimedb.commit()
 
        def _build_exit(self, build):
-               if build.returncode == os.EX_OK:
+               if build.returncode == os.EX_OK and self._terminated_tasks:
+                       # We've been interrupted, so we won't
+                       # add this to the merge queue.
+                       self.curval += 1
+                       self._running_tasks.remove(build.pkg)
+                       self._deallocate_config(build.settings)
+               elif build.returncode == os.EX_OK:
                        self.curval += 1
                        merge = PackageMerge(merge=build)
                        if not build.build_opts.buildpkgonly and \
@@ -1354,9 +1514,9 @@ class Scheduler(PollScheduler):
                                build_dir=build_dir, build_log=build_log,
                                pkg=build.pkg,
                                returncode=build.returncode))
-                       self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
-
-                       self._status_display.failed = len(self._failed_pkgs)
+                       if not self._terminated_tasks:
+                               self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
+                               self._status_display.failed = len(self._failed_pkgs)
                        self._deallocate_config(build.settings)
                self._jobs -= 1
                self._status_display.running = self._jobs
@@ -1400,6 +1560,7 @@ class Scheduler(PollScheduler):
                self._status_display.reset()
                self._digraph = None
                self._task_queues.fetch.clear()
+               self._prefetchers.clear()
 
        def _choose_pkg(self):
                """
@@ -1533,7 +1694,7 @@ class Scheduler(PollScheduler):
                                self._poll_loop()
 
        def _keep_scheduling(self):
-               return bool(self._pkg_queue and \
+               return bool(not self._terminated_tasks and self._pkg_queue and \
                        not (self._failed_pkgs and not self._build_opts.fetchonly))
 
        def _is_work_scheduled(self):
@@ -1651,10 +1812,14 @@ class Scheduler(PollScheduler):
                if pkg.operation != "uninstall":
                        vardb = pkg.root_config.trees["vartree"].dbapi
                        previous_cpv = vardb.match(pkg.slot_atom)
+                       if not previous_cpv and vardb.cpv_exists(pkg.cpv):
+                               # same cpv, different SLOT
+                               previous_cpv = [pkg.cpv]
                        if previous_cpv:
                                previous_cpv = previous_cpv.pop()
                                pkg_to_replace = self._pkg(previous_cpv,
-                                       "installed", pkg.root_config, installed=True)
+                                       "installed", pkg.root_config, installed=True,
+                                       operation="uninstall")
 
                task = MergeListItem(args_set=self._args_set,
                        background=self._background, binpkg_opts=self._binpkg_opts,
@@ -1734,6 +1899,10 @@ class Scheduler(PollScheduler):
                """
                print(colorize("GOOD", "*** Resuming merge..."))
 
+               # free some memory before creating
+               # the resume depgraph
+               self._destroy_graph()
+
                myparams = create_depgraph_params(self.myopts, None)
                success = False
                e = None
@@ -1794,12 +1963,7 @@ class Scheduler(PollScheduler):
                        self._post_mod_echo_msgs.append(mydepgraph.display_problems)
                        return False
                mydepgraph.display_problems()
-
-               mylist = mydepgraph.altlist()
-               mydepgraph.break_refs(mylist)
-               mydepgraph.break_refs(dropped_tasks)
-               self._mergelist = mylist
-               self._set_digraph(mydepgraph.schedulerGraph())
+               self._init_graph(mydepgraph.schedulerGraph())
 
                msg_width = 75
                for task in dropped_tasks:
@@ -1886,29 +2050,46 @@ class Scheduler(PollScheduler):
                        if world_locked:
                                world_set.unlock()
 
-       def _pkg(self, cpv, type_name, root_config, installed=False):
+       def _pkg(self, cpv, type_name, root_config, installed=False,
+               operation=None, myrepo=None):
                """
                Get a package instance from the cache, or create a new
                one if necessary. Raises KeyError from aux_get if it
                fails for some reason (package does not exist or is
                corrupt).
                """
-               operation = "merge"
-               if installed:
-                       operation = "nomerge"
 
-               if self._digraph is not None:
-                       # Reuse existing instance when available.
-                       pkg = self._digraph.get(
-                               (type_name, root_config.root, cpv, operation))
-                       if pkg is not None:
-                               return pkg
+               if type_name != "ebuild":
+                       # For installed (and binary) packages we don't care for the repo
+                       # For installed (and binary) packages we don't care about the repo
+                       # So overwrite the repo_key with type_name.
+                       repo_key = type_name
+                       myrepo = None
+               elif myrepo is None:
+                       raise AssertionError(
+                               "Scheduler._pkg() called without 'myrepo' argument")
+               else:
+                       repo_key = myrepo
+
+               if operation is None:
+                       if installed:
+                               operation = "nomerge"
+                       else:
+                               operation = "merge"
+
+               # Reuse existing instance when available.
+               pkg = self._pkg_cache.get(
+                       (type_name, root_config.root, cpv, operation, repo_key))
+               if pkg is not None:
+                       return pkg
 
                tree_type = depgraph.pkg_tree_map[type_name]
                db = root_config.trees[tree_type].dbapi
                db_keys = list(self.trees[root_config.root][
                        tree_type].dbapi._aux_cache_keys)
-               metadata = zip(db_keys, db.aux_get(cpv, db_keys))
-               return Package(built=(type_name != 'ebuild'),
-                       cpv=cpv, metadata=metadata,
-                       root_config=root_config, installed=installed)
+               metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
+               pkg = Package(built=(type_name != "ebuild"),
+                       cpv=cpv, installed=installed, metadata=metadata,
+                       root_config=root_config, type_name=type_name)
+               self._pkg_cache[pkg] = pkg
+               return pkg
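
_pkg() now keys the shared package cache on (type_name, root, cpv, operation, repo_key), where repo_key collapses to the type name for installed and binary packages (only one cpv can exist there) and must be the repository name for ebuilds, which is why the findname() and aux_get() calls elsewhere in this patch pass myrepo=x.repo. A self-contained restatement of just the key derivation (not the real method, which also constructs and caches Package instances):

def pkg_cache_key(type_name, root, cpv, installed=False,
        operation=None, myrepo=None):
    if type_name != "ebuild":
        # Repo is irrelevant for hashing installed/binary packages.
        repo_key = type_name
    elif myrepo is None:
        raise AssertionError("myrepo is required for ebuild packages")
    else:
        repo_key = myrepo
    if operation is None:
        operation = "nomerge" if installed else "merge"
    return (type_name, root, cpv, operation, repo_key)

print(pkg_cache_key("installed", "/", "sys-apps/sed-4.2", installed=True))
# -> ('installed', '/', 'sys-apps/sed-4.2', 'nomerge', 'installed')
print(pkg_cache_key("ebuild", "/", "sys-apps/sed-4.2", myrepo="gentoo"))
# -> ('ebuild', '/', 'sys-apps/sed-4.2', 'merge', 'gentoo')
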