http://scons.tigris.org/issues/show_bug.cgi?id=2329
[scons.git] / src / engine / SCons / Taskmaster.py
index 7439168a02fca5e4c53433eec2d8ab5a4b167cd8..b2c4204e9de1e1637dd76adbcf676f073482e068 100644 (file)
@@ -20,6 +20,7 @@
 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #
+from __future__ import generators  ### KEEP FOR COMPATIBILITY FIXERS
 
 __doc__ = """
 Generic Taskmaster module for the SCons build engine.
@@ -50,15 +51,22 @@ interface and the SCons build engine.  There are two key classes here:
 
 __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
 
-import string
+from itertools import chain
+import operator
 import sys
 import traceback
 
-import SCons.Node
 import SCons.Errors
+import SCons.Node
+import SCons.Warnings
 
 StateString = SCons.Node.StateString
-
+NODE_NO_STATE = SCons.Node.no_state
+NODE_PENDING = SCons.Node.pending
+NODE_EXECUTING = SCons.Node.executing
+NODE_UP_TO_DATE = SCons.Node.up_to_date
+NODE_EXECUTED = SCons.Node.executed
+NODE_FAILED = SCons.Node.failed
 
 
 # A subsystem for recording stats about how different Nodes are handled by
@@ -131,6 +139,10 @@ class Task:
         self.node = node
         self.exc_clear()
 
+    def trace_message(self, method, node, description='node'):
+        fmt = '%-20s %s %s\n'
+        return fmt % (method + ':', description, self.tm.trace_node(node))
+
     def display(self, message):
         """
         Hook to allow the calling interface to display a message.
@@ -152,6 +164,8 @@ class Task:
         unlink underlying files and make all necessary directories before
         the Action is actually called to build the targets.
         """
+        T = self.tm.trace
+        if T: T.write(self.trace_message('Task.prepare()', self.node))
 
         # Now that it's the appropriate time, give the TaskMaster a
         # chance to raise any exceptions it encountered while preparing
@@ -162,7 +176,19 @@ class Task:
             self.display(self.tm.message)
             self.tm.message = None
 
-        for t in self.targets:
+        # Let the targets take care of any necessary preparations.
+        # This includes verifying that all of the necessary sources
+        # and dependencies exist, removing the target file(s), etc.
+        #
+        # As of April 2008, the get_executor().prepare() method makes
+        # sure that all of the aggregate sources necessary to build this
+        # Task's target(s) exist in one up-front check.  The individual
+        # target t.prepare() methods check that each target's explicit
+        # or implicit dependencies exists, and also initialize the
+        # .sconsign info.
+        executor = self.targets[0].get_executor()
+        executor.prepare()
+        for t in executor.get_action_targets():
             t.prepare()
             for s in t.side_effects:
                 s.prepare()
@@ -172,6 +198,17 @@ class Task:
         """
         return self.node
 
+    def needs_execute(self):
+        # TODO(deprecate):  "return True" is the old default behavior;
+        # change it to NotImplementedError (after running through the
+        # Deprecation Cycle) so the desired behavior is explicitly
+        # determined by which concrete subclass is used.
+        #raise NotImplementedError
+        msg = ('Direct use of the Taskmaster.Task class will be deprecated\n'
+               + '\tin a future release.')
+        SCons.Warnings.warn(SCons.Warnings.TaskmasterNeedsExecuteWarning, msg)
+        return True
+
     def execute(self):
         """
         Called to execute the task.
@@ -180,17 +217,24 @@ class Task:
         so only do thread safe stuff here.  Do thread unsafe stuff in
         prepare(), executed() or failed().
         """
+        T = self.tm.trace
+        if T: T.write(self.trace_message('Task.execute()', self.node))
 
         try:
             everything_was_cached = 1
             for t in self.targets:
-                if not t.retrieve_from_cache():
+                if t.retrieve_from_cache():
+                    # Call the .built() method without calling the
+                    # .push_to_cache() method, since we just got the
+                    # target from the cache and don't need to push
+                    # it back there.
+                    t.set_state(NODE_EXECUTED)
+                    t.built()
+                else:
                     everything_was_cached = 0
                     break
             if not everything_was_cached:
                 self.targets[0].build()
-        except KeyboardInterrupt:
-            raise
         except SystemExit:
             exc_value = sys.exc_info()[1]
             raise SCons.Errors.ExplicitExit(self.targets[0], exc_value.code)
@@ -198,37 +242,85 @@ class Task:
             raise
         except SCons.Errors.BuildError:
             raise
-        except:
-            raise SCons.Errors.TaskmasterException(self.targets[0],
-                                                   sys.exc_info())
+        except Exception, e:
+            buildError = SCons.Errors.convert_to_BuildError(e)
+            buildError.node = self.targets[0]
+            buildError.exc_info = sys.exc_info()
+            raise buildError
+
+    def executed_without_callbacks(self):
+        """
+        Called when the task has been successfully executed
+        and the Taskmaster instance doesn't want to call
+        the Node's callback methods.
+        """
+        T = self.tm.trace
+        if T: T.write(self.trace_message('Task.executed_without_callbacks()',
+                                         self.node))
 
-    def executed(self):
+        for t in self.targets:
+            if t.get_state() == NODE_EXECUTING:
+                for side_effect in t.side_effects:
+                    side_effect.set_state(NODE_NO_STATE)
+                t.set_state(NODE_EXECUTED)
+
+    def executed_with_callbacks(self):
         """
-        Called when the task has been successfully executed.
+        Called when the task has been successfully executed and
+        the Taskmaster instance wants to call the Node's callback
+        methods.
 
         This may have been a do-nothing operation (to preserve build
-        order), so we have to check the node's state before deciding
-        whether it was "built" or just "visited."
+        order), so we must check the node's state before deciding whether
+        it was "built", in which case we call the appropriate Node method.
+        In any event, we always call "visited()", which will handle any
+        post-visit actions that must take place regardless of whether
+        or not the target was an actual built target or a source Node.
         """
+        T = self.tm.trace
+        if T: T.write(self.trace_message('Task.executed_with_callbacks()',
+                                         self.node))
+
         for t in self.targets:
-            if t.get_state() == SCons.Node.executing:
-                t.set_state(SCons.Node.executed)
+            if t.get_state() == NODE_EXECUTING:
+                for side_effect in t.side_effects:
+                    side_effect.set_state(NODE_NO_STATE)
+                t.set_state(NODE_EXECUTED)
+                t.push_to_cache()
                 t.built()
-            else:
-                t.visited()
+            t.visited()
+
+    executed = executed_with_callbacks
 
     def failed(self):
         """
         Default action when a task fails:  stop the build.
+
+        Note: Although this function is normally invoked on nodes in
+        the executing state, it might also be invoked on up-to-date
+        nodes when using Configure().
         """
         self.fail_stop()
 
     def fail_stop(self):
         """
         Explicit stop-the-build failure.
+
+        This sets failure status on the target nodes and all of
+        their dependent parent nodes.
+
+        Note: Although this function is normally invoked on nodes in
+        the executing state, it might also be invoked on up-to-date
+        nodes when using Configure().
         """
-        for t in self.targets:
-            t.set_state(SCons.Node.failed)
+        T = self.tm.trace
+        if T: T.write(self.trace_message('Task.failed_stop()', self.node))
+
+        # Invoke will_not_build() to clean-up the pending children
+        # list.
+        self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
+
+        # Tell the taskmaster to not start any new tasks
         self.tm.stop()
 
         # We're stopping because of a build failure, but give the
@@ -243,12 +335,15 @@ class Task:
 
         This sets failure status on the target nodes and all of
         their dependent parent nodes.
+
+        Note: Although this function is normally invoked on nodes in
+        the executing state, it might also be invoked on up-to-date
+        nodes when using Configure().
         """
-        for t in self.targets:
-            # Set failure state on all of the parents that were dependent
-            # on this failed build.
-            def set_state(node): node.set_state(SCons.Node.failed)
-            t.call_for_all_waiting_parents(set_state)
+        T = self.tm.trace
+        if T: T.write(self.trace_message('Task.failed_continue()', self.node))
+
+        self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
 
     def make_ready_all(self):
         """
@@ -257,11 +352,15 @@ class Task:
         This is used when the interface needs every target Node to be
         visited--the canonical example being the "scons -c" option.
         """
+        T = self.tm.trace
+        if T: T.write(self.trace_message('Task.make_ready_all()', self.node))
+
         self.out_of_date = self.targets[:]
         for t in self.targets:
-            t.disambiguate().set_state(SCons.Node.executing)
+            t.disambiguate().set_state(NODE_EXECUTING)
             for s in t.side_effects:
-                s.set_state(SCons.Node.executing)
+                # add disambiguate here to mirror the call on targets above
+                s.disambiguate().set_state(NODE_EXECUTING)
 
     def make_ready_current(self):
         """
@@ -270,15 +369,38 @@ class Task:
 
         This is the default behavior for building only what's necessary.
         """
+        T = self.tm.trace
+        if T: T.write(self.trace_message('Task.make_ready_current()',
+                                         self.node))
+
         self.out_of_date = []
+        needs_executing = False
         for t in self.targets:
-            if t.disambiguate().current():
-                t.set_state(SCons.Node.up_to_date)
-            else:
+            try:
+                t.disambiguate().make_ready()
+                is_up_to_date = not t.has_builder() or \
+                                (not t.always_build and t.is_up_to_date())
+            except EnvironmentError, e:
+                raise SCons.Errors.BuildError(node=t, errstr=e.strerror, filename=e.filename)
+
+            if not is_up_to_date:
                 self.out_of_date.append(t)
-                t.set_state(SCons.Node.executing)
+                needs_executing = True
+
+        if needs_executing:
+            for t in self.targets:
+                t.set_state(NODE_EXECUTING)
                 for s in t.side_effects:
-                    s.set_state(SCons.Node.executing)
+                    # add disambiguate here to mirror the call on targets in first loop above
+                    s.disambiguate().set_state(NODE_EXECUTING)
+        else:
+            for t in self.targets:
+                # We must invoke visited() to ensure that the node
+                # information has been computed before allowing the
+                # parent nodes to execute. (That could occur in a
+                # parallel build...)
+                t.visited()
+                t.set_state(NODE_UP_TO_DATE)
 
     make_ready = make_ready_current
 
@@ -292,6 +414,8 @@ class Task:
         waiting parent Nodes, or Nodes waiting on a common side effect,
         that can be put back on the candidates list.
         """
+        T = self.tm.trace
+        if T: T.write(self.trace_message('Task.postprocess()', self.node))
 
         # We may have built multiple targets, some of which may have
         # common parents waiting for this build.  Count up how many
@@ -300,28 +424,40 @@ class Task:
         # back on the candidates list if the Node is also a waiting
         # parent.
 
+        targets = set(self.targets)
+
+        pending_children = self.tm.pending_children
         parents = {}
-        for t in self.targets:
-            for p in t.waiting_parents.keys():
+        for t in targets:
+            # A node can only be in the pending_children set if it has
+            # some waiting_parents.
+            if t.waiting_parents:
+                if T: T.write(self.trace_message('Task.postprocess()',
+                                                 t,
+                                                 'removing'))
+                pending_children.discard(t)
+            for p in t.waiting_parents:
                 parents[p] = parents.get(p, 0) + 1
 
-        for t in self.targets:
+        for t in targets:
             for s in t.side_effects:
-                if s.get_state() == SCons.Node.executing:
-                    s.set_state(SCons.Node.no_state)
-                    for p in s.waiting_parents.keys():
-                        if not parents.has_key(p):
-                            parents[p] = 1
-                for p in s.waiting_s_e.keys():
+                if s.get_state() == NODE_EXECUTING:
+                    s.set_state(NODE_NO_STATE)
+                    for p in s.waiting_parents:
+                        parents[p] = parents.get(p, 0) + 1
+                for p in s.waiting_s_e:
                     if p.ref_count == 0:
                         self.tm.candidates.append(p)
 
         for p, subtract in parents.items():
             p.ref_count = p.ref_count - subtract
+            if T: T.write(self.trace_message('Task.postprocess()',
+                                             p,
+                                             'adjusted parent ref count'))
             if p.ref_count == 0:
                 self.tm.candidates.append(p)
 
-        for t in self.targets:
+        for t in targets:
             t.postprocess()
 
     # Exception handling subsystem.
@@ -377,13 +513,40 @@ class Task:
             exc_traceback = None
         raise exc_type, exc_value, exc_traceback
 
+class AlwaysTask(Task):
+    def needs_execute(self):
+        """
+        Always returns True (indicating this Task should always
+        be executed).
+
+        Subclasses that need this behavior (as opposed to the default
+        of only executing Nodes that are out of date w.r.t. their
+        dependencies) can use this as follows:
+
+            class MyTaskSubclass(SCons.Taskmaster.Task):
+                needs_execute = SCons.Taskmaster.AlwaysTask.needs_execute
+        """
+        return True
+
+class OutOfDateTask(Task):
+    def needs_execute(self):
+        """
+        Returns True (indicating this Task should be executed) if this
+        Task's target state indicates it needs executing, which has
+        already been determined by an earlier up-to-date check.
+        """
+        return self.targets[0].get_state() == SCons.Node.executing
+
 
-def find_cycle(stack):
-    if stack[0] == stack[-1]:
-        return stack
-    for n in stack[-1].waiting_parents.keys():
+def find_cycle(stack, visited):
+    if stack[-1] in visited:
+        return None
+    visited.add(stack[-1])
+    for n in stack[-1].waiting_parents:
         stack.append(n)
-        if find_cycle(stack):
+        if stack[0] == stack[-1]:
+            return stack
+        if find_cycle(stack, visited):
             return stack
         stack.pop()
     return None
@@ -394,10 +557,13 @@ class Taskmaster:
     The Taskmaster for walking the dependency DAG.
     """
 
-    def __init__(self, targets=[], tasker=Task, order=None, trace=None):
-        self.top_targets = targets[:]
-        self.top_targets.reverse()
+    def __init__(self, targets=[], tasker=None, order=None, trace=None):
+        self.original_top = targets
+        self.top_targets_left = targets[:]
+        self.top_targets_left.reverse()
         self.candidates = []
+        if tasker is None:
+            tasker = OutOfDateTask
         self.tasker = tasker
         if not order:
             order = lambda l: l
@@ -405,6 +571,7 @@ class Taskmaster:
         self.message = None
         self.trace = trace
         self.next_candidate = self.find_next_candidate
+        self.pending_children = set()
 
     def find_next_candidate(self):
         """
@@ -430,7 +597,7 @@ class Taskmaster:
         except IndexError:
             pass
         try:
-            node = self.top_targets.pop()
+            node = self.top_targets_left.pop()
         except IndexError:
             return None
         self.current_top = node
@@ -445,9 +612,104 @@ class Taskmaster:
     def no_next_candidate(self):
         """
         Stops Taskmaster processing by not returning a next candidate.
+
+        Note that we have to clean-up the Taskmaster candidate list
+        because the cycle detection depends on the fact all nodes have
+        been processed somehow.
         """
+        while self.candidates:
+            candidates = self.candidates
+            self.candidates = []
+            self.will_not_build(candidates)
         return None
 
+    def _validate_pending_children(self):
+        """
+        Validate the content of the pending_children set. Assert if an
+        internal error is found.
+
+        This function is used strictly for debugging the taskmaster by
+        checking that no invariants are violated. It is not used in
+        normal operation.
+
+        The pending_children set is used to detect cycles in the
+        dependency graph. We call a "pending child" a child that is
+        found in the "pending" state when checking the dependencies of
+        its parent node.
+
+        A pending child can occur when the Taskmaster completes a loop
+        through a cycle. For example, let's imagine a graph made of
+        three nodes (A, B and C) making a cycle. The evaluation starts
+        at node A. The taskmaster first considers whether node A's
+        child B is up-to-date. Then, recursively, node B needs to
+        check whether node C is up-to-date. This leaves us with a
+        dependency graph looking like:
+
+                                      Next candidate \
+                                                      \
+        Node A (Pending) --> Node B(Pending) --> Node C (NoState)
+                ^                                     |
+                |                                     |
+                +-------------------------------------+
+
+        Now, when the Taskmaster examines the Node C's child Node A,
+        it finds that Node A is in the "pending" state. Therefore,
+        Node A is a pending child of node C.
+
+        Pending children indicate that the Taskmaster has potentially
+        looped back through a cycle. We say potentially because it could
+        also occur when a DAG is evaluated in parallel. For example,
+        consider the following graph:
+
+
+        Node A (Pending) --> Node B(Pending) --> Node C (Pending) --> ...
+                |                                     ^
+                |                                     |
+                +----------> Node D (NoState) --------+
+                                  /
+                  Next candidate /
+
+        The Taskmaster first evaluates the nodes A, B, and C and
+        starts building some children of node C. Assuming that the
+        maximum parallel level has not been reached, the Taskmaster
+        will examine Node D. It will find that Node C is a pending
+        child of Node D.
+
+        In summary, evaluating a graph with a cycle will always
+        involve a pending child at one point. A pending child might
+        indicate either a cycle or a diamond-shaped DAG. Only a
+        fraction of the nodes ends up being a "pending child" of
+        another node. This keeps the pending_children set small in
+        practice.
+
+        We can differentiate between the two cases if we wait until
+        the end of the build. At this point, all the pending children
+        nodes due to a diamond-shaped DAG will have been properly
+        built (or will have failed to build). But, the pending
+        children involved in a cycle will still be in the pending
+        state.
+
+        The taskmaster removes nodes from the pending_children set as
+        soon as a pending_children node moves out of the pending
+        state. This also helps to keep the pending_children set small.
+        """
+
+        for n in self.pending_children:
+            assert n.state in (NODE_PENDING, NODE_EXECUTING), \
+                (str(n), StateString[n.state])
+            assert len(n.waiting_parents) != 0, (str(n), len(n.waiting_parents))
+            for p in n.waiting_parents:
+                assert p.ref_count > 0, (str(n), str(p), p.ref_count)
+
+
+    def trace_message(self, message):
+        return 'Taskmaster: %s\n' % message
+
+    def trace_node(self, node):
+        return '<%-10s %-3s %s>' % (StateString[node.get_state()],
+                                    node.ref_count,
+                                    repr(str(node)))
+
     def _find_next_ready_node(self):
         """
         Finds the next node that is ready to be built.
@@ -473,15 +735,25 @@ class Taskmaster:
         self.ready_exc = None
 
         T = self.trace
+        if T: T.write('\n' + self.trace_message('Looking for a node to evaluate'))
 
-        while 1:
+        while True:
             node = self.next_candidate()
             if node is None:
+                if T: T.write(self.trace_message('No candidate anymore.') + '\n')
                 return None
 
             node = node.disambiguate()
             state = node.get_state()
 
+            # For debugging only:
+            #
+            # try:
+            #     self._validate_pending_children()
+            # except:
+            #     self.ready_exc = sys.exc_info()
+            #     return node
+
             if CollectStats:
                 if not hasattr(node, 'stats'):
                     node.stats = Stats()
@@ -491,149 +763,141 @@ class Taskmaster:
             else:
                 S = None
 
-            if T: T.write('Taskmaster: %s:' % repr(str(node)))
+            if T: T.write(self.trace_message('    Considering node %s and its children:' % self.trace_node(node)))
 
-            # Skip this node if it has already been evaluated:
-            if state > SCons.Node.pending:
+            if state == NODE_NO_STATE:
+                # Mark this node as being on the execution stack:
+                node.set_state(NODE_PENDING)
+            elif state > NODE_PENDING:
+                # Skip this node if it has already been evaluated:
                 if S: S.already_handled = S.already_handled + 1
-                if T: T.write(' already handled (%s)\n' % StateString[state])
+                if T: T.write(self.trace_message('       already handled (executed)'))
                 continue
 
-            # Mark this node as being on the execution stack:
-            node.set_state(SCons.Node.pending)
+            executor = node.get_executor()
 
             try:
-                children = node.children()
+                children = executor.get_all_children()
             except SystemExit:
                 exc_value = sys.exc_info()[1]
                 e = SCons.Errors.ExplicitExit(node, exc_value.code)
                 self.ready_exc = (SCons.Errors.ExplicitExit, e)
-                if T: T.write(' SystemExit\n')
+                if T: T.write(self.trace_message('       SystemExit'))
                 return node
-            except KeyboardInterrupt:
-                if T: T.write(' KeyboardInterrupt\n')
-                raise
-            except:
+            except Exception, e:
                 # We had a problem just trying to figure out the
                 # children (like a child couldn't be linked in to a
-                # BuildDir, or a Scanner threw something).  Arrange to
+                # VariantDir, or a Scanner threw something).  Arrange to
                 # raise the exception when the Task is "executed."
                 self.ready_exc = sys.exc_info()
                 if S: S.problem = S.problem + 1
-                if T: T.write(' exception\n')
+                if T: T.write(self.trace_message('       exception %s while scanning children.\n' % e))
                 return node
 
-            if T and children:
-                c = map(str, children)
-                c.sort()
-                T.write(' children:\n    %s\n   ' % c)
-
-            childinfo = map(lambda N: (N.get_state(),
-                                       N.is_derived() or N.is_pseudo_derived(),
-                                       N), children)
-
-            # Skip this node if any of its children have failed.  This
-            # catches the case where we're descending a top-level target
-            # and one of our children failed while trying to be built
-            # by a *previous* descent of an earlier top-level target.
-            failed_children = filter(lambda I: I[0] == SCons.Node.failed,
-                                     childinfo)
-            if failed_children:
-                node.set_state(SCons.Node.failed)
-                if S: S.child_failed = S.child_failed + 1
-                if T:
-                    c = map(str, failed_children)
-                    c.sort()
-                    T.write(' children failed:\n    %s\n' % c)
-                continue
+            children_not_visited = []
+            children_pending = set()
+            children_not_ready = []
+            children_failed = False
+
+            for child in chain(executor.get_all_prerequisites(), children):
+                childstate = child.get_state()
+
+                if T: T.write(self.trace_message('       ' + self.trace_node(child)))
+
+                if childstate == NODE_NO_STATE:
+                    children_not_visited.append(child)
+                elif childstate == NODE_PENDING:
+                    children_pending.add(child)
+                elif childstate == NODE_FAILED:
+                    children_failed = True
+
+                if childstate <= NODE_EXECUTING:
+                    children_not_ready.append(child)
+
+
+            # These nodes have not even been visited yet.  Add
+            # them to the list so that on some next pass we can
+            # take a stab at evaluating them (or their children).
+            children_not_visited.reverse()
+            self.candidates.extend(self.order(children_not_visited))
+            #if T and children_not_visited:
+            #    T.write(self.trace_message('     adding to candidates: %s' % map(str, children_not_visited)))
+            #    T.write(self.trace_message('     candidates now: %s\n' % map(str, self.candidates)))
+
+            # Skip this node if any of its children have failed.
+            #
+            # This catches the case where we're descending a top-level
+            # target and one of our children failed while trying to be
+            # built by a *previous* descent of an earlier top-level
+            # target.
+            #
+            # It can also occur if a node is reused in multiple
+            # targets. The first descent goes through one of the
+            # targets; the next descent occurs through the other target.
+            #
+            # Note that we can only have failed_children if the
+            # --keep-going flag was used, because without it the build
+            # will stop before diving in the other branch.
+            #
+            # Note that even if one of the children fails, we still
+            # added the other children to the list of candidate nodes
+            # to keep on building (--keep-going).
+            if children_failed:
+                for n in executor.get_action_targets():
+                    n.set_state(NODE_FAILED)
 
-            # Detect dependency cycles:
-            pending_nodes = filter(lambda I: I[0] == SCons.Node.pending, childinfo)
-            if pending_nodes:
-                for p in pending_nodes:
-                    cycle = find_cycle([p[2], node])
-                    if cycle:
-                        desc = "Dependency cycle: " + string.join(map(str, cycle), " -> ")
-                        if T: T.write(' dependency cycle\n')
-                        raise SCons.Errors.UserError, desc
-
-            # Select all of the dependencies that are derived targets
-            # (that is, children who have builders or are side effects).
-            derived_children = filter(lambda I: I[1], childinfo)
-
-            not_started = filter(lambda I: not I[0], derived_children)
-            if not_started:
-                not_started = map(lambda I: I[2], not_started)
-
-                # We're waiting on one more derived targets that have
-                # not yet started building.  Add this node to the
-                # waiting_parents lists of those derived files so that
-                # when they've finished building, our implicit dependency
-                # list will get cleared and we'll re-scan the newly-built
-                # file(s) for updated implicit dependencies.
-                map(lambda n, P=node: n.add_to_waiting_parents(P), not_started)
-                node.ref_count = len(not_started)
-
-                # Now we add these derived targets to the candidates
-                # list so they can be examined and built.  We have to
-                # add ourselves back to the list first, though, so we get
-                # a chance to re-scan and build after the dependencies.
-                #
-                # We reverse the order in which the children are added
-                # to the candidates stack so the order in which they're
-                # popped matches the order in which they show up in our
-                # children's list.  This is more logical / efficient for
-                # builders with multiple targets, since the "primary"
-                # target will be examined first.
-                self.candidates.append(node)
-                not_started.reverse()
-                self.candidates.extend(self.order(not_started))
-
-                if S: S.not_started = S.not_started + 1
-                if T:
-                    c = map(str, not_started)
-                    c.sort()
-                    T.write(' waiting on unstarted children:\n    %s\n' % c)
+                if S: S.child_failed = S.child_failed + 1
+                if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
                 continue
 
-            not_built = filter(lambda I: I[0] <= SCons.Node.executing, derived_children)
-            if not_built:
-                not_built = map(lambda I: I[2], not_built)
+            if children_not_ready:
+                for child in children_not_ready:
+                    # We're waiting on one or more derived targets
+                    # that have not yet finished building.
+                    if S: S.not_built = S.not_built + 1
 
-                # We're waiting on one or more derived targets that have
-                # started building but not yet finished.  Add this node
-                # to the waiting parents lists of those derived files
-                # so that when they've finished building, our implicit
-                # dependency list will get cleared and we'll re-scan the
-                # newly-built file(s) for updated implicit dependencies.
-                map(lambda n, P=node: n.add_to_waiting_parents(P), not_built)
-                node.ref_count = len(not_built)
+                    # Add this node to the waiting parents lists of
+                    # anything we're waiting on, with a reference
+                    # count so we can be put back on the list for
+                    # re-evaluation when they've all finished.
+                    node.ref_count =  node.ref_count + child.add_to_waiting_parents(node)
+                    if T: T.write(self.trace_message('     adjusted ref count: %s, child %s' %
+                                  (self.trace_node(node), repr(str(child)))))
 
-                if S: S.not_built = S.not_built + 1
                 if T:
-                    c = map(str, not_built)
-                    c.sort()
-                    T.write(' waiting on unfinished children:\n    %s\n' % c)
+                    for pc in children_pending:
+                        T.write(self.trace_message('       adding %s to the pending children set\n' %
+                                self.trace_node(pc)))
+                self.pending_children = self.pending_children | children_pending
+
                 continue
 
-            # Skip this node if it has side-effects that are currently being
-            # built themselves or waiting for something else being built.
-            side_effects = filter(lambda N:
-                                  N.get_state() == SCons.Node.executing,
-                                  node.side_effects)
-            if side_effects:
-                map(lambda n, P=node: n.add_to_waiting_s_e(P), side_effects)
+            # Skip this node if it has side-effects that are
+            # currently being built:
+            wait_side_effects = False
+            for se in executor.get_action_side_effects():
+                if se.get_state() == NODE_EXECUTING:
+                    se.add_to_waiting_s_e(node)
+                    wait_side_effects = True
+
+            if wait_side_effects:
                 if S: S.side_effects = S.side_effects + 1
-                if T:
-                    c = map(str, side_effects)
-                    c.sort()
-                    T.write(' waiting on side effects:\n    %s\n' % c)
                 continue
 
             # The default when we've gotten through all of the checks above:
             # this node is ready to be built.
             if S: S.build = S.build + 1
-            if T: T.write(' evaluating %s\n' % node)
+            if T: T.write(self.trace_message('Evaluating %s\n' %
+                                             self.trace_node(node)))
+
+            # For debugging only:
+            #
+            # try:
+            #     self._validate_pending_children()
+            # except:
+            #     self.ready_exc = sys.exc_info()
+            #     return node
+
             return node
 
         return None
@@ -650,16 +914,14 @@ class Taskmaster:
         if node is None:
             return None
 
-        tlist = node.get_executor().targets
+        tlist = node.get_executor().get_all_targets()
 
-        task = self.tasker(self, tlist, node is self.current_top, node)
+        task = self.tasker(self, tlist, node in self.original_top, node)
         try:
             task.make_ready()
-        except KeyboardInterrupt:
-            raise
         except:
             # We had a problem just trying to get this task ready (like
-            # a child couldn't be linked in to a BuildDir when deciding
+            # a child couldn't be linked in to a VariantDir when deciding
             # whether this node is current).  Arrange to raise the
             # exception when the Task is "executed."
             self.ready_exc = sys.exc_info()
@@ -671,8 +933,100 @@ class Taskmaster:
 
         return task
 
+    def will_not_build(self, nodes, node_func=lambda n: None):
+        """
+        Perform clean-up about nodes that will never be built. Invokes
+        a user defined function on all of these nodes (including all
+        of their parents).
+
+        nodes     -- iterable of Nodes known to be unbuildable
+        node_func -- callable applied to every affected node (each of
+                     the given nodes and all of their waiting parents);
+                     defaults to a no-op
+        """
+
+        T = self.trace
+
+        # Work on a local copy; see the comment at the bottom for why
+        # this is assigned back instead of updated in place.
+        pending_children = self.pending_children
+
+        to_visit = set(nodes)
+        pending_children = pending_children - to_visit
+
+        if T:
+            for n in nodes:
+                T.write(self.trace_message('       removing node %s from the pending children set\n' %
+                        self.trace_node(n)))
+        try:
+            while True:
+                try:
+                    node = to_visit.pop()
+                except AttributeError:
+                    # Python 1.5.2: to_visit is a list-like compatibility
+                    # container without pop(), so drain it by hand.
+                    if len(to_visit):
+                        node = to_visit[0]
+                        to_visit.remove(node)
+                    else:
+                        break
+
+                node_func(node)
+
+                # Prune recursion by flushing the waiting children
+                # list immediately.
+                parents = node.waiting_parents
+                node.waiting_parents = set()
+
+                # Propagate upward: the parents can never be built
+                # either, so visit them too and drop them from the
+                # pending set.
+                to_visit = to_visit | parents
+                pending_children = pending_children - parents
+
+                for p in parents:
+                    # Each parent has one fewer child it is waiting on.
+                    p.ref_count = p.ref_count - 1
+                    if T: T.write(self.trace_message('       removing parent %s from the pending children set\n' %
+                                  self.trace_node(p)))
+        except KeyError:
+            # The container to_visit has been emptied:  set.pop()
+            # raises KeyError on an empty set, terminating the loop.
+            pass
+
+        # We have to stick the pending_children set back into the
+        # task master because the Python 1.5.2 compatibility does not
+        # allow us to use in-place updates
+        self.pending_children = pending_children
+
+    def stop(self):
+        """
+        Stops the current build completely.
+        """
+        # Swap in the no_next_candidate hook so that subsequent
+        # candidate lookups find nothing more to build (presumably it
+        # always reports "no candidate" -- confirm in its definition).
+        self.next_candidate = self.no_next_candidate
+
+    def cleanup(self):
+        """
+        Check for dependency cycles.
+
+        Any nodes still in the pending-children set at this point were
+        never released for building; for each one, try to find a
+        dependency cycle through it.  Raises SCons.Errors.UserError
+        with a description of every genuine cycle found.
+        """
+        if not self.pending_children:
+            return
+
+        # Pair each pending node with whatever cycle (possibly none)
+        # can be traced through it.
+        # TODO(1.5)
+        #nclist = [ (n, find_cycle([n], set())) for n in self.pending_children ]
+        nclist = [(n, find_cycle([n], set())) for n in self.pending_children]
+
+        # Keep only real problems: nodes with an actual cycle, or
+        # nodes in an unexpected (non-EXECUTED) state.
+        # TODO(1.5)
+        #genuine_cycles = [
+        #    node for node, cycle in nclist
+        #             if cycle or node.get_state() != NODE_EXECUTED
+        #]
+        genuine_cycles = [t for t in nclist if t[1] or t[0].get_state() != NODE_EXECUTED]
+        if not genuine_cycles:
+            # All of the "cycles" found were single nodes in EXECUTED state,
+            # which is to say, they really weren't cycles.  Just return.
+            return
+
+        # Build one human-readable message covering all findings.
+        desc = 'Found dependency cycle(s):\n'
+        for node, cycle in nclist:
+            if cycle:
+                desc = desc + "  " + " -> ".join(map(str, cycle)) + "\n"
+            else:
+                # A pending node with no traceable cycle and a bad
+                # state indicates an internal bookkeeping error.
+                desc = desc + \
+                    "  Internal Error: no cycle found for node %s (%s) in state %s\n" %  \
+                    (node, repr(node), StateString[node.get_state()])
+
+        raise SCons.Errors.UserError, desc
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4: