4 # Permission is hereby granted, free of charge, to any person obtaining
5 # a copy of this software and associated documentation files (the
6 # "Software"), to deal in the Software without restriction, including
7 # without limitation the rights to use, copy, modify, merge, publish,
8 # distribute, sublicense, and/or sell copies of the Software, and to
9 # permit persons to whom the Software is furnished to do so, subject to
10 # the following conditions:
12 # The above copyright notice and this permission notice shall be included
13 # in all copies or substantial portions of the Software.
15 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
16 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
17 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
19 # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
20 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
21 # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 Generic Taskmaster module for the SCons build engine.
27 This module contains the primary interface(s) between a wrapping user
28 interface and the SCons build engine. There are two key classes here:
31 This is the main engine for walking the dependency graph and
32 calling things to decide what does or doesn't need to be built.
35 This is the base class for allowing a wrapping interface to
36 decide what does or doesn't actually need to be done. The
37 intention is for a wrapping interface to subclass this as
38 appropriate for different types of behavior it may need.
40 The canonical example is the SCons native Python interface,
41 which has Task subclasses that handle its specific behavior,
42 like printing "`foo' is up to date" when a top-level target
43 doesn't need to be built, and handling the -c option by removing
44 targets as its "build" action. There is also a separate subclass
45 for suppressing this output when the -q option is used.
47 The Taskmaster instantiates a Task object for each (set of)
48 target(s) that it decides need to be evaluated and/or built.
51 __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
53 from itertools import chain
# Module-level aliases for the Node state constants, hoisted so the hot
# Taskmaster loop uses fast local/module lookups instead of repeated
# attribute chains.  NOTE(review): the "import SCons.Node" line these
# depend on is not visible in this extract (the residue numbering skips
# the import block) -- confirm against the full file.
62 StateString = SCons.Node.StateString
63 NODE_NO_STATE = SCons.Node.no_state
64 NODE_PENDING = SCons.Node.pending
65 NODE_EXECUTING = SCons.Node.executing
66 NODE_UP_TO_DATE = SCons.Node.up_to_date
67 NODE_EXECUTED = SCons.Node.executed
68 NODE_FAILED = SCons.Node.failed
71 # A subsystem for recording stats about how different Nodes are handled by
72 # the main Taskmaster loop. There's no external control here (no need for
73 # a --debug= option); enable it by changing the value of CollectStats.
# NOTE(review): fragment -- the "class Stats:" header, most counter
# initializers, and the dump function's header are missing from this
# extract (residue numbering skips them).
79 A simple class for holding statistics about the disposition of a
80 Node by the Taskmaster. If we're collecting statistics, each Node
81 processed by the Taskmaster gets one of these attached, in which case
82 the Taskmaster records its decision each time it processes the Node.
83 (Ideally, that's just once per Node.)
87 Instantiates a Taskmaster.Stats object, initializing all
88 appropriate counters to zero.
# Counter for nodes the Taskmaster skipped because they were already
# evaluated; sibling counters are not visible in this extract.
91 self.already_handled = 0
# Builds a fixed-width format string keyed on the Stats attribute names
# so each node's counters line up in columns when dumped.
100 fmt = "%(considered)3d "\
101 "%(already_handled)3d " \
103 "%(child_failed)3d " \
105 "%(side_effects)3d " \
# NOTE(review): Python 2 only -- cmp() and the two-argument sort
# comparator were removed in Python 3 (use key=str instead).
109 StatsNodes.sort(lambda a, b: cmp(str(a), str(b)))
# NOTE(review): Python 2 print statement; under Python 3 these parens
# would change meaning (print() returns None, "+ str(n)" would raise).
111 print (fmt % n.stats.__dict__) + str(n)
# NOTE(review): fragment -- the "class Task:" header and most of
# __init__'s attribute assignments (tm, top, node, ...) are missing
# from this extract (residue numbering skips them).
117 Default SCons build engine task.
119 This controls the interaction of the actual building of node
120 and the rest of the engine.
122 This is expected to handle all of the normally-customizable
123 aspects of controlling a build, so any given application
124 *should* be able to do what it wants by sub-classing this
125 class and overriding methods as appropriate. If an application
126 needs to customize something by sub-classing Taskmaster (or
127 some other build engine class), we should first try to migrate
128 that functionality into this class.
130 Note that it's generally a good idea for sub-classes to call
131 these methods explicitly to update state, etc., rather than
132 roll their own interaction with Taskmaster from scratch.
# tm: owning Taskmaster; targets: Nodes this task builds; top: whether
# the node is a top-level (command-line) target; node: the trigger Node.
134 def __init__(self, tm, targets, top, node):
136 self.targets = targets
def trace_message(self, method, node, description='node'):
    """Format a single trace line for this task.

    Lays out the calling method's name, a short description and the
    Taskmaster's rendering of *node* in fixed-width columns, newline
    terminated, ready to be written to the trace stream.
    """
    label = method + ':'
    rendered_node = self.tm.trace_node(node)
    return '%-20s %s %s\n' % (label, description, rendered_node)
# NOTE(review): fragment -- several structural lines are missing from
# this extract (the "def prepare(self):" header, docstring delimiters,
# the try:/except bodies of execute(), the "raise buildError" line,
# and the t.prepare()/s.prepare() loop bodies).
145 def display(self, message):
147 Hook to allow the calling interface to display a message.
149 This hook gets called as part of preparing a task for execution
150 (that is, a Node to be built). As part of figuring out what Node
151 should be built next, the actual target list may be altered,
152 along with a message describing the alteration. The calling
153 interface can subclass Task and provide a concrete implementation
154 of this method to see those messages.
# --- prepare() fragment (its "def" line is not in this extract) ---
160 Called just before the task is executed.
162 This is mainly intended to give the target Nodes a chance to
163 unlink underlying files and make all necessary directories before
164 the Action is actually called to build the targets.
167 if T: T.write(self.trace_message('Task.prepare()', self.node))
169 # Now that it's the appropriate time, give the TaskMaster a
170 # chance to raise any exceptions it encountered while preparing
172 self.exception_raise()
# Surface any pending Taskmaster message (e.g. target-list alteration)
# through the display() hook, then clear it so it is shown only once.
175 self.display(self.tm.message)
176 self.tm.message = None
178 # Let the targets take care of any necessary preparations.
179 # This includes verifying that all of the necessary sources
180 # and dependencies exist, removing the target file(s), etc.
182 # As of April 2008, the get_executor().prepare() method makes
183 # sure that all of the aggregate sources necessary to build this
184 # Task's target(s) exist in one up-front check. The individual
185 # target t.prepare() methods check that each target's explicit
186 # or implicit dependencies exists, and also initialize the
188 self.targets[0].get_executor().prepare()
189 for t in self.targets:
191 for s in t.side_effects:
194 def get_target(self):
195 """Fetch the target being built or updated by this task.
# needs_execute(): default answer for whether the task must run; the
# concrete subclasses below (AlwaysTask / OutOfDateTask) override it.
199 def needs_execute(self):
200 # TODO(deprecate): "return True" is the old default behavior;
201 # change it to NotImplementedError (after running through the
202 # Deprecation Cycle) so the desired behavior is explicitly
203 # determined by which concrete subclass is used.
204 #raise NotImplementedError
# --- execute() fragment (its "def" line is not in this extract) ---
209 Called to execute the task.
211 This method is called from multiple threads in a parallel build,
212 so only do thread safe stuff here. Do thread unsafe stuff in
213 prepare(), executed() or failed().
216 if T: T.write(self.trace_message('Task.execute()', self.node))
# Try the cache first; only fall through to an actual build if at
# least one target could not be retrieved.
219 everything_was_cached = 1
220 for t in self.targets:
221 if not t.retrieve_from_cache():
222 everything_was_cached = 0
224 if not everything_was_cached:
225 self.targets[0].build()
# SystemExit from a build action is converted into ExplicitExit so the
# failing target is attached; UserError/BuildError are presumably
# re-raised as-is (their handler bodies are missing from this extract).
227 exc_value = sys.exc_info()[1]
228 raise SCons.Errors.ExplicitExit(self.targets[0], exc_value.code)
229 except SCons.Errors.UserError:
231 except SCons.Errors.BuildError:
# Any other exception is normalized to a BuildError carrying the node
# and the original exc_info for later reporting.
234 buildError = SCons.Errors.convert_to_BuildError(e)
235 buildError.node = self.targets[0]
236 buildError.exc_info = sys.exc_info()
# NOTE(review): fragment -- docstring delimiters, several "def" lines
# (failed/fail_stop), "T = self.tm.trace" assignments, try: lines, and
# the built()/visited()/push_to_cache() calls are missing from this
# extract (residue numbering skips them).
239 def executed_without_callbacks(self):
241 Called when the task has been successfully executed
242 and the Taskmaster instance doesn't want to call
243 the Node's callback methods.
246 if T: T.write(self.trace_message('Task.executed_without_callbacks()',
# Only nodes still in the EXECUTING state were actually built by this
# task; clear their side effects and mark them EXECUTED.
249 for t in self.targets:
250 if t.get_state() == NODE_EXECUTING:
251 for side_effect in t.side_effects:
252 side_effect.set_state(NODE_NO_STATE)
253 t.set_state(NODE_EXECUTED)
255 def executed_with_callbacks(self):
257 Called when the task has been successfully executed and
258 the Taskmaster instance wants to call the Node's callback
261 This may have been a do-nothing operation (to preserve build
262 order), so we must check the node's state before deciding whether
263 it was "built", in which case we call the appropriate Node method.
264 In any event, we always call "visited()", which will handle any
265 post-visit actions that must take place regardless of whether
266 or not the target was an actual built target or a source Node.
269 if T: T.write(self.trace_message('Task.executed_with_callbacks()',
272 for t in self.targets:
273 if t.get_state() == NODE_EXECUTING:
274 for side_effect in t.side_effects:
275 side_effect.set_state(NODE_NO_STATE)
276 t.set_state(NODE_EXECUTED)
# The default "executed" entry point is the callback-invoking variant.
280 executed = executed_with_callbacks
# --- failed()/fail_stop() fragment (their "def" lines are missing) ---
284 Default action when a task fails: stop the build.
286 Note: Although this function is normally invoked on nodes in
287 the executing state, it might also be invoked on up-to-date
288 nodes when using Configure().
294 Explicit stop-the-build failure.
296 This sets failure status on the target nodes and all of
297 their dependent parent nodes.
299 Note: Although this function is normally invoked on nodes in
300 the executing state, it might also be invoked on up-to-date
301 nodes when using Configure().
304 if T: T.write(self.trace_message('Task.failed_stop()', self.node))
306 # Invoke will_not_build() to clean-up the pending children
308 self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
310 # Tell the taskmaster to not start any new tasks
313 # We're stopping because of a build failure, but give the
314 # calling Task class a chance to postprocess() the top-level
315 # target under which the build failure occurred.
316 self.targets = [self.tm.current_top]
319 def fail_continue(self):
321 Explicit continue-the-build failure.
323 This sets failure status on the target nodes and all of
324 their dependent parent nodes.
326 Note: Although this function is normally invoked on nodes in
327 the executing state, it might also be invoked on up-to-date
328 nodes when using Configure().
331 if T: T.write(self.trace_message('Task.failed_continue()', self.node))
# Unlike fail_stop(), the build keeps going: only the failed targets
# and their waiting parents are marked FAILED.
333 self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
335 def make_ready_all(self):
337 Marks all targets in a task ready for execution.
339 This is used when the interface needs every target Node to be
340 visited--the canonical example being the "scons -c" option.
343 if T: T.write(self.trace_message('Task.make_ready_all()', self.node))
# Every target counts as out of date here -- no up-to-date check.
345 self.out_of_date = self.targets[:]
346 for t in self.targets:
347 t.disambiguate().set_state(NODE_EXECUTING)
348 for s in t.side_effects:
349 s.set_state(NODE_EXECUTING)
351 def make_ready_current(self):
353 Marks all targets in a task ready for execution if any target
356 This is the default behavior for building only what's necessary.
359 if T: T.write(self.trace_message('Task.make_ready_current()',
362 self.out_of_date = []
363 needs_executing = False
364 for t in self.targets:
# A node is up to date only if it has no builder, or it is not forced
# (always_build) and its own up-to-date check passes.
366 t.disambiguate().make_ready()
367 is_up_to_date = not t.has_builder() or \
368 (not t.always_build and t.is_up_to_date())
# NOTE(review): Python 2 "except E, e" syntax -- a py3 port needs
# "except EnvironmentError as e" (or OSError).
369 except EnvironmentError, e:
370 raise SCons.Errors.BuildError(node=t, errstr=e.strerror, filename=e.filename)
372 if not is_up_to_date:
373 self.out_of_date.append(t)
374 needs_executing = True
# If any target needs building, ALL targets (and side effects) of the
# task move to EXECUTING together; otherwise all go UP_TO_DATE.
377 for t in self.targets:
378 t.set_state(NODE_EXECUTING)
379 for s in t.side_effects:
380 s.set_state(NODE_EXECUTING)
382 for t in self.targets:
383 # We must invoke visited() to ensure that the node
384 # information has been computed before allowing the
385 # parent nodes to execute. (That could occur in a
388 t.set_state(NODE_UP_TO_DATE)
390 make_ready = make_ready_current
392 def postprocess(self):
394 Post-processes a task after it's been executed.
396 This examines all the targets just built (or not, we don't care
397 if the build was successful, or even if there was no build
398 because everything was up-to-date) to see if they have any
399 waiting parent Nodes, or Nodes waiting on a common side effect,
400 that can be put back on the candidates list.
403 if T: T.write(self.trace_message('Task.postprocess()', self.node))
405 # We may have built multiple targets, some of which may have
406 # common parents waiting for this build. Count up how many
407 # targets each parent was waiting for so we can subtract the
408 # values later, and so we *don't* put waiting side-effect Nodes
409 # back on the candidates list if the Node is also a waiting
412 targets = set(self.targets)
# NOTE(review): the "parents = {}" initializer and the "for t in
# targets:" loop header are missing from this extract.
414 pending_children = self.tm.pending_children
417 # A node can only be in the pending_children set if it has
418 # some waiting_parents.
419 if t.waiting_parents:
420 if T: T.write(self.trace_message('Task.postprocess()',
423 pending_children.discard(t)
424 for p in t.waiting_parents:
425 parents[p] = parents.get(p, 0) + 1
# Side effects of an executed target are released, and anything
# waiting on them (as parent or via waiting_s_e) is accounted for.
428 for s in t.side_effects:
429 if s.get_state() == NODE_EXECUTING:
430 s.set_state(NODE_NO_STATE)
431 for p in s.waiting_parents:
432 parents[p] = parents.get(p, 0) + 1
433 for p in s.waiting_s_e:
435 self.tm.candidates.append(p)
# Decrement each waiting parent's ref count; parents that reach zero
# are presumably re-queued as candidates (the "if p.ref_count == 0"
# guard is missing from this extract -- confirm against the full file).
437 for p, subtract in parents.items():
438 p.ref_count = p.ref_count - subtract
439 if T: T.write(self.trace_message('Task.postprocess()',
441 'adjusted parent ref count'))
443 self.tm.candidates.append(p)
255 # Exception handling subsystem.
450 # Exceptions that occur while walking the DAG or examining Nodes
451 # must be raised, but must be raised at an appropriate time and in
452 # a controlled manner so we can, if necessary, recover gracefully,
453 # possibly write out signature information for Nodes we've updated,
454 # etc. This is done by having the Taskmaster tell us about the
455 # exception, and letting
# NOTE(review): fragment -- the "def exc_info(self):" and
# "def exception_clear(self):" headers and docstring delimiters are
# missing from this extract.
459 Returns info about a recorded exception.
461 return self.exception
465 Clears any recorded exception.
467 This also changes the "exception_raise" attribute to point
468 to the appropriate do-nothing method.
# Stored as a (type, value, traceback) triple mirroring sys.exc_info().
470 self.exception = (None, None, None)
471 self.exception_raise = self._no_exception_to_raise
473 def exception_set(self, exception=None):
475 Records an exception to be raised at the appropriate time.
477 This also changes the "exception_raise" attribute to point
478 to the method that will, in fact
# With no explicit argument, capture whatever is currently in flight.
481 exception = sys.exc_info()
482 self.exception = exception
483 self.exception_raise = self._exception_raise
# Do-nothing target for exception_raise when nothing is recorded.
485 def _no_exception_to_raise(self):
488 def _exception_raise(self):
490 Raises a pending exception that was recorded while getting a
491 Task ready for execution.
# The recorded value may be a 3-tuple or a 2-tuple; the try/except that
# distinguishes the two unpackings is missing from this extract.
493 exc = self.exc_info()[:]
495 exc_type, exc_value, exc_traceback = exc
497 exc_type, exc_value = exc
# NOTE(review): Python 2 three-argument raise; a py3 port needs
# "raise exc_value.with_traceback(exc_traceback)".
499 raise exc_type, exc_value, exc_traceback
# Task variant that always executes, regardless of up-to-date state.
501 class AlwaysTask(Task):
502 def needs_execute(self):
# NOTE(review): the docstring delimiters and the "return True" line are
# missing from this extract (residue numbering skips them).
504 Always returns True (indicating this Task should always
507 Subclasses that need this behavior (as opposed to the default
508 of only executing Nodes that are out of date w.r.t. their
509 dependencies) can use this as follows:
511 class MyTaskSubclass(SCons.Taskmaster.Task):
# NOTE(review): "Task.execute_always" does not appear anywhere in this
# extract -- this docstring example looks stale; verify the attribute
# actually exists before relying on it.
512 needs_execute = SCons.Taskmaster.Task.execute_always
class OutOfDateTask(Task):
    def needs_execute(self):
        """Report whether this task's target still needs building.

        Returns True (indicating this Task should be executed) if this
        Task's target state indicates it needs executing, which has
        already been determined by an earlier up-to-date check.
        """
        first_target = self.targets[0]
        return first_target.get_state() == SCons.Node.executing
# Recursive DFS through waiting_parents links looking for a dependency
# cycle rooted at stack[0].  NOTE(review): fragment -- the return
# statements, the stack push/pop around the recursion, and the final
# fall-through return are missing from this extract.
526 def find_cycle(stack, visited):
# Already explored from this node: no new cycle can start here.
527 if stack[-1] in visited:
529 visited.add(stack[-1])
530 for n in stack[-1].waiting_parents:
# Walked all the way back around to the starting node: cycle found.
532 if stack[0] == stack[-1]:
534 if find_cycle(stack, visited):
# NOTE(review): fragment -- the "class Taskmaster:" header and several
# __init__ assignments (self.candidates, self.tasker, self.order,
# self.message, self.trace, ...) are missing from this extract.
542 The Taskmaster for walking the dependency DAG.
# NOTE(review): mutable default argument "targets=[]" -- harmless here
# only because the list is copied ([:]) and original_top is treated as
# read-only; do not mutate it.
545 def __init__(self, targets=[], tasker=None, order=None, trace=None):
546 self.original_top = targets
# Reversed copy so .pop() hands back top-level targets in command-line
# order.
547 self.top_targets_left = targets[:]
548 self.top_targets_left.reverse()
550 tasker = OutOfDateTask
# next_candidate is a rebindable strategy: stop() swaps in
# no_next_candidate to shut the walk down.
558 self.next_candidate = self.find_next_candidate
559 self.pending_children = set()
561 def find_next_candidate(self):
563 Returns the next candidate Node for (potential) evaluation.
565 The candidate list (really a stack) initially consists of all of
566 the top-level (command line) targets provided when the Taskmaster
567 was initialized. While we walk the DAG, visiting Nodes, all the
568 children that haven't finished processing get pushed on to the
569 candidate list. Each child can then be popped and examined in
570 turn for whether *their* children are all up-to-date, in which
571 case a Task will be created for their actual evaluation and
574 Here is where we also allow candidate Nodes to alter the list of
575 Nodes that should be examined. This is used, for example, when
576 invoking SCons in a source directory. A source directory Node can
577 return its corresponding build directory Node, essentially saying,
578 "Hey, you really need to build this thing over here instead."
# Prefer already-pushed candidates; fall back to the next top-level
# target.  The try/except IndexError plumbing around these pops is
# missing from this extract.
581 return self.candidates.pop()
585 node = self.top_targets_left.pop()
588 self.current_top = node
# Give the node a chance to redirect evaluation (e.g. source dir ->
# build dir); any alternates are queued ahead via self.order().
589 alt, message = node.alter_targets()
591 self.message = message
592 self.candidates.append(node)
593 self.candidates.extend(self.order(alt))
594 node = self.candidates.pop()
597 def no_next_candidate(self):
599 Stops Taskmaster processing by not returning a next candidate.
601 Note that we have to clean-up the Taskmaster candidate list
602 because the cycle detection depends on the fact all nodes have
603 been processed somehow.
# Drain the remaining candidates through will_not_build() so cycle
# detection sees every node as handled.
605 while self.candidates:
606 candidates = self.candidates
608 self.will_not_build(candidates)
611 def _validate_pending_children(self):
613 Validate the content of the pending_children set. Assert if an
614 internal error is found.
616 This function is used strictly for debugging the taskmaster by
617 checking that no invariants are violated. It is not used in
620 The pending_children set is used to detect cycles in the
621 dependency graph. We call a "pending child" a child that is
622 found in the "pending" state when checking the dependencies of
625 A pending child can occur when the Taskmaster completes a loop
626 through a cycle. For example, let's imagine a graph made of
627 three nodes (A, B and C) making a cycle. The evaluation starts
628 at node A. The taskmaster first considers whether node A's
629 child B is up-to-date. Then, recursively, node B needs to
630 check whether node C is up-to-date. This leaves us with a
631 dependency graph looking like:
635 Node A (Pending) --> Node B(Pending) --> Node C (NoState)
638 +-------------------------------------+
640 Now, when the Taskmaster examines the Node C's child Node A,
641 it finds that Node A is in the "pending" state. Therefore,
642 Node A is a pending child of node C.
644 Pending children indicate that the Taskmaster has potentially
645 looped back through a cycle. We say potentially because it could
646 also occur when a DAG is evaluated in parallel. For example,
647 consider the following graph:
650 Node A (Pending) --> Node B(Pending) --> Node C (Pending) --> ...
653 +----------> Node D (NoState) --------+
657 The Taskmaster first evaluates the nodes A, B, and C and
658 starts building some children of node C. Assuming, that the
659 maximum parallel level has not been reached, the Taskmaster
660 will examine Node D. It will find that Node C is a pending
663 In summary, evaluating a graph with a cycle will always
664 involve a pending child at one point. A pending child might
665 indicate either a cycle or a diamond-shaped DAG. Only a
666 fraction of the nodes ends-up being a "pending child" of
667 another node. This keeps the pending_children set small in
670 We can differentiate between the two cases if we wait until
671 the end of the build. At this point, all the pending children
672 nodes due to a diamond-shaped DAG will have been properly
673 built (or will have failed to build). But, the pending
674 children involved in a cycle will still be in the pending
677 The taskmaster removes nodes from the pending_children set as
678 soon as a pending_children node moves out of the pending
679 state. This also helps to keep the pending_children set small.
# Invariants: every pending child is PENDING or EXECUTING, has at
# least one waiting parent, and each such parent still holds a
# positive reference count.
682 for n in self.pending_children:
683 assert n.state in (NODE_PENDING, NODE_EXECUTING), \
684 (str(n), StateString[n.state])
685 assert len(n.waiting_parents) != 0, (str(n), len(n.waiting_parents))
686 for p in n.waiting_parents:
687 assert p.ref_count > 0, (str(n), str(p), p.ref_count)
def trace_message(self, message):
    """Return *message* tagged with the Taskmaster name and newline-terminated."""
    tagged = 'Taskmaster: %s' % message
    return tagged + '\n'
# Render a node as "<state ref_count name>" for trace output.
# NOTE(review): fragment -- the continuation lines supplying the
# remaining tuple elements of the % expression are missing from this
# extract (residue numbering skips 695-696).
693 def trace_node(self, node):
694 return '<%-10s %-3s %s>' % (StateString[node.get_state()],
# NOTE(review): fragment -- the enclosing "while True:" loop, the try:
# lines, several "return node"/"continue" statements, and the
# "T = self.trace" / "S = ..." stats setup are missing from this
# extract (residue numbering skips them).
698 def _find_next_ready_node(self):
700 Finds the next node that is ready to be built.
702 This is *the* main guts of the DAG walk. We loop through the
703 list of candidates, looking for something that has no un-built
704 children (i.e., that is a leaf Node or has dependencies that are
705 all leaf Nodes or up-to-date). Candidate Nodes are re-scanned
706 (both the target Node itself and its sources, which are always
707 scanned in the context of a given target) to discover implicit
708 dependencies. A Node that must wait for some children to be
709 built will be put back on the candidates list after the children
710 have finished building. A Node that has been put back on the
711 candidates list in this way may have itself (or its sources)
712 re-scanned, in order to handle generated header files (e.g.) and
713 the implicit dependencies therein.
715 Note that this method does not do any signature calculation or
716 up-to-date check itself. All of that is handled by the Task
717 class. This is purely concerned with the dependency graph walk.
720 self.ready_exc = None
723 if T: T.write('\n' + self.trace_message('Looking for a node to evaluate'))
# No candidates left: the walk is finished.
726 node = self.next_candidate()
728 if T: T.write(self.trace_message('No candidate anymore.') + '\n')
731 node = node.disambiguate()
732 state = node.get_state()
734 # For debugging only:
737 # self._validate_pending_children()
739 # self.ready_exc = sys.exc_info()
# Attach a Stats record on first sight of the node (CollectStats mode).
743 if not hasattr(node, 'stats'):
745 StatsNodes.append(node)
747 S.considered = S.considered + 1
751 if T: T.write(self.trace_message(' Considering node %s and its children:' % self.trace_node(node)))
753 if state == NODE_NO_STATE:
754 # Mark this node as being on the execution stack:
755 node.set_state(NODE_PENDING)
756 elif state > NODE_PENDING:
757 # Skip this node if it has already been evaluated:
758 if S: S.already_handled = S.already_handled + 1
759 if T: T.write(self.trace_message(' already handled (executed)'))
# Scanning children may itself raise: SystemExit is converted to
# ExplicitExit; any other exception is stashed in ready_exc to be
# re-raised when the Task is executed.
763 children = node.children()
765 exc_value = sys.exc_info()[1]
766 e = SCons.Errors.ExplicitExit(node, exc_value.code)
767 self.ready_exc = (SCons.Errors.ExplicitExit, e)
768 if T: T.write(self.trace_message(' SystemExit'))
771 # We had a problem just trying to figure out the
772 # children (like a child couldn't be linked in to a
773 # VariantDir, or a Scanner threw something). Arrange to
774 # raise the exception when the Task is "executed."
775 self.ready_exc = sys.exc_info()
776 if S: S.problem = S.problem + 1
777 if T: T.write(self.trace_message(' exception %s while scanning children.\n' % e))
# Classify each child (and prerequisite) by state in a single pass.
780 children_not_visited = []
781 children_pending = set()
782 children_not_ready = []
783 children_failed = False
785 for child in chain(children,node.prerequisites):
786 childstate = child.get_state()
788 if T: T.write(self.trace_message(' ' + self.trace_node(child)))
790 if childstate == NODE_NO_STATE:
791 children_not_visited.append(child)
792 elif childstate == NODE_PENDING:
793 children_pending.add(child)
794 elif childstate == NODE_FAILED:
795 children_failed = True
797 if childstate <= NODE_EXECUTING:
798 children_not_ready.append(child)
801 # These nodes have not even been visited yet. Add
802 # them to the list so that on some next pass we can
803 # take a stab at evaluating them (or their children).
# Reversed before extend() so popping the candidates stack visits them
# in their original order.
804 children_not_visited.reverse()
805 self.candidates.extend(self.order(children_not_visited))
806 #if T and children_not_visited:
807 # T.write(self.trace_message(' adding to candidates: %s' % map(str, children_not_visited)))
808 # T.write(self.trace_message(' candidates now: %s\n' % map(str, self.candidates)))
810 # Skip this node if any of its children have failed.
812 # This catches the case where we're descending a top-level
813 # target and one of our children failed while trying to be
814 # built by a *previous* descent of an earlier top-level
817 # It can also occur if a node is reused in multiple
818 # targets. One first descends though the one of the
819 # target, the next time occurs through the other target.
821 # Note that we can only have failed_children if the
822 # --keep-going flag was used, because without it the build
823 # will stop before diving in the other branch.
825 # Note that even if one of the children fails, we still
826 # added the other children to the list of candidate nodes
827 # to keep on building (--keep-going).
829 node.set_state(NODE_FAILED)
831 if S: S.child_failed = S.child_failed + 1
832 if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
835 if children_not_ready:
836 for child in children_not_ready:
837 # We're waiting on one or more derived targets
838 # that have not yet finished building.
839 if S: S.not_built = S.not_built + 1
841 # Add this node to the waiting parents lists of
842 # anything we're waiting on, with a reference
843 # count so we can be put back on the list for
844 # re-evaluation when they've all finished.
845 node.ref_count = node.ref_count + child.add_to_waiting_parents(node)
846 if T: T.write(self.trace_message(' adjusted ref count: %s, child %s' %
847 (self.trace_node(node), repr(str(child)))))
850 for pc in children_pending:
851 T.write(self.trace_message(' adding %s to the pending children set\n' %
852 self.trace_node(pc)))
# Record pending children for end-of-build cycle detection.
853 self.pending_children = self.pending_children | children_pending
857 # Skip this node if it has side-effects that are
858 # currently being built:
859 wait_side_effects = False
860 for se in node.side_effects:
861 if se.get_state() == NODE_EXECUTING:
862 se.add_to_waiting_s_e(node)
863 wait_side_effects = True
865 if wait_side_effects:
866 if S: S.side_effects = S.side_effects + 1
869 # The default when we've gotten through all of the checks above:
870 # this node is ready to be built.
871 if S: S.build = S.build + 1
872 if T: T.write(self.trace_message('Evaluating %s\n' %
873 self.trace_node(node)))
875 # For debugging only:
878 # self._validate_pending_children()
880 # self.ready_exc = sys.exc_info()
# NOTE(review): fragment -- "def next_task(self):", the try/except
# around task creation, the "return task"/"return None" statements,
# "def stop(self):", "def cleanup(self):", and the loop headers inside
# will_not_build() are missing from this extract.
889 Returns the next task to be executed.
891 This simply asks for the next Node to be evaluated, and then wraps
892 it in the specific Task subclass with which we were initialized.
894 node = self._find_next_ready_node()
# Build one Task for the executor's whole batch of targets; "top" is
# True when the trigger node was named on the command line.
899 tlist = node.get_executor().targets
901 task = self.tasker(self, tlist, node in self.original_top, node)
905 # We had a problem just trying to get this task ready (like
906 # a child couldn't be linked in to a VariantDir when deciding
907 # whether this node is current). Arrange to raise the
908 # exception when the Task is "executed."
909 self.ready_exc = sys.exc_info()
# Hand any deferred exception to the task, then clear our copy.
912 task.exception_set(self.ready_exc)
914 self.ready_exc = None
918 def will_not_build(self, nodes, node_func=lambda n: None):
920 Perform clean-up about nodes that will never be built. Invokes
921 a user defined function on all of these nodes (including all
# Work on local copies; the final result is written back at the end.
927 pending_children = self.pending_children
929 to_visit = set(nodes)
930 pending_children = pending_children - to_visit
934 T.write(self.trace_message(' removing node %s from the pending children set\n' %
# Transitively walk waiting_parents, applying node_func and dropping
# every reached node from the pending-children bookkeeping.
939 node = to_visit.pop()
940 except AttributeError:
944 to_visit.remove(node)
950 # Prune recursion by flushing the waiting children
952 parents = node.waiting_parents
953 node.waiting_parents = set()
955 to_visit = to_visit | parents
956 pending_children = pending_children - parents
# Each parent loses the reference it held on this never-to-be-built
# child.
959 p.ref_count = p.ref_count - 1
960 if T: T.write(self.trace_message(' removing parent %s from the pending children set\n' %
963 # The container to_visit has been emptied.
966 # We have to stick the pending_children set back into the
967 # task master because the python 1.5.2 compatibility does not
968 # allow us to use in-place updates
969 self.pending_children = pending_children
# --- stop() fragment: swap in the terminating candidate strategy ---
973 Stops the current build completely.
975 self.next_candidate = self.no_next_candidate
# --- cleanup() fragment: end-of-build dependency-cycle check ---
979 Check for dependency cycles.
981 if not self.pending_children:
985 #nclist = [ (n, find_cycle([n], set())) for n in self.pending_children ]
# NOTE(review): Python 2 semantics -- map()/filter() return lists
# there; under Python 3 these are lazy iterators and the "if not
# genuine_cycles" test below would misbehave.
986 nclist = map(lambda n: (n, find_cycle([n], set())), self.pending_children)
990 # node for node, cycle in nclist
991 # if cycle or node.get_state() != NODE_EXECUTED
993 genuine_cycles = filter(lambda t: t[1] or t[0].get_state() != NODE_EXECUTED, nclist)
994 if not genuine_cycles:
995 # All of the "cycles" found were single nodes in EXECUTED state,
996 # which is to say, they really weren't cycles. Just return.
999 desc = 'Found dependency cycle(s):\n'
1000 for node, cycle in nclist:
# NOTE(review): uses the Python 2 "string" module (string.join);
# py3 equivalent is " -> ".join(map(str, cycle)).
1002 desc = desc + " " + string.join(map(str, cycle), " -> ") + "\n"
1005 " Internal Error: no cycle found for node %s (%s) in state %s\n" % \
1006 (node, repr(node), StateString[node.get_state()])
# NOTE(review): Python 2 "raise X, arg" syntax.
1008 raise SCons.Errors.UserError, desc