4 # Permission is hereby granted, free of charge, to any person obtaining
5 # a copy of this software and associated documentation files (the
6 # "Software"), to deal in the Software without restriction, including
7 # without limitation the rights to use, copy, modify, merge, publish,
8 # distribute, sublicense, and/or sell copies of the Software, and to
9 # permit persons to whom the Software is furnished to do so, subject to
10 # the following conditions:
12 # The above copyright notice and this permission notice shall be included
13 # in all copies or substantial portions of the Software.
15 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
16 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
17 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
19 # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
20 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
21 # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 from __future__ import generators ### KEEP FOR COMPATIBILITY FIXERS
26 Generic Taskmaster module for the SCons build engine.
28 This module contains the primary interface(s) between a wrapping user
29 interface and the SCons build engine. There are two key classes here:
32 This is the main engine for walking the dependency graph and
33 calling things to decide what does or doesn't need to be built.
36 This is the base class for allowing a wrapping interface to
37 decide what does or doesn't actually need to be done. The
38 intention is for a wrapping interface to subclass this as
39 appropriate for different types of behavior it may need.
41 The canonical example is the SCons native Python interface,
42 which has Task subclasses that handle its specific behavior,
43 like printing "`foo' is up to date" when a top-level target
44 doesn't need to be built, and handling the -c option by removing
45 targets as its "build" action. There is also a separate subclass
46 for suppressing this output when the -q option is used.
48 The Taskmaster instantiates a Task object for each (set of)
49 target(s) that it decides need to be evaluated and/or built.
52 __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
54 from itertools import chain
# Short module-level aliases for the node-state constants and the
# state-name lookup table defined in SCons.Node; the DAG-walking code
# below compares and assigns these on every node it touches.
63 StateString = SCons.Node.StateString
64 NODE_NO_STATE = SCons.Node.no_state
65 NODE_PENDING = SCons.Node.pending
66 NODE_EXECUTING = SCons.Node.executing
67 NODE_UP_TO_DATE = SCons.Node.up_to_date
68 NODE_EXECUTED = SCons.Node.executed
69 NODE_FAILED = SCons.Node.failed
72 # A subsystem for recording stats about how different Nodes are handled by
73 # the main Taskmaster loop. There's no external control here (no need for
74 # a --debug= option); enable it by changing the value of CollectStats.
80 A simple class for holding statistics about the disposition of a
81 Node by the Taskmaster. If we're collecting statistics, each Node
82 processed by the Taskmaster gets one of these attached, in which case
83 the Taskmaster records its decision each time it processes the Node.
84 (Ideally, that's just once per Node.)
88 Instantiates a Taskmaster.Stats object, initializing all
89 appropriate counters to zero.
92 self.already_handled = 0
101 fmt = "%(considered)3d "\
102 "%(already_handled)3d " \
104 "%(child_failed)3d " \
106 "%(side_effects)3d " \
110 StatsNodes.sort(lambda a, b: cmp(str(a), str(b)))
112 print (fmt % n.stats.__dict__) + str(n)
118 Default SCons build engine task.
120 This controls the interaction of the actual building of node
121 and the rest of the engine.
123 This is expected to handle all of the normally-customizable
124 aspects of controlling a build, so any given application
125 *should* be able to do what it wants by sub-classing this
126 class and overriding methods as appropriate. If an application
127 needs to customize something by sub-classing Taskmaster (or
128 some other build engine class), we should first try to migrate
129 that functionality into this class.
131 Note that it's generally a good idea for sub-classes to call
132 these methods explicitly to update state, etc., rather than
133 roll their own interaction with Taskmaster from scratch.
135 def __init__(self, tm, targets, top, node):
# Record the list of target Nodes this task is responsible for.
# NOTE(review): the rest of the constructor body is elided in this
# sampled view; presumably tm/top/node are stored too — confirm.
137 self.targets = targets
def trace_message(self, method, node, description='node'):
    """Return a single trace line for *node*.

    The method name (with a trailing colon) is left-justified in a
    20-column field, followed by the description and the Taskmaster's
    rendering of the node.
    """
    label = method + ':'
    rendered = self.tm.trace_node(node)
    return '%-20s %s %s\n' % (label, description, rendered)
146 def display(self, message):
# Overridable hook: the calling interface subclasses Task and
# implements this to surface Taskmaster messages to the user.
148 Hook to allow the calling interface to display a message.
150 This hook gets called as part of preparing a task for execution
151 (that is, a Node to be built). As part of figuring out what Node
152 should be built next, the actual target list may be altered,
153 along with a message describing the alteration. The calling
154 interface can subclass Task and provide a concrete implementation
155 of this method to see those messages.
161 Called just before the task is executed.
163 This is mainly intended to give the target Nodes a chance to
164 unlink underlying files and make all necessary directories before
165 the Action is actually called to build the targets.
168 if T: T.write(self.trace_message(u'Task.prepare()', self.node))
170 # Now that it's the appropriate time, give the TaskMaster a
171 # chance to raise any exceptions it encountered while preparing
173 self.exception_raise()
176 self.display(self.tm.message)
177 self.tm.message = None
179 # Let the targets take care of any necessary preparations.
180 # This includes verifying that all of the necessary sources
181 # and dependencies exist, removing the target file(s), etc.
183 # As of April 2008, the get_executor().prepare() method makes
184 # sure that all of the aggregate sources necessary to build this
185 # Task's target(s) exist in one up-front check. The individual
186 # target t.prepare() methods check that each target's explicit
187 # or implicit dependencies exists, and also initialize the
189 executor = self.targets[0].get_executor()
191 for t in executor.get_action_targets():
193 for s in t.side_effects:
196 def get_target(self):
197 """Fetch the target being built or updated by this task.
# NOTE(review): the docstring terminator and the return statement
# are elided from this sampled view.
201 def needs_execute(self):
# Base-class policy: warn that direct use of Taskmaster.Task is
# deprecated; the concrete subclasses below (AlwaysTask,
# OutOfDateTask) override this to decide whether to execute.
202 # TODO(deprecate): "return True" is the old default behavior;
203 # change it to NotImplementedError (after running through the
204 # Deprecation Cycle) so the desired behavior is explicitly
205 # determined by which concrete subclass is used.
206 #raise NotImplementedError
207 msg = ('Direct use of the Taskmaster.Task class will be deprecated\n'
208 + '\tin a future release.')
209 SCons.Warnings.warn(SCons.Warnings.TaskmasterNeedsExecuteWarning, msg)
214 Called to execute the task.
216 This method is called from multiple threads in a parallel build,
217 so only do thread safe stuff here. Do thread unsafe stuff in
218 prepare(), executed() or failed().
221 if T: T.write(self.trace_message(u'Task.execute()', self.node))
224 everything_was_cached = 1
225 for t in self.targets:
226 if t.retrieve_from_cache():
227 # Call the .built() method without calling the
228 # .push_to_cache() method, since we just got the
229 # target from the cache and don't need to push
231 t.set_state(NODE_EXECUTED)
234 everything_was_cached = 0
236 if not everything_was_cached:
237 self.targets[0].build()
239 exc_value = sys.exc_info()[1]
240 raise SCons.Errors.ExplicitExit(self.targets[0], exc_value.code)
241 except SCons.Errors.UserError:
243 except SCons.Errors.BuildError:
246 buildError = SCons.Errors.convert_to_BuildError(e)
247 buildError.node = self.targets[0]
248 buildError.exc_info = sys.exc_info()
251 def executed_without_callbacks(self):
253 Called when the task has been successfully executed
254 and the Taskmaster instance doesn't want to call
255 the Node's callback methods.
258 if T: T.write(self.trace_message('Task.executed_without_callbacks()',
# For every target that was actually executing, reset its side
# effects to "no state" and mark the target itself executed.
261 for t in self.targets:
262 if t.get_state() == NODE_EXECUTING:
263 for side_effect in t.side_effects:
264 side_effect.set_state(NODE_NO_STATE)
265 t.set_state(NODE_EXECUTED)
267 def executed_with_callbacks(self):
269 Called when the task has been successfully executed and
270 the Taskmaster instance wants to call the Node's callback
273 This may have been a do-nothing operation (to preserve build
274 order), so we must check the node's state before deciding whether
275 it was "built", in which case we call the appropriate Node method.
276 In any event, we always call "visited()", which will handle any
277 post-visit actions that must take place regardless of whether
278 or not the target was an actual built target or a source Node.
281 if T: T.write(self.trace_message('Task.executed_with_callbacks()',
# Same state bookkeeping as executed_without_callbacks(): clear the
# side effects and mark executing targets as executed.  The Node
# callback invocations are elided in this sampled view.
284 for t in self.targets:
285 if t.get_state() == NODE_EXECUTING:
286 for side_effect in t.side_effects:
287 side_effect.set_state(NODE_NO_STATE)
288 t.set_state(NODE_EXECUTED)
# The callback-invoking variant is the canonical executed() hook.
293 executed = executed_with_callbacks
297 Default action when a task fails: stop the build.
299 Note: Although this function is normally invoked on nodes in
300 the executing state, it might also be invoked on up-to-date
301 nodes when using Configure().
307 Explicit stop-the-build failure.
309 This sets failure status on the target nodes and all of
310 their dependent parent nodes.
312 Note: Although this function is normally invoked on nodes in
313 the executing state, it might also be invoked on up-to-date
314 nodes when using Configure().
317 if T: T.write(self.trace_message('Task.failed_stop()', self.node))
319 # Invoke will_not_build() to clean-up the pending children
321 self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
323 # Tell the taskmaster to not start any new tasks
326 # We're stopping because of a build failure, but give the
327 # calling Task class a chance to postprocess() the top-level
328 # target under which the build failure occurred.
329 self.targets = [self.tm.current_top]
332 def fail_continue(self):
334 Explicit continue-the-build failure.
336 This sets failure status on the target nodes and all of
337 their dependent parent nodes.
339 Note: Although this function is normally invoked on nodes in
340 the executing state, it might also be invoked on up-to-date
341 nodes when using Configure().
# NOTE(review): the trace label below says 'failed_continue' while
# the method is named fail_continue.
344 if T: T.write(self.trace_message('Task.failed_continue()', self.node))
# Mark these targets (and, via will_not_build, everything waiting
# on them) failed, but keep the Taskmaster running (--keep-going).
346 self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
348 def make_ready_all(self):
350 Marks all targets in a task ready for execution.
352 This is used when the interface needs every target Node to be
353 visited--the canonical example being the "scons -c" option.
356 if T: T.write(self.trace_message('Task.make_ready_all()', self.node))
# Every target is treated as out of date and moved (along with its
# side effects) into the EXECUTING state unconditionally.
358 self.out_of_date = self.targets[:]
359 for t in self.targets:
360 t.disambiguate().set_state(NODE_EXECUTING)
361 for s in t.side_effects:
362 # add disambiguate here to mirror the call on targets above
363 s.disambiguate().set_state(NODE_EXECUTING)
365 def make_ready_current(self):
367 Marks all targets in a task ready for execution if any target
370 This is the default behavior for building only what's necessary.
373 if T: T.write(self.trace_message(u'Task.make_ready_current()',
376 self.out_of_date = []
377 needs_executing = False
378 for t in self.targets:
# A target is up to date only if it has a builder, is not flagged
# always_build, and its own is_up_to_date() check passes.
380 t.disambiguate().make_ready()
381 is_up_to_date = not t.has_builder() or \
382 (not t.always_build and t.is_up_to_date())
# Convert low-level OS errors from the up-to-date check into a
# BuildError attributed to the offending target.
383 except EnvironmentError, e:
384 raise SCons.Errors.BuildError(node=t, errstr=e.strerror, filename=e.filename)
386 if not is_up_to_date:
387 self.out_of_date.append(t)
388 needs_executing = True
# If anything was out of date, all targets (and side effects) go to
# EXECUTING; otherwise the targets are marked UP_TO_DATE below.
391 for t in self.targets:
392 t.set_state(NODE_EXECUTING)
393 for s in t.side_effects:
394 # add disambiguate here to mirror the call on targets in first loop above
395 s.disambiguate().set_state(NODE_EXECUTING)
397 for t in self.targets:
398 # We must invoke visited() to ensure that the node
399 # information has been computed before allowing the
400 # parent nodes to execute. (That could occur in a
403 t.set_state(NODE_UP_TO_DATE)
# Default make_ready behavior: build only what is necessary.
405 make_ready = make_ready_current
407 def postprocess(self):
409 Post-processes a task after it's been executed.
411 This examines all the targets just built (or not, we don't care
412 if the build was successful, or even if there was no build
413 because everything was up-to-date) to see if they have any
414 waiting parent Nodes, or Nodes waiting on a common side effect,
415 that can be put back on the candidates list.
418 if T: T.write(self.trace_message(u'Task.postprocess()', self.node))
420 # We may have built multiple targets, some of which may have
421 # common parents waiting for this build. Count up how many
422 # targets each parent was waiting for so we can subtract the
423 # values later, and so we *don't* put waiting side-effect Nodes
424 # back on the candidates list if the Node is also a waiting
427 targets = set(self.targets)
429 pending_children = self.tm.pending_children
# Tally, per waiting parent, how many of our targets it was
# waiting on; the counts are subtracted from ref_count below.
432 # A node can only be in the pending_children set if it has
433 # some waiting_parents.
434 if t.waiting_parents:
435 if T: T.write(self.trace_message(u'Task.postprocess()',
438 pending_children.discard(t)
439 for p in t.waiting_parents:
440 parents[p] = parents.get(p, 0) + 1
# Side effects of executing targets go back to "no state"; anything
# waiting on them is counted or re-queued as a candidate.
443 for s in t.side_effects:
444 if s.get_state() == NODE_EXECUTING:
445 s.set_state(NODE_NO_STATE)
446 for p in s.waiting_parents:
447 parents[p] = parents.get(p, 0) + 1
448 for p in s.waiting_s_e:
450 self.tm.candidates.append(p)
# Decrement each parent's reference count; the guard that re-queues
# a parent as a candidate is elided in this sampled view.
452 for p, subtract in parents.items():
453 p.ref_count = p.ref_count - subtract
454 if T: T.write(self.trace_message(u'Task.postprocess()',
456 'adjusted parent ref count'))
458 self.tm.candidates.append(p)
463 # Exception handling subsystem.
465 # Exceptions that occur while walking the DAG or examining Nodes
466 # must be raised, but must be raised at an appropriate time and in
467 # a controlled manner so we can, if necessary, recover gracefully,
468 # possibly write out signature information for Nodes we've updated,
469 # etc. This is done by having the Taskmaster tell us about the
470 # exception, and letting
474 Returns info about a recorded exception.
476 return self.exception
480 Clears any recorded exception.
482 This also changes the "exception_raise" attribute to point
483 to the appropriate do-nothing method.
485 self.exception = (None, None, None)
486 self.exception_raise = self._no_exception_to_raise
488 def exception_set(self, exception=None):
490 Records an exception to be raised at the appropriate time.
492 This also changes the "exception_raise" attribute to point
493 to the method that will, in fact
# NOTE(review): the guard that replaces a None argument with
# sys.exc_info() appears to have lost its `if` line in this view.
496 exception = sys.exc_info()
497 self.exception = exception
498 self.exception_raise = self._exception_raise
# Do-nothing stand-in installed by exception_clear() when there is
# no pending exception to raise.
500 def _no_exception_to_raise(self):
503 def _exception_raise(self):
505 Raises a pending exception that was recorded while getting a
506 Task ready for execution.
# The recorded exception may be a 2- or 3-tuple; the length check
# that selects between the two unpackings is elided in this view.
508 exc = self.exc_info()[:]
510 exc_type, exc_value, exc_traceback = exc
512 exc_type, exc_value = exc
# Python 2 three-argument raise: re-raise with the original traceback.
514 raise exc_type, exc_value, exc_traceback
516 class AlwaysTask(Task):
# Task subclass whose needs_execute() is unconditionally true, for
# interfaces that must execute every target they visit.
517 def needs_execute(self):
519 Always returns True (indicating this Task should always
522 Subclasses that need this behavior (as opposed to the default
523 of only executing Nodes that are out of date w.r.t. their
524 dependencies) can use this as follows:
526 class MyTaskSubclass(SCons.Taskmaster.Task):
527 needs_execute = SCons.Taskmaster.Task.execute_always
531 class OutOfDateTask(Task):
# Default Task subclass (see Taskmaster.__init__): execute only when
# the earlier up-to-date check left the primary target "executing".
532 def needs_execute(self):
534 Returns True (indicating this Task should be executed) if this
535 Task's target state indicates it needs executing, which has
536 already been determined by an earlier up-to-date check.
538 return self.targets[0].get_state() == SCons.Node.executing
# Depth-first search up the waiting_parents links looking for a path
# that leads back to the starting node (a dependency cycle).
# NOTE(review): the early-return and recursion bookkeeping lines are
# elided in this sampled view.
541 def find_cycle(stack, visited):
542 if stack[-1] in visited:
544 visited.add(stack[-1])
545 for n in stack[-1].waiting_parents:
547 if stack[0] == stack[-1]:
549 if find_cycle(stack, visited):
557 The Taskmaster for walking the dependency DAG.
560 def __init__(self, targets=[], tasker=None, order=None, trace=None):
# NOTE(review): mutable default argument; benign in the visible code
# because `targets` is only read and copied, never mutated — confirm.
561 self.original_top = targets
# Reversed copy so pop() walks the top-level targets in order.
562 self.top_targets_left = targets[:]
563 self.top_targets_left.reverse()
# OutOfDateTask is the default task class when none is supplied.
566 tasker = OutOfDateTask
573 self.next_candidate = self.find_next_candidate
574 self.pending_children = set()
576 def find_next_candidate(self):
578 Returns the next candidate Node for (potential) evaluation.
580 The candidate list (really a stack) initially consists of all of
581 the top-level (command line) targets provided when the Taskmaster
582 was initialized. While we walk the DAG, visiting Nodes, all the
583 children that haven't finished processing get pushed on to the
584 candidate list. Each child can then be popped and examined in
585 turn for whether *their* children are all up-to-date, in which
586 case a Task will be created for their actual evaluation and
589 Here is where we also allow candidate Nodes to alter the list of
590 Nodes that should be examined. This is used, for example, when
591 invoking SCons in a source directory. A source directory Node can
592 return its corresponding build directory Node, essentially saying,
593 "Hey, you really need to build this thing over here instead."
# Prefer an already-queued candidate; otherwise pull the next
# top-level target and let it redirect us via alter_targets().
596 return self.candidates.pop()
600 node = self.top_targets_left.pop()
603 self.current_top = node
604 alt, message = node.alter_targets()
606 self.message = message
607 self.candidates.append(node)
608 self.candidates.extend(self.order(alt))
609 node = self.candidates.pop()
612 def no_next_candidate(self):
614 Stops Taskmaster processing by not returning a next candidate.
616 Note that we have to clean-up the Taskmaster candidate list
617 because the cycle detection depends on the fact that all nodes
618 have been processed somehow.
# Drain the remaining candidates through will_not_build() so their
# bookkeeping (pending children, ref counts) is cleaned up.
620 while self.candidates:
621 candidates = self.candidates
623 self.will_not_build(candidates)
626 def _validate_pending_children(self):
628 Validate the content of the pending_children set. Assert if an
629 internal error is found.
631 This function is used strictly for debugging the taskmaster by
632 checking that no invariants are violated. It is not used in
635 The pending_children set is used to detect cycles in the
636 dependency graph. We call a "pending child" a child that is
637 found in the "pending" state when checking the dependencies of
640 A pending child can occur when the Taskmaster completes a loop
641 through a cycle. For example, let's imagine a graph made of
642 three nodes (A, B and C) making a cycle. The evaluation starts
643 at node A. The taskmaster first considers whether node A's
644 child B is up-to-date. Then, recursively, node B needs to
645 check whether node C is up-to-date. This leaves us with a
646 dependency graph looking like:
650 Node A (Pending) --> Node B(Pending) --> Node C (NoState)
653 +-------------------------------------+
655 Now, when the Taskmaster examines the Node C's child Node A,
656 it finds that Node A is in the "pending" state. Therefore,
657 Node A is a pending child of node C.
659 Pending children indicate that the Taskmaster has potentially
660 looped back through a cycle. We say potentially because it could
661 also occur when a DAG is evaluated in parallel. For example,
662 consider the following graph:
665 Node A (Pending) --> Node B(Pending) --> Node C (Pending) --> ...
668 +----------> Node D (NoState) --------+
672 The Taskmaster first evaluates the nodes A, B, and C and
673 starts building some children of node C. Assuming that the
674 maximum parallel level has not been reached, the Taskmaster
675 will examine Node D. It will find that Node C is a pending
678 In summary, evaluating a graph with a cycle will always
679 involve a pending child at one point. A pending child might
680 indicate either a cycle or a diamond-shaped DAG. Only a
681 fraction of the nodes ends up being a "pending child" of
682 another node. This keeps the pending_children set small in
685 We can differentiate between the two cases if we wait until
686 the end of the build. At this point, all the pending children
687 nodes due to a diamond-shaped DAG will have been properly
688 built (or will have failed to build). But, the pending
689 children involved in a cycle will still be in the pending
692 The taskmaster removes nodes from the pending_children set as
693 soon as a pending_children node moves out of the pending
694 state. This also helps to keep the pending_children set small.
# Invariants: every pending child must be PENDING or EXECUTING,
# must have at least one waiting parent, and each waiting parent
# must hold a positive reference count.
697 for n in self.pending_children:
698 assert n.state in (NODE_PENDING, NODE_EXECUTING), \
699 (str(n), StateString[n.state])
700 assert len(n.waiting_parents) != 0, (str(n), len(n.waiting_parents))
701 for p in n.waiting_parents:
702 assert p.ref_count > 0, (str(n), str(p), p.ref_count)
def trace_message(self, message):
    """Return *message* wrapped in the standard Taskmaster trace
    prefix, terminated with a newline.
    """
    template = 'Taskmaster: %s\n'
    return template % message
708 def trace_node(self, node):
# Render a node for trace output, starting with its state name.
# NOTE(review): the remainder of the format tuple is elided in this
# sampled view.
709 return '<%-10s %-3s %s>' % (StateString[node.get_state()],
713 def _find_next_ready_node(self):
715 Finds the next node that is ready to be built.
717 This is *the* main guts of the DAG walk. We loop through the
718 list of candidates, looking for something that has no un-built
719 children (i.e., that is a leaf Node or has dependencies that are
720 all leaf Nodes or up-to-date). Candidate Nodes are re-scanned
721 (both the target Node itself and its sources, which are always
722 scanned in the context of a given target) to discover implicit
723 dependencies. A Node that must wait for some children to be
724 built will be put back on the candidates list after the children
725 have finished building. A Node that has been put back on the
726 candidates list in this way may have itself (or its sources)
727 re-scanned, in order to handle generated header files (e.g.) and
728 the implicit dependencies therein.
730 Note that this method does not do any signature calculation or
731 up-to-date check itself. All of that is handled by the Task
732 class. This is purely concerned with the dependency graph walk.
735 self.ready_exc = None
738 if T: T.write(u'\n' + self.trace_message('Looking for a node to evaluate'))
# Pull the next candidate; an empty candidate supply ends the walk.
741 node = self.next_candidate()
743 if T: T.write(self.trace_message('No candidate anymore.') + u'\n')
746 node = node.disambiguate()
747 state = node.get_state()
749 # For debugging only:
752 # self._validate_pending_children()
754 # self.ready_exc = sys.exc_info()
# Per-node statistics (enabled via CollectStats at module level).
758 if not hasattr(node, 'stats'):
760 StatsNodes.append(node)
762 S.considered = S.considered + 1
766 if T: T.write(self.trace_message(u'    Considering node %s and its children:' % self.trace_node(node)))
768 if state == NODE_NO_STATE:
769 # Mark this node as being on the execution stack:
770 node.set_state(NODE_PENDING)
771 elif state > NODE_PENDING:
772 # Skip this node if it has already been evaluated:
773 if S: S.already_handled = S.already_handled + 1
774 if T: T.write(self.trace_message(u'       already handled (executed)'))
# Scanning for children can itself raise; arrange for any such
# exception to be re-raised when the Task is executed.
777 executor = node.get_executor()
780 children = executor.get_all_children()
782 exc_value = sys.exc_info()[1]
783 e = SCons.Errors.ExplicitExit(node, exc_value.code)
784 self.ready_exc = (SCons.Errors.ExplicitExit, e)
785 if T: T.write(self.trace_message('       SystemExit'))
788 # We had a problem just trying to figure out the
789 # children (like a child couldn't be linked in to a
790 # VariantDir, or a Scanner threw something). Arrange to
791 # raise the exception when the Task is "executed."
792 self.ready_exc = sys.exc_info()
793 if S: S.problem = S.problem + 1
794 if T: T.write(self.trace_message('       exception %s while scanning children.\n' % e))
# Classify each prerequisite/child by its current state.
797 children_not_visited = []
798 children_pending = set()
799 children_not_ready = []
800 children_failed = False
802 for child in chain(executor.get_all_prerequisites(), children):
803 childstate = child.get_state()
805 if T: T.write(self.trace_message(u'       ' + self.trace_node(child)))
807 if childstate == NODE_NO_STATE:
808 children_not_visited.append(child)
809 elif childstate == NODE_PENDING:
810 children_pending.add(child)
811 elif childstate == NODE_FAILED:
812 children_failed = True
814 if childstate <= NODE_EXECUTING:
815 children_not_ready.append(child)
818 # These nodes have not even been visited yet.  Add
819 # them to the list so that on some next pass we can
820 # take a stab at evaluating them (or their children).
821 children_not_visited.reverse()
822 self.candidates.extend(self.order(children_not_visited))
823 #if T and children_not_visited:
824 #    T.write(self.trace_message('     adding to candidates: %s' % map(str, children_not_visited)))
825 #    T.write(self.trace_message('     candidates now: %s\n' % map(str, self.candidates)))
827 # Skip this node if any of its children have failed.
829 # This catches the case where we're descending a top-level
830 # target and one of our children failed while trying to be
831 # built by a *previous* descent of an earlier top-level
834 # It can also occur if a node is reused in multiple
835 # targets: one first descends through one of the targets,
836 # the next time through the other target.
838 # Note that we can only have failed_children if the
839 # --keep-going flag was used, because without it the build
840 # will stop before diving in the other branch.
842 # Note that even if one of the children fails, we still
843 # added the other children to the list of candidate nodes
844 # to keep on building (--keep-going).
846 for n in executor.get_action_targets():
847 n.set_state(NODE_FAILED)
849 if S: S.child_failed = S.child_failed + 1
850 if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
853 if children_not_ready:
854 for child in children_not_ready:
855 # We're waiting on one or more derived targets
856 # that have not yet finished building.
857 if S: S.not_built = S.not_built + 1
859 # Add this node to the waiting parents lists of
860 # anything we're waiting on, with a reference
861 # count so we can be put back on the list for
862 # re-evaluation when they've all finished.
863 node.ref_count =  node.ref_count + child.add_to_waiting_parents(node)
864 if T: T.write(self.trace_message(u'     adjusted ref count: %s, child %s' %
865 (self.trace_node(node), repr(str(child)))))
# Record pending children so the end-of-build cycle detection can
# inspect them later.
868 for pc in children_pending:
869 T.write(self.trace_message('       adding %s to the pending children set\n' %
870 self.trace_node(pc)))
871 self.pending_children = self.pending_children | children_pending
875 # Skip this node if it has side-effects that are
876 # currently being built:
877 wait_side_effects = False
878 for se in executor.get_action_side_effects():
879 if se.get_state() == NODE_EXECUTING:
880 se.add_to_waiting_s_e(node)
881 wait_side_effects = True
883 if wait_side_effects:
884 if S: S.side_effects = S.side_effects + 1
887 # The default when we've gotten through all of the checks above:
888 # this node is ready to be built.
889 if S: S.build = S.build + 1
890 if T: T.write(self.trace_message(u'Evaluating %s\n' %
891 self.trace_node(node)))
893 # For debugging only:
896 # self._validate_pending_children()
898 # self.ready_exc = sys.exc_info()
907 Returns the next task to be executed.
909 This simply asks for the next Node to be evaluated, and then wraps
910 it in the specific Task subclass with which we were initialized.
912 node = self._find_next_ready_node()
917 tlist = node.get_executor().get_all_targets()
919 task = self.tasker(self, tlist, node in self.original_top, node)
923 # We had a problem just trying to get this task ready (like
924 # a child couldn't be linked in to a VariantDir when deciding
925 # whether this node is current). Arrange to raise the
926 # exception when the Task is "executed."
927 self.ready_exc = sys.exc_info()
930 task.exception_set(self.ready_exc)
932 self.ready_exc = None
936 def will_not_build(self, nodes, node_func=lambda n: None):
938 Perform clean-up about nodes that will never be built. Invokes
939 a user defined function on all of these nodes (including all
# Work on a local copy of pending_children; it is stored back onto
# self at the end (see the comment near the bottom).
945 pending_children = self.pending_children
947 to_visit = set(nodes)
948 pending_children = pending_children - to_visit
952 T.write(self.trace_message('       removing node %s from the pending children set\n' %
957 node = to_visit.pop()
958 except AttributeError:
962 to_visit.remove(node)
968 # Prune recursion by flushing the waiting children
970 parents = node.waiting_parents
971 node.waiting_parents = set()
# Propagate to waiting parents: they will not build either, and
# each loses one reference for this abandoned child.
973 to_visit = to_visit | parents
974 pending_children = pending_children - parents
977 p.ref_count = p.ref_count - 1
978 if T: T.write(self.trace_message('       removing parent %s from the pending children set\n' %
981 # The container to_visit has been emptied.
984 # We have to stick the pending_children list back into the
985 # task master because the python 1.5.2 compatibility does not
986 # allow us to use in-place updates
987 self.pending_children = pending_children
991 Stops the current build completely.
993 self.next_candidate = self.no_next_candidate
997 Check for dependency cycles.
999 if not self.pending_children:
1003 #nclist = [ (n, find_cycle([n], set())) for n in self.pending_children ]
1004 nclist = [(n, find_cycle([n], set())) for n in self.pending_children]
1008 # node for node, cycle in nclist
1009 # if cycle or node.get_state() != NODE_EXECUTED
1011 genuine_cycles = [t for t in nclist if t[1] or t[0].get_state() != NODE_EXECUTED]
1012 if not genuine_cycles:
1013 # All of the "cycles" found were single nodes in EXECUTED state,
1014 # which is to say, they really weren't cycles. Just return.
1017 desc = 'Found dependency cycle(s):\n'
1018 for node, cycle in nclist:
1020 desc = desc + " " + " -> ".join(map(str, cycle)) + "\n"
1023 " Internal Error: no cycle found for node %s (%s) in state %s\n" % \
1024 (node, repr(node), StateString[node.get_state()])
1026 raise SCons.Errors.UserError(desc)
1030 # indent-tabs-mode:nil
1032 # vim: set expandtab tabstop=4 shiftwidth=4: