2 # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
4 # Permission is hereby granted, free of charge, to any person obtaining
5 # a copy of this software and associated documentation files (the
6 # "Software"), to deal in the Software without restriction, including
7 # without limitation the rights to use, copy, modify, merge, publish,
8 # distribute, sublicense, and/or sell copies of the Software, and to
9 # permit persons to whom the Software is furnished to do so, subject to
10 # the following conditions:
12 # The above copyright notice and this permission notice shall be included
13 # in all copies or substantial portions of the Software.
15 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
16 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
17 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
19 # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
20 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
21 # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 Generic Taskmaster module for the SCons build engine.
27 This module contains the primary interface(s) between a wrapping user
28 interface and the SCons build engine. There are two key classes here:
31 This is the main engine for walking the dependency graph and
32 calling things to decide what does or doesn't need to be built.
35 This is the base class for allowing a wrapping interface to
36 decide what does or doesn't actually need to be done. The
37 intention is for a wrapping interface to subclass this as
38 appropriate for different types of behavior it may need.
40 The canonical example is the SCons native Python interface,
41 which has Task subclasses that handle its specific behavior,
42 like printing "`foo' is up to date" when a top-level target
43 doesn't need to be built, and handling the -c option by removing
44 targets as its "build" action. There is also a separate subclass
45 for suppressing this output when the -q option is used.
47 The Taskmaster instantiates a Task object for each (set of)
48 target(s) that it decides need to be evaluated and/or built.
51 __revision__ = "src/engine/SCons/Taskmaster.py 3842 2008/12/20 22:59:52 scons"
53 from itertools import chain
62 StateString = SCons.Node.StateString
63 NODE_NO_STATE = SCons.Node.no_state
64 NODE_PENDING = SCons.Node.pending
65 NODE_EXECUTING = SCons.Node.executing
66 NODE_UP_TO_DATE = SCons.Node.up_to_date
67 NODE_EXECUTED = SCons.Node.executed
68 NODE_FAILED = SCons.Node.failed
71 # A subsystem for recording stats about how different Nodes are handled by
72 # the main Taskmaster loop. There's no external control here (no need for
73 # a --debug= option); enable it by changing the value of CollectStats.
79 A simple class for holding statistics about the disposition of a
80 Node by the Taskmaster. If we're collecting statistics, each Node
81 processed by the Taskmaster gets one of these attached, in which case
82 the Taskmaster records its decision each time it processes the Node.
83 (Ideally, that's just once per Node.)
87 Instantiates a Taskmaster.Stats object, initializing all
88 appropriate counters to zero.
91 self.already_handled = 0
100 fmt = "%(considered)3d "\
101 "%(already_handled)3d " \
103 "%(child_failed)3d " \
105 "%(side_effects)3d " \
109 StatsNodes.sort(lambda a, b: cmp(str(a), str(b)))
111 print (fmt % n.stats.__dict__) + str(n)
117 Default SCons build engine task.
119 This controls the interaction of the actual building of node
120 and the rest of the engine.
122 This is expected to handle all of the normally-customizable
123 aspects of controlling a build, so any given application
124 *should* be able to do what it wants by sub-classing this
125 class and overriding methods as appropriate. If an application
126 # needs to customize something by sub-classing Taskmaster (or
127 some other build engine class), we should first try to migrate
128 that functionality into this class.
130 Note that it's generally a good idea for sub-classes to call
131 these methods explicitly to update state, etc., rather than
132 roll their own interaction with Taskmaster from scratch.
134 def __init__(self, tm, targets, top, node):
136 self.targets = targets
def trace_message(self, method, node, description='node'):
    """Format one trace line for this Task.

    Combines the reporting method's name, a short description label,
    and the Taskmaster's rendering of *node* into a single
    newline-terminated record.
    """
    label = method + ':'
    rendered = self.tm.trace_node(node)
    return '%-20s %s %s\n' % (label, description, rendered)
145 def display(self, message):
147 Hook to allow the calling interface to display a message.
149 This hook gets called as part of preparing a task for execution
150 (that is, a Node to be built). As part of figuring out what Node
151 should be built next, the actually target list may be altered,
152 along with a message describing the alteration. The calling
153 interface can subclass Task and provide a concrete implementation
154 of this method to see those messages.
160 Called just before the task is executed.
162 This is mainly intended to give the target Nodes a chance to
163 unlink underlying files and make all necessary directories before
164 the Action is actually called to build the targets.
167 if T: T.write(self.trace_message('Task.prepare()', self.node))
169 # Now that it's the appropriate time, give the TaskMaster a
170 # chance to raise any exceptions it encountered while preparing
172 self.exception_raise()
175 self.display(self.tm.message)
176 self.tm.message = None
178 # Let the targets take care of any necessary preparations.
179 # This includes verifying that all of the necessary sources
180 # and dependencies exist, removing the target file(s), etc.
182 # As of April 2008, the get_executor().prepare() method makes
183 # sure that all of the aggregate sources necessary to build this
184 # Task's target(s) exist in one up-front check. The individual
185 # target t.prepare() methods check that each target's explicit
186 # or implicit dependencies exists, and also initialize the
188 self.targets[0].get_executor().prepare()
189 for t in self.targets:
191 for s in t.side_effects:
194 def get_target(self):
195 """Fetch the target being built or updated by this task.
199 def needs_execute(self):
201 Called to determine whether the task's execute() method should
204 This method allows one to skip the somewhat costly execution
205 of the execute() method in a separate thread. For example,
206 that would be unnecessary for up-to-date targets.
212 Called to execute the task.
214 This method is called from multiple threads in a parallel build,
215 so only do thread safe stuff here. Do thread unsafe stuff in
216 prepare(), executed() or failed().
219 if T: T.write(self.trace_message('Task.execute()', self.node))
222 everything_was_cached = 1
223 for t in self.targets:
224 if not t.retrieve_from_cache():
225 everything_was_cached = 0
227 if not everything_was_cached:
228 self.targets[0].build()
230 exc_value = sys.exc_info()[1]
231 raise SCons.Errors.ExplicitExit(self.targets[0], exc_value.code)
232 except SCons.Errors.UserError:
234 except SCons.Errors.BuildError:
237 buildError = SCons.Errors.convert_to_BuildError(e)
238 buildError.node = self.targets[0]
239 buildError.exc_info = sys.exc_info()
242 def executed_without_callbacks(self):
244 Called when the task has been successfully executed
245 and the Taskmaster instance doesn't want to call
246 the Node's callback methods.
249 if T: T.write(self.trace_message('Task.executed_without_callbacks()',
252 for t in self.targets:
253 if t.get_state() == NODE_EXECUTING:
254 for side_effect in t.side_effects:
255 side_effect.set_state(NODE_NO_STATE)
256 t.set_state(NODE_EXECUTED)
258 def executed_with_callbacks(self):
260 Called when the task has been successfully executed and
261 the Taskmaster instance wants to call the Node's callback
264 This may have been a do-nothing operation (to preserve build
265 order), so we must check the node's state before deciding whether
266 it was "built", in which case we call the appropriate Node method.
267 In any event, we always call "visited()", which will handle any
268 post-visit actions that must take place regardless of whether
269 or not the target was an actual built target or a source Node.
272 if T: T.write(self.trace_message('Task.executed_with_callbacks()',
275 for t in self.targets:
276 if t.get_state() == NODE_EXECUTING:
277 for side_effect in t.side_effects:
278 side_effect.set_state(NODE_NO_STATE)
279 t.set_state(NODE_EXECUTED)
283 executed = executed_with_callbacks
287 Default action when a task fails: stop the build.
289 Note: Although this function is normally invoked on nodes in
290 the executing state, it might also be invoked on up-to-date
291 nodes when using Configure().
297 Explicit stop-the-build failure.
299 This sets failure status on the target nodes and all of
300 their dependent parent nodes.
302 Note: Although this function is normally invoked on nodes in
303 the executing state, it might also be invoked on up-to-date
304 nodes when using Configure().
307 if T: T.write(self.trace_message('Task.failed_stop()', self.node))
309 # Invoke will_not_build() to clean-up the pending children
311 self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
313 # Tell the taskmaster to not start any new tasks
316 # We're stopping because of a build failure, but give the
317 # calling Task class a chance to postprocess() the top-level
318 # target under which the build failure occurred.
319 self.targets = [self.tm.current_top]
322 def fail_continue(self):
324 Explicit continue-the-build failure.
326 This sets failure status on the target nodes and all of
327 their dependent parent nodes.
329 Note: Although this function is normally invoked on nodes in
330 the executing state, it might also be invoked on up-to-date
331 nodes when using Configure().
334 if T: T.write(self.trace_message('Task.failed_continue()', self.node))
336 self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
338 def make_ready_all(self):
340 Marks all targets in a task ready for execution.
342 This is used when the interface needs every target Node to be
343 visited--the canonical example being the "scons -c" option.
346 if T: T.write(self.trace_message('Task.make_ready_all()', self.node))
348 self.out_of_date = self.targets[:]
349 for t in self.targets:
350 t.disambiguate().set_state(NODE_EXECUTING)
351 for s in t.side_effects:
352 s.set_state(NODE_EXECUTING)
354 def make_ready_current(self):
356 Marks all targets in a task ready for execution if any target
359 This is the default behavior for building only what's necessary.
362 if T: T.write(self.trace_message('Task.make_ready_current()',
365 self.out_of_date = []
366 needs_executing = False
367 for t in self.targets:
369 t.disambiguate().make_ready()
370 is_up_to_date = not t.has_builder() or \
371 (not t.always_build and t.is_up_to_date())
372 except EnvironmentError, e:
373 raise SCons.Errors.BuildError(node=t, errstr=e.strerror, filename=e.filename)
375 if not is_up_to_date:
376 self.out_of_date.append(t)
377 needs_executing = True
380 for t in self.targets:
381 t.set_state(NODE_EXECUTING)
382 for s in t.side_effects:
383 s.set_state(NODE_EXECUTING)
385 for t in self.targets:
386 # We must invoke visited() to ensure that the node
387 # information has been computed before allowing the
388 # parent nodes to execute. (That could occur in a
391 t.set_state(NODE_UP_TO_DATE)
393 make_ready = make_ready_current
395 def postprocess(self):
397 Post-processes a task after it's been executed.
399 This examines all the targets just built (or not, we don't care
400 if the build was successful, or even if there was no build
401 because everything was up-to-date) to see if they have any
402 waiting parent Nodes, or Nodes waiting on a common side effect,
403 that can be put back on the candidates list.
406 if T: T.write(self.trace_message('Task.postprocess()', self.node))
408 # We may have built multiple targets, some of which may have
409 # common parents waiting for this build. Count up how many
410 # targets each parent was waiting for so we can subtract the
411 # values later, and so we *don't* put waiting side-effect Nodes
412 # back on the candidates list if the Node is also a waiting
415 targets = set(self.targets)
417 pending_children = self.tm.pending_children
420 # A node can only be in the pending_children set if it has
421 # some waiting_parents.
422 if t.waiting_parents:
423 if T: T.write(self.trace_message('Task.postprocess()',
426 pending_children.discard(t)
427 for p in t.waiting_parents:
428 parents[p] = parents.get(p, 0) + 1
431 for s in t.side_effects:
432 if s.get_state() == NODE_EXECUTING:
433 s.set_state(NODE_NO_STATE)
434 for p in s.waiting_parents:
435 parents[p] = parents.get(p, 0) + 1
436 for p in s.waiting_s_e:
438 self.tm.candidates.append(p)
440 for p, subtract in parents.items():
441 p.ref_count = p.ref_count - subtract
442 if T: T.write(self.trace_message('Task.postprocess()',
444 'adjusted parent ref count'))
446 self.tm.candidates.append(p)
451 # Exception handling subsystem.
453 # Exceptions that occur while walking the DAG or examining Nodes
454 # must be raised, but must be raised at an appropriate time and in
455 # a controlled manner so we can, if necessary, recover gracefully,
456 # possibly write out signature information for Nodes we've updated,
457 # etc. This is done by having the Taskmaster tell us about the
458 # exception, and letting
462 Returns info about a recorded exception.
464 return self.exception
468 Clears any recorded exception.
470 This also changes the "exception_raise" attribute to point
471 to the appropriate do-nothing method.
473 self.exception = (None, None, None)
474 self.exception_raise = self._no_exception_to_raise
476 def exception_set(self, exception=None):
478 Records an exception to be raised at the appropriate time.
480 This also changes the "exception_raise" attribute to point
481 to the method that will, in fact
484 exception = sys.exc_info()
485 self.exception = exception
486 self.exception_raise = self._exception_raise
488 def _no_exception_to_raise(self):
491 def _exception_raise(self):
493 Raises a pending exception that was recorded while getting a
494 Task ready for execution.
496 exc = self.exc_info()[:]
498 exc_type, exc_value, exc_traceback = exc
500 exc_type, exc_value = exc
502 raise exc_type, exc_value, exc_traceback
505 def find_cycle(stack, visited):
506 if stack[-1] in visited:
508 visited.add(stack[-1])
509 for n in stack[-1].waiting_parents:
511 if stack[0] == stack[-1]:
513 if find_cycle(stack, visited):
521 The Taskmaster for walking the dependency DAG.
524 def __init__(self, targets=[], tasker=Task, order=None, trace=None):
525 self.original_top = targets
526 self.top_targets_left = targets[:]
527 self.top_targets_left.reverse()
535 self.next_candidate = self.find_next_candidate
536 self.pending_children = set()
538 def find_next_candidate(self):
540 Returns the next candidate Node for (potential) evaluation.
542 The candidate list (really a stack) initially consists of all of
543 the top-level (command line) targets provided when the Taskmaster
544 was initialized. While we walk the DAG, visiting Nodes, all the
545 children that haven't finished processing get pushed on to the
546 candidate list. Each child can then be popped and examined in
547 turn for whether *their* children are all up-to-date, in which
548 case a Task will be created for their actual evaluation and
551 Here is where we also allow candidate Nodes to alter the list of
552 Nodes that should be examined. This is used, for example, when
553 invoking SCons in a source directory. A source directory Node can
554 return its corresponding build directory Node, essentially saying,
555 "Hey, you really need to build this thing over here instead."
558 return self.candidates.pop()
562 node = self.top_targets_left.pop()
565 self.current_top = node
566 alt, message = node.alter_targets()
568 self.message = message
569 self.candidates.append(node)
570 self.candidates.extend(self.order(alt))
571 node = self.candidates.pop()
574 def no_next_candidate(self):
576 Stops Taskmaster processing by not returning a next candidate.
578 Note that we have to clean-up the Taskmaster candidate list
579 because the cycle detection depends on the fact all nodes have
580 been processed somehow.
582 while self.candidates:
583 candidates = self.candidates
585 self.will_not_build(candidates)
588 def _validate_pending_children(self):
590 Validate the content of the pending_children set. Assert if an
591 internal error is found.
593 This function is used strictly for debugging the taskmaster by
594 checking that no invariants are violated. It is not used in
597 The pending_children set is used to detect cycles in the
598 dependency graph. We call a "pending child" a child that is
599 found in the "pending" state when checking the dependencies of
602 A pending child can occur when the Taskmaster completes a loop
604 through a cycle. For example, let's imagine a graph made of
605 three nodes (A, B and C) making a cycle. The evaluation starts
606 at node A. The taskmaster first considers whether node A's
606 child B is up-to-date. Then, recursively, node B needs to
607 check whether node C is up-to-date. This leaves us with a
608 dependency graph looking like:
612 Node A (Pending) --> Node B(Pending) --> Node C (NoState)
615 +-------------------------------------+
617 Now, when the Taskmaster examines the Node C's child Node A,
618 it finds that Node A is in the "pending" state. Therefore,
619 Node A is a pending child of node C.
621 Pending children indicate that the Taskmaster has potentially
622 looped back through a cycle. We say potentially because it could
623 also occur when a DAG is evaluated in parallel. For example,
624 consider the following graph:
627 Node A (Pending) --> Node B(Pending) --> Node C (Pending) --> ...
630 +----------> Node D (NoState) --------+
634 The Taskmaster first evaluates the nodes A, B, and C and
635 starts building some children of node C. Assuming, that the
636 maximum parallel level has not been reached, the Taskmaster
637 will examine Node D. It will find that Node C is a pending
640 In summary, evaluating a graph with a cycle will always
641 involve a pending child at one point. A pending child might
642 indicate either a cycle or a diamond-shaped DAG. Only a
643 fraction of the nodes ends up being a "pending child" of
644 another node. This keeps the pending_children set small in
647 We can differentiate between the two cases if we wait until
648 the end of the build. At this point, all the pending children
649 nodes due to a diamond-shaped DAG will have been properly
650 built (or will have failed to build). But, the pending
651 children involved in a cycle will still be in the pending
654 The taskmaster removes nodes from the pending_children set as
655 soon as a pending_children node moves out of the pending
656 state. This also helps to keep the pending_children set small.
659 for n in self.pending_children:
660 assert n.state in (NODE_PENDING, NODE_EXECUTING), \
661 (str(n), StateString[n.state])
662 assert len(n.waiting_parents) != 0, (str(n), len(n.waiting_parents))
663 for p in n.waiting_parents:
664 assert p.ref_count > 0, (str(n), str(p), p.ref_count)
def trace_message(self, message):
    """Return *message* wrapped in the standard Taskmaster trace format."""
    template = 'Taskmaster: %s\n'
    return template % message
670 def trace_node(self, node):
671 return '<%-10s %-3s %s>' % (StateString[node.get_state()],
675 def _find_next_ready_node(self):
677 Finds the next node that is ready to be built.
679 This is *the* main guts of the DAG walk. We loop through the
680 list of candidates, looking for something that has no un-built
681 children (i.e., that is a leaf Node or has dependencies that are
682 all leaf Nodes or up-to-date). Candidate Nodes are re-scanned
683 (both the target Node itself and its sources, which are always
684 scanned in the context of a given target) to discover implicit
685 dependencies. A Node that must wait for some children to be
686 built will be put back on the candidates list after the children
687 have finished building. A Node that has been put back on the
688 candidates list in this way may have itself (or its sources)
689 re-scanned, in order to handle generated header files (e.g.) and
690 the implicit dependencies therein.
692 Note that this method does not do any signature calculation or
693 up-to-date check itself. All of that is handled by the Task
694 class. This is purely concerned with the dependency graph walk.
697 self.ready_exc = None
700 if T: T.write('\n' + self.trace_message('Looking for a node to evaluate'))
703 node = self.next_candidate()
705 if T: T.write(self.trace_message('No candidate anymore.') + '\n')
708 node = node.disambiguate()
709 state = node.get_state()
711 # For debugging only:
714 # self._validate_pending_children()
716 # self.ready_exc = sys.exc_info()
720 if not hasattr(node, 'stats'):
722 StatsNodes.append(node)
724 S.considered = S.considered + 1
728 if T: T.write(self.trace_message(' Considering node %s and its children:' % self.trace_node(node)))
730 if state == NODE_NO_STATE:
731 # Mark this node as being on the execution stack:
732 node.set_state(NODE_PENDING)
733 elif state > NODE_PENDING:
734 # Skip this node if it has already been evaluated:
735 if S: S.already_handled = S.already_handled + 1
736 if T: T.write(self.trace_message(' already handled (executed)'))
740 children = node.children()
742 exc_value = sys.exc_info()[1]
743 e = SCons.Errors.ExplicitExit(node, exc_value.code)
744 self.ready_exc = (SCons.Errors.ExplicitExit, e)
745 if T: T.write(self.trace_message(' SystemExit'))
748 # We had a problem just trying to figure out the
749 # children (like a child couldn't be linked in to a
750 # VariantDir, or a Scanner threw something). Arrange to
751 # raise the exception when the Task is "executed."
752 self.ready_exc = sys.exc_info()
753 if S: S.problem = S.problem + 1
754 if T: T.write(self.trace_message(' exception %s while scanning children.\n' % e))
757 children_not_visited = []
758 children_pending = set()
759 children_not_ready = []
760 children_failed = False
762 for child in chain(children,node.prerequisites):
763 childstate = child.get_state()
765 if T: T.write(self.trace_message(' ' + self.trace_node(child)))
767 if childstate == NODE_NO_STATE:
768 children_not_visited.append(child)
769 elif childstate == NODE_PENDING:
770 children_pending.add(child)
771 elif childstate == NODE_FAILED:
772 children_failed = True
774 if childstate <= NODE_EXECUTING:
775 children_not_ready.append(child)
778 # These nodes have not even been visited yet. Add
779 # them to the list so that on some next pass we can
780 # take a stab at evaluating them (or their children).
781 children_not_visited.reverse()
782 self.candidates.extend(self.order(children_not_visited))
783 #if T and children_not_visited:
784 # T.write(self.trace_message(' adding to candidates: %s' % map(str, children_not_visited)))
785 # T.write(self.trace_message(' candidates now: %s\n' % map(str, self.candidates)))
787 # Skip this node if any of its children have failed.
789 # This catches the case where we're descending a top-level
790 # target and one of our children failed while trying to be
791 # built by a *previous* descent of an earlier top-level
794 # It can also occur if a node is reused in multiple
795 # targets. One first descends though the one of the
796 # target, the next time occurs through the other target.
798 # Note that we can only have failed_children if the
799 # --keep-going flag was used, because without it the build
800 # will stop before diving in the other branch.
802 # Note that even if one of the children fails, we still
803 # added the other children to the list of candidate nodes
804 # to keep on building (--keep-going).
806 node.set_state(NODE_FAILED)
808 if S: S.child_failed = S.child_failed + 1
809 if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
812 if children_not_ready:
813 for child in children_not_ready:
814 # We're waiting on one or more derived targets
815 # that have not yet finished building.
816 if S: S.not_built = S.not_built + 1
818 # Add this node to the waiting parents lists of
819 # anything we're waiting on, with a reference
820 # count so we can be put back on the list for
821 # re-evaluation when they've all finished.
822 node.ref_count = node.ref_count + child.add_to_waiting_parents(node)
823 if T: T.write(self.trace_message(' adjusted ref count: %s, child %s' %
824 (self.trace_node(node), repr(str(child)))))
827 for pc in children_pending:
828 T.write(self.trace_message(' adding %s to the pending children set\n' %
829 self.trace_node(pc)))
830 self.pending_children = self.pending_children | children_pending
834 # Skip this node if it has side-effects that are
835 # currently being built:
836 wait_side_effects = False
837 for se in node.side_effects:
838 if se.get_state() == NODE_EXECUTING:
839 se.add_to_waiting_s_e(node)
840 wait_side_effects = True
842 if wait_side_effects:
843 if S: S.side_effects = S.side_effects + 1
846 # The default when we've gotten through all of the checks above:
847 # this node is ready to be built.
848 if S: S.build = S.build + 1
849 if T: T.write(self.trace_message('Evaluating %s\n' %
850 self.trace_node(node)))
852 # For debugging only:
855 # self._validate_pending_children()
857 # self.ready_exc = sys.exc_info()
866 Returns the next task to be executed.
868 This simply asks for the next Node to be evaluated, and then wraps
869 it in the specific Task subclass with which we were initialized.
871 node = self._find_next_ready_node()
876 tlist = node.get_executor().targets
878 task = self.tasker(self, tlist, node in self.original_top, node)
882 # We had a problem just trying to get this task ready (like
883 # a child couldn't be linked in to a VariantDir when deciding
884 # whether this node is current). Arrange to raise the
885 # exception when the Task is "executed."
886 self.ready_exc = sys.exc_info()
889 task.exception_set(self.ready_exc)
891 self.ready_exc = None
895 def will_not_build(self, nodes, node_func=lambda n: None):
897 Perform clean-up about nodes that will never be built. Invokes
898 a user defined function on all of these nodes (including all
904 pending_children = self.pending_children
906 to_visit = set(nodes)
907 pending_children = pending_children - to_visit
911 T.write(self.trace_message(' removing node %s from the pending children set\n' %
916 node = to_visit.pop()
917 except AttributeError:
921 to_visit.remove(node)
927 # Prune recursion by flushing the waiting children
929 parents = node.waiting_parents
930 node.waiting_parents = set()
932 to_visit = to_visit | parents
933 pending_children = pending_children - parents
936 p.ref_count = p.ref_count - 1
937 if T: T.write(self.trace_message(' removing parent %s from the pending children set\n' %
940 # The container to_visit has been emptied.
943 # We have to stick the pending_children list back into the
944 # task master because the python 1.5.2 compatibility does not
945 # allow us to use in-place updates
946 self.pending_children = pending_children
950 Stops the current build completely.
952 self.next_candidate = self.no_next_candidate
956 Check for dependency cycles.
958 if not self.pending_children:
962 #nclist = [ (n, find_cycle([n], set())) for n in self.pending_children ]
963 nclist = map(lambda n: (n, find_cycle([n], set())), self.pending_children)
967 # node for node, cycle in nclist
968 # if cycle or node.get_state() != NODE_EXECUTED
970 genuine_cycles = filter(lambda t: t[1] or t[0].get_state() != NODE_EXECUTED, nclist)
971 if not genuine_cycles:
972 # All of the "cycles" found were single nodes in EXECUTED state,
973 # which is to say, they really weren't cycles. Just return.
976 desc = 'Found dependency cycle(s):\n'
977 for node, cycle in nclist:
979 desc = desc + " " + string.join(map(str, cycle), " -> ") + "\n"
982 " Internal Error: no cycle found for node %s (%s) in state %s\n" % \
983 (node, repr(node), StateString[node.get_state()])
985 raise SCons.Errors.UserError, desc