# of time, kill it (solves bug #278895). We try to avoid
# this when possible since it makes sandbox complain about
# being killed by a signal.
- self.cancelled = True
- self._cancel()
+ self.cancel()
self._exit_timeout_id = \
self.scheduler.timeout_add(self._cancel_timeout,
self._cancel_timeout_cb)
self._log_poll_exception(event)
self._unregister()
self.cancel()
+ self.wait()
elif event & self.scheduler.IO_HUP:
self._unregister()
self.wait()
return self.returncode
def cancel(self):
+ """
+ Cancel the task, but do not wait for exit status. If asynchronous exit
+ notification is desired, then use addExitListener to add a listener
+ before calling this method.
+
+ NOTE: Synchronous waiting for status is not supported, since it would
+ be vulnerable to hitting the recursion limit when a large number of
+ tasks need to be terminated simultaneously, like in bug #402335.
+ """
if not self.cancelled:
self.cancelled = True
self._cancel()
- self.wait()
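Since cancel() no longer calls wait(), a caller that needs the exit status should either register a listener before cancelling or call wait() itself afterwards. A minimal sketch of the listener pattern, assuming a hypothetical AsynchronousTask instance named proc:

    def _exit_listener(task):
        # Invoked from the event loop once the task has exited;
        # task.returncode is available at this point.
        print("cancelled task exited with returncode %s" % (task.returncode,))

    # proc is a hypothetical AsynchronousTask subclass instance
    proc.addExitListener(_exit_listener)
    proc.cancel()

A separate proc.wait() call after cancel() still gives a blocking wait, which is what the callers changed in this patch do.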
def _cancel(self):
"""
self._remaining_tasks = True
def _terminate_tasks(self):
- while self._running_tasks:
- self._running_tasks.pop().cancel()
+ for task in list(self._running_tasks):
+ task.cancel()
def _iter_every_cp(self):
portage.writemsg_stdout("Listing available packages...\n")
implementation is running, in order to avoid potential
interference. All tasks should be cleaned up at the earliest
opportunity, but not necessarily before this method returns.
+ Typically, this method will send kill signals and return without
+ waiting for exit status. This allows basic cleanup to occur, such as
+ flushing of buffered output to logs.
"""
raise NotImplementedError()
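As a concrete illustration of this contract, here is a sketch of a conforming _terminate_tasks() override, modeled on the Scheduler hunk below: it sends cancel/kill signals to each running task and returns immediately, leaving exit-status collection to the event loop.

    def _terminate_tasks(self):
        # Iterate over a copy, since cancel() can trigger exit listeners
        # that remove entries from self._running_tasks during the loop.
        for task in list(self._running_tasks):
            task.cancel()
        # No wait() here: the cancelled tasks are reaped asynchronously.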
def _terminate_tasks(self):
self._status_display.quiet = True
- while self._running_tasks:
- task_id, task = self._running_tasks.popitem()
+ for task in list(self._running_tasks.values()):
task.cancel()
for q in self._task_queues.values():
q.clear()
finally:
if current_task is not None and current_task.isAlive():
current_task.cancel()
+ current_task.wait()
clean_phase = EbuildPhase(background=False,
phase='clean', scheduler=sched_iface, settings=settings)
clean_phase.start()
self.schedule()
def clear(self):
+ """
+ Clear the task queue and asynchronously terminate any running tasks.
+ """
self._task_queue.clear()
- running_tasks = self.running_tasks
- while running_tasks:
- task = running_tasks.pop()
- task.removeExitListener(self._task_exit)
+ for task in list(self.running_tasks):
task.cancel()
+ def wait(self):
+ """
+ Synchronously wait for all running tasks to exit.
+ """
+ while self.running_tasks:
+ next(iter(self.running_tasks)).wait()
+
def __bool__(self):
return bool(self._task_queue or self.running_tasks)
self.sched_iface = self._scheduler.sched_iface
self.run = self._scheduler.run
self.clear = self._scheduler.clear
+ self.wait = self._queue.wait
self._scheduler.add(self._queue)
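With wait() exposed alongside clear(), shutdown becomes the two-step sequence used by the test hunks below, where clear() and wait() delegate to the queue as wired up above. A minimal sketch of that pattern:

    task_scheduler.clear()   # asynchronously cancel any running tasks
    task_scheduler.wait()    # block until every cancelled task has exited

This keeps the cancellation path non-recursive while still giving callers a point at which all child processes are known to have exited.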
def add(self, task):
self.received_command = False
def exit_command_callback():
self.received_command = True
- proc.cancel()
- daemon.cancel()
+ task_scheduler.clear()
+ task_scheduler.wait()
exit_command.reply_hook = exit_command_callback
start_time = time.time()
task_scheduler.add(proc)
task_scheduler.run(timeout=self._SCHEDULE_TIMEOUT)
task_scheduler.clear()
+ task_scheduler.wait()
hardlock_cleanup(env['PORTAGE_BUILDDIR'],
remove_all_locks=True)
self.received_command = False
def exit_command_callback():
self.received_command = True
- proc.cancel()
- daemon.cancel()
+ task_scheduler.clear()
+ task_scheduler.wait()
exit_command.reply_hook = exit_command_callback
start_time = time.time()
task_scheduler.add(proc)
task_scheduler.run(timeout=short_timeout_ms)
task_scheduler.clear()
+ task_scheduler.wait()
hardlock_cleanup(env['PORTAGE_BUILDDIR'],
remove_all_locks=True)