path: root/tokio/src/runtime/thread_pool
author     Carl Lerche <me@carllerche.com>  2020-01-08 21:23:10 -0800
committer  GitHub <noreply@github.com>      2020-01-08 21:23:10 -0800
commit     6406328176cdecf15cad69b327597a4d4d0b8e20 (patch)
tree       b802f2ac711188d4338169d7ff175bf917147126 /tokio/src/runtime/thread_pool
parent     f28c9f0d17a4dca2003bbee57a09f62c3795c2d2 (diff)
rt: fix threaded scheduler shutdown deadlock (#2074)
Previously, if an IO event was received during the runtime shutdown process, it was possible to enter a deadlock. This was because the scheduler shutdown logic did not expect tasks to be scheduled once the worker had entered the shutdown process.

This patch fixes the deadlock by checking the queues for new tasks after each call to park. If a new task is received, it is forcefully shut down.

Fixes #2061
Diffstat (limited to 'tokio/src/runtime/thread_pool')
-rw-r--r--  tokio/src/runtime/thread_pool/worker.rs  40
1 file changed, 27 insertions(+), 13 deletions(-)
diff --git a/tokio/src/runtime/thread_pool/worker.rs b/tokio/src/runtime/thread_pool/worker.rs
index 44df8b74..298ef06d 100644
--- a/tokio/src/runtime/thread_pool/worker.rs
+++ b/tokio/src/runtime/thread_pool/worker.rs
@@ -533,27 +533,41 @@ impl GenerationGuard<'_> {
// Transition all tasks owned by the worker to canceled.
self.owned().owned_tasks.shutdown();
- // First, drain all tasks from both the local & global queue.
- while let Some(task) = self.owned().work_queue.pop_local_first() {
- task.shutdown();
- }
-
- // Notify all workers in case they have pending tasks to drop
- //
- // Not super efficient, but we are also shutting down.
- self.worker.slices.notify_all();
+ // Always notify the first time around. This flushes any released tasks
+ // that happened before the call to `Worker::shutdown`
+ let mut notify = true;
// The worker can only shutdown once there are no further owned tasks.
- while !self.owned().owned_tasks.is_empty() {
+ loop {
+ // First, drain all tasks from both the local & global queue.
+ while let Some(task) = self.owned().work_queue.pop_local_first() {
+ notify = true;
+ task.shutdown();
+ }
+
+ if notify {
+ // If any tasks are shutdown, they may be pushed on another
+ // worker's `pending_drop` stack. However, we don't know which
+ // workers need to be notified, so we just notify all of them.
+ // Since this is a shutdown process, excessive notification is
+ // not a huge deal.
+ self.worker.slices.notify_all();
+ notify = false;
+ }
+
+ // Try draining more tasks
+ self.drain_tasks_pending_drop();
+
+ if self.owned().owned_tasks.is_empty() {
+ break;
+ }
+
// Wait until tasks that this worker owns are released.
//
// `transition_to_parked` is not called as we are not working
// anymore. When a task is released, the owning worker is unparked
// directly.
self.park_mut().park().expect("park failed");
-
- // Try draining more tasks
- self.drain_tasks_pending_drop();
}
}
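
For reference, the shutdown loop introduced by this patch can be summarized outside of the runtime. The following is a minimal, self-contained sketch of the drain/notify/park pattern shown in the diff above; the `Worker`, `Task`, `notify_all_workers`, and counter-based `owned_tasks` here are illustrative stand-ins, not Tokio's actual internal types or APIs.

use std::collections::VecDeque;

struct Task(u32);

impl Task {
    fn shutdown(self) {
        // The real runtime cancels the task here; the sketch just reports it.
        println!("shut down task {}", self.0);
    }
}

struct Worker {
    work_queue: VecDeque<Task>,   // stand-in for the local + global queues
    owned_tasks: usize,           // number of tasks this worker still owns
    pending_drop: VecDeque<Task>, // tasks released back to this worker by peers
}

impl Worker {
    fn shutdown(&mut self) {
        // Always notify on the first pass to flush any tasks released
        // before shutdown began.
        let mut notify = true;

        loop {
            // Drain tasks that were scheduled, even after shutdown started.
            // Skipping this re-check after parking is what allowed the deadlock.
            while let Some(task) = self.work_queue.pop_front() {
                notify = true;
                task.shutdown();
            }

            if notify {
                // Shut-down tasks may land on other workers' pending_drop
                // stacks, so wake everyone; over-notifying is fine here.
                self.notify_all_workers();
                notify = false;
            }

            // Release tasks that peers have already handed back.
            self.drain_tasks_pending_drop();

            if self.owned_tasks == 0 {
                break;
            }

            // Block until a peer releases one of our tasks, then loop
            // back and re-check the queues.
            self.park();
        }
    }

    fn notify_all_workers(&self) {
        // Wake peer workers so they can drop tasks released to them.
    }

    fn drain_tasks_pending_drop(&mut self) {
        while let Some(task) = self.pending_drop.pop_front() {
            drop(task);
            self.owned_tasks -= 1;
        }
    }

    fn park(&self) {
        // The real worker parks the thread here until it is unparked.
    }
}

fn main() {
    let mut worker = Worker {
        work_queue: VecDeque::from(vec![Task(1), Task(2)]),
        owned_tasks: 1,
        pending_drop: VecDeque::from(vec![Task(3)]),
    };
    worker.shutdown();
}

The key structural change mirrored by the sketch is that the queue drain now happens inside the loop, after every park, rather than once before it, so a task scheduled mid-shutdown is shut down instead of leaving the worker parked forever.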