author    Eliza Weisman <eliza@buoyant.io>    2020-03-27 16:14:07 -0700
committer GitHub <noreply@github.com>         2020-03-27 16:14:07 -0700
commit    00725f6876821f2ec5246a807563e35c5e53f3e1
tree      690d5f18d3c7c84489a4e25ce51986232842fb2a /tokio/src/sync/tests
parent    5c71268bb88a1125e822f5a0a68ff996f6811736
sync: fix possible dangling pointer in semaphore (#2340)
## Motivation

When cancelling futures that are waiting to acquire semaphore permits, a dangling pointer is possible if notified futures are dropped after the notified wakers have been split into a separate list. Because those futures' wait-queue nodes are no longer in the main list guarded by the lock, their `Drop` impls complete immediately, and they may be dropped while still in the list of tasks to notify.

## Solution

This branch fixes the bug by popping from the wait list inside the lock. The wakers of popped nodes are temporarily stored in a stack array so that they can be notified after the lock is released. Since the size of the stack array is fixed, we may in some cases have to loop multiple times, acquiring and releasing the lock, until all permits have been released. This may have the side benefit of preventing a thread that releases a very large number of permits from starving other threads that need to enqueue waiters.

I've also added a loom test that reliably reproduces a segfault on master but passes on this branch (after a lot of iterations).

Signed-off-by: Eliza Weisman <eliza@buoyant.io>
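As a rough illustration of the approach, here is a minimal sketch, not tokio's actual semaphore internals: the `add_permits` and `NOTIFY_BATCH` names and the `Mutex<Vec<Waker>>` queue are assumptions for illustration (the real implementation uses an intrusive linked list of wait nodes). It shows the core pattern: pop waiters into a fixed-size stack buffer while holding the lock, then wake them only after the lock is released, looping until all permits are consumed.

```rust
use std::sync::Mutex;
use std::task::Waker;

// Assumed batch size; bounds both stack usage and lock hold time.
const NOTIFY_BATCH: usize = 32;

// Hypothetical release path over a simplified waiter queue.
fn add_permits(mut permits: usize, waiters: &Mutex<Vec<Waker>>) {
    while permits > 0 {
        // Fixed-size stack buffer for wakers popped under the lock.
        let mut batch: [Option<Waker>; NOTIFY_BATCH] = std::array::from_fn(|_| None);
        let mut popped = 0;

        {
            let mut queue = waiters.lock().unwrap();
            // Remove waiters from the queue *inside* the lock, so a
            // concurrently dropped future can never still be linked
            // into a list we are about to walk unlocked.
            while popped < NOTIFY_BATCH && permits > 0 {
                match queue.pop() {
                    Some(waker) => {
                        batch[popped] = Some(waker);
                        popped += 1;
                        permits -= 1;
                    }
                    // No waiters left; the real semaphore would add the
                    // remaining permits back to its counter here.
                    None => permits = 0,
                }
            }
        } // lock released here, before any waker runs

        for slot in &mut batch[..popped] {
            if let Some(waker) = slot.take() {
                waker.wake();
            }
        }
    }
}
```

Because the buffer is fixed-size, releasing more permits than `NOTIFY_BATCH` waiters can absorb takes several lock acquisitions, which is the looping behavior (and the starvation-avoidance side effect) described above.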
Diffstat (limited to 'tokio/src/sync/tests')
-rw-r--r--  tokio/src/sync/tests/loom_semaphore_batch.rs | 44
1 file changed, 44 insertions(+), 0 deletions(-)
diff --git a/tokio/src/sync/tests/loom_semaphore_batch.rs b/tokio/src/sync/tests/loom_semaphore_batch.rs
index 4c1936c5..76a1bc00 100644
--- a/tokio/src/sync/tests/loom_semaphore_batch.rs
+++ b/tokio/src/sync/tests/loom_semaphore_batch.rs
@@ -114,6 +114,50 @@ fn concurrent_close() {
}
#[test]
+fn concurrent_cancel() {
+ async fn poll_and_cancel(semaphore: Arc<Semaphore>) {
+ let mut acquire1 = Some(semaphore.acquire(1));
+ let mut acquire2 = Some(semaphore.acquire(1));
+ poll_fn(|cx| {
+ // poll the acquire future once, and then immediately throw
+ // it away. this simulates a situation where a future is
+ // polled and then cancelled, such as by a timeout.
+ if let Some(acquire) = acquire1.take() {
+ pin!(acquire);
+ let _ = acquire.poll(cx);
+ }
+ if let Some(acquire) = acquire2.take() {
+ pin!(acquire);
+ let _ = acquire.poll(cx);
+ }
+ Poll::Ready(())
+ })
+ .await
+ }
+
+ loom::model(|| {
+ let semaphore = Arc::new(Semaphore::new(0));
+ let t1 = {
+ let semaphore = semaphore.clone();
+ thread::spawn(move || block_on(poll_and_cancel(semaphore)))
+ };
+ let t2 = {
+ let semaphore = semaphore.clone();
+ thread::spawn(move || block_on(poll_and_cancel(semaphore)))
+ };
+ let t3 = {
+ let semaphore = semaphore.clone();
+ thread::spawn(move || block_on(poll_and_cancel(semaphore)))
+ };
+
+ t1.join().unwrap();
+ semaphore.release(10);
+ t2.join().unwrap();
+ t3.join().unwrap();
+ });
+}
+
+#[test]
fn batch() {
let mut b = loom::model::Builder::new();
b.preemption_bound = Some(1);