path: root/tokio/src/executor/thread_pool/queue/global.rs

use crate::executor::loom::sync::atomic::AtomicUsize;
use crate::executor::loom::sync::Mutex;
use crate::executor::task::{Header, Task};

use std::marker::PhantomData;
use std::ptr::{self, NonNull};
use std::sync::atomic::Ordering::{Acquire, Release};
use std::usize;

/// Global queue of pending tasks, backed by an intrusive, mutex-protected
/// linked list of task headers.
pub(super) struct Queue<T: 'static> {
    /// Pointers to the head and tail of the queue
    pointers: Mutex<Pointers>,

    /// Number of pending tasks in the queue. This helps prevent unnecessary
    /// locking in the hot path.
    ///
    /// The LSB is a flag tracking whether or not the queue is closed.
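    ///
    /// In other words, `len` stores `(number_of_pending_tasks << 1) | closed_bit`.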
    len: AtomicUsize,

    _p: PhantomData<T>,
}

/// Head and tail of the intrusive, singly-linked list of tasks. All access
/// happens while holding the `pointers` mutex in `Queue`.
struct Pointers {
    head: *const Header,
    tail: *const Header,
}

/// Bit in `len` (the LSB) marking the queue as closed.
const CLOSED: usize = 1;

/// Maximum number of pending tasks; the count occupies every bit of `len`
/// except the `CLOSED` flag.
const MAX_LEN: usize = usize::MAX >> 1;

impl<T: 'static> Queue<T> {
    pub(super) fn new() -> Queue<T> {
        Queue {
            pointers: Mutex::new(Pointers {
                head: ptr::null(),
                tail: ptr::null(),
            }),
            len: AtomicUsize::new(0),
            _p: PhantomData,
        }
    }

    pub(super) fn is_empty(&self) -> bool {
        self.len() == 0
    }

    pub(super) fn is_closed(&self) -> bool {
        self.len.load(Acquire) & CLOSED == CLOSED
    }

    /// Close the worker queue
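    ///
    /// Returns `true` if this call transitioned the queue from open to closed,
    /// and `false` if the queue was already closed.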
    pub(super) fn close(&self) -> bool {
        // Acquire the lock
        let _p = self.pointers.lock().unwrap();

        let len = unsafe {
            // Read the current value. Because all mutations of `len` are
            // synchronized by the mutex, a read followed by a write is
            // acceptable.
            self.len.unsync_load()
        };

        let ret = len & CLOSED == 0;

        self.len.store(len | CLOSED, Release);

        ret
    }

    fn len(&self) -> usize {
        self.len.load(Acquire) >> 1
    }

    pub(super) fn wait_for_unlocked(&self) {
        // Acquire and release the lock immediately. This synchronizes the
        // caller **after** all external waiters are done with the scheduler
        // struct.
        drop(self.pointers.lock().unwrap());
    }

    /// Push a value into the queue and call the closure **while still holding
    /// the push lock**
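    ///
    /// The closure receives `Ok(())` if the task was enqueued, or `Err(task)`,
    /// handing the task back, if the queue has already been closed.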
    pub(super) fn push<F>(&self, task: Task<T>, f: F)
    where
        F: FnOnce(Result<(), Task<T>>),
    {
        unsafe {
            // Acquire queue lock
            let mut p = self.pointers.lock().unwrap();

            // Check if the queue is closed. This must happen while holding the lock.
            let len = self.len.unsync_load();
            if len & CLOSED == CLOSED {
                f(Err(task));
                return;
            }

            let task = task.into_raw();

            // The next pointer should already be null
            debug_assert!(get_next(task).is_null());

            if let Some(tail) = NonNull::new(p.tail as *mut _) {
                set_next(tail, task.as_ptr());
            } else {
                p.head = task.as_ptr();
            }

            p.tail = task.as_ptr();

            // Increment the count.
            //
            // All updates to the len atomic are guarded by the mutex. As such,
            // a non-atomic load followed by a store is safe.
            //
            // We increment by 2 to avoid touching the shutdown flag
            if (len >> 1) == MAX_LEN {
                eprintln!("[ERROR] overflowed task counter. This is a bug and should be reported.");
                std::process::abort();
            }

            self.len.store(len + 2, Release);
            f(Ok(()));
        }
    }

    /// Push a batch of `num` tasks that are already linked from `batch_head`
    /// to `batch_tail` via their `queue_next` pointers.
    pub(super) fn push_batch(&self, batch_head: Task<T>, batch_tail: Task<T>, num: usize) {
        unsafe {
            let batch_head = batch_head.into_raw().as_ptr();
            let batch_tail = batch_tail.into_raw();

            debug_assert!(get_next(batch_tail).is_null());

            let mut p = self.pointers.lock().unwrap();

            if let Some(tail) = NonNull::new(p.tail as *mut _) {
                set_next(tail, batch_head);
            } else {
                p.head = batch_head;
            }

            p.tail = batch_tail.as_ptr();

            // Increment the count.
            //
            // All updates to the len atomic are guarded by the mutex. As such,
            // a non-atomic load followed by a store is safe.
            //
            // Left shift by 1 to avoid touching the shutdown flag.
            let len = self.len.unsync_load();

            if (len >> 1) >= (MAX_LEN - num) {
                // The task counter would overflow; abort as in `push`.
                std::process::abort();
            }

            self.len.store(len + (num << 1), Release);
        }
    }

    /// Pop a task from the head of the queue, returning `None` if the queue
    /// is empty.
    pub(super) fn pop(&self) -> Option<Task<T>> {
        // Fast path, if len == 0, then there are no values
        if self.is_empty() {
            return None;
        }

        unsafe {
            let mut p = self.pointers.lock().unwrap();

            // It is possible to hit null here if another thread popped the
            // last task between us checking `len` and acquiring the lock.
            let task = NonNull::new(p.head as *mut _)?;

            p.head = get_next(task);

            if p.head.is_null() {
                p.tail = ptr::null();
            }

            set_next(task, ptr::null());

            // Decrement the count.
            //
            // All updates to the len atomic are guarded by the mutex. As such,
            // a non-atomic load followed by a store is safe.
            //
            // Decrement by 2 to avoid touching the shutdown flag
            self.len.store(self.len.unsync_load() - 2, Release);

            Some(Task::from_raw(task))
        }
    }
}

/// Read the intrusive `queue_next` pointer stored in the task header.
unsafe fn get_next(meta: NonNull<Header>) -> *const Header {
    *meta.as_ref().queue_next.get()
}

/// Write the intrusive `queue_next` pointer stored in the task header.
unsafe fn set_next(meta: NonNull<Header>, val: *const Header) {
    *meta.as_ref().queue_next.get() = val;
}