From a0557840eb424e174bf81a0175c40f9e176a2cc2 Mon Sep 17 00:00:00 2001
From: Sean McArthur
Date: Wed, 23 Sep 2020 13:02:15 -0700
Subject: io: use intrusive wait list for I/O driver (#2828)

This refactors I/O registration in a few ways:

- Cleans up the cached readiness in `PollEvented`. This cache used to be
  helpful when readiness was a linked list of `*mut Node`s in
  `Registration`. Previous refactors have turned `Registration` into just
  an `AtomicUsize` holding the current readiness, so the cache is now
  extra work and complexity. Gone.
- Polling the `Registration` for readiness now yields a `ReadyEvent`,
  which includes the driver tick. This event must be passed back into
  `clear_readiness`, so that the readiness is only cleared from
  `Registration` if the tick hasn't changed. Previously, it was possible
  to clear the readiness even though another thread had *just* polled the
  driver and found the socket ready again.
- `Registration` now also contains an `async fn readiness`, which stores
  wakers in an intrusive linked list. This allows an unbounded number of
  tasks to register for readiness (previously, only 1 per direction (read
  and write)). Because the intrusive linked list stores each waker inside
  the `async fn` itself, there is no concern of leaking waker storage:
  the node is released when the future is dropped.
- `Registration` retains a `poll_readiness(Direction)` method to support
  `AsyncRead` and `AsyncWrite`. Those traits can't use `async fn`s, so
  two slots (one per direction) are reserved for them.
- I/O types where it makes sense to have multiple tasks waiting on them,
  such as `UdpSocket` and `UnixDatagram`, now take advantage of this new
  `async fn readiness`.

Additionally, this makes the `io-driver` "feature" internal-only (no
longer documented, not part of the public API), and adds a second
internal-only feature, `io-readiness`, to group the linked-list part of
registration that is only used by some of the I/O types.

After a bit of discussion, changing stream-based transports (like
`TcpStream`) to `async fn read(&self)` is punted, since that is likely
too easy a footgun to activate.
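To show how the new pieces fit together, here is a minimal, runnable
sketch of the retry loop that `PollEvented::async_io` now implements.
`FakeRegistration`, the integer `readiness` bits, and the scripted event
queue are hypothetical stand-ins for this example only (the real types
wrap a `mio::Ready` mask plus the driver tick and park the task on the
intrusive wait list in `ScheduledIo`); `futures::executor::block_on` is
assumed just to drive the future without a runtime.

    use std::cell::RefCell;
    use std::io;

    #[allow(dead_code)]
    #[derive(Clone, Copy, Debug)]
    struct ReadyEvent {
        tick: u8,
        readiness: u8, // bit 0 = readable, bit 1 = writable
    }

    struct FakeRegistration {
        // Scripted events standing in for the I/O driver.
        scripted: RefCell<Vec<ReadyEvent>>,
    }

    impl FakeRegistration {
        async fn readiness(&self, _interest: u8) -> io::Result<ReadyEvent> {
            // Real version: suspend on the intrusive wait list until the
            // driver observes readiness. Fake version: pop the next event.
            Ok(self.scripted.borrow_mut().remove(0))
        }

        fn clear_readiness(&self, event: ReadyEvent) {
            // Real version: only clears stored readiness if `event.tick`
            // still matches the driver tick, so a racing wakeup survives.
            let _ = event;
        }
    }

    // The loop this patch introduces as `PollEvented::async_io`: run the
    // non-blocking op, and on `WouldBlock` hand the consumed event back
    // to `clear_readiness` before awaiting readiness again.
    async fn async_io<T>(
        reg: &FakeRegistration,
        interest: u8,
        mut op: impl FnMut() -> io::Result<T>,
    ) -> io::Result<T> {
        loop {
            let event = reg.readiness(interest).await?;
            match op() {
                Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
                    reg.clear_readiness(event)
                }
                res => return res,
            }
        }
    }

    fn main() -> io::Result<()> {
        let reg = FakeRegistration {
            scripted: RefCell::new(vec![
                ReadyEvent { tick: 1, readiness: 0b01 },
                ReadyEvent { tick: 2, readiness: 0b01 },
            ]),
        };

        // First attempt reports `WouldBlock`, the second succeeds.
        let mut attempts = 0;
        let n = futures::executor::block_on(async_io(&reg, 0b01, || {
            attempts += 1;
            if attempts == 1 {
                Err(io::Error::new(io::ErrorKind::WouldBlock, "not ready"))
            } else {
                Ok(42usize)
            }
        }))?;

        assert_eq!(n, 42);
        Ok(())
    }
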
Refs: #2779, #2728 --- .github/workflows/ci.yml | 4 +- examples/connect.rs | 8 +- examples/echo-udp.rs | 2 +- examples/udp-client.rs | 2 +- examples/udp-codec.rs | 6 + tokio-util/Cargo.toml | 3 +- tokio-util/src/cfg.rs | 2 + tokio-util/src/lib.rs | 5 + tokio-util/tests/udp.rs | 2 + tokio/Cargo.toml | 8 +- tokio/src/io/driver/mod.rs | 57 ++-- tokio/src/io/driver/scheduled_io.rs | 410 +++++++++++++++++++++++++++-- tokio/src/io/mod.rs | 4 +- tokio/src/io/poll_evented.rs | 295 +++++++-------------- tokio/src/io/registration.rs | 237 ++--------------- tokio/src/lib.rs | 4 +- tokio/src/macros/cfg.rs | 10 +- tokio/src/net/tcp/listener.rs | 20 +- tokio/src/net/tcp/stream.rs | 254 +++++++++--------- tokio/src/net/udp/mod.rs | 4 - tokio/src/net/udp/socket.rs | 116 ++------ tokio/src/net/udp/split.rs | 148 ----------- tokio/src/net/unix/datagram/mod.rs | 5 - tokio/src/net/unix/datagram/socket.rs | 227 +++------------- tokio/src/net/unix/datagram/split.rs | 68 ----- tokio/src/net/unix/datagram/split_owned.rs | 148 ----------- tokio/src/net/unix/listener.rs | 30 +-- tokio/src/net/unix/stream.rs | 54 ++-- tokio/src/signal/unix/driver.rs | 43 +-- tokio/src/sync/task/atomic_waker.rs | 5 +- tokio/src/util/bit.rs | 2 +- tokio/src/util/linked_list.rs | 54 ++++ tokio/src/util/mod.rs | 2 +- tokio/src/util/slab.rs | 2 + tokio/tests/async_send_sync.rs | 4 - tokio/tests/rt_common.rs | 4 +- tokio/tests/udp.rs | 34 +-- tokio/tests/uds_datagram.rs | 34 +-- 38 files changed, 888 insertions(+), 1429 deletions(-) delete mode 100644 tokio/src/net/udp/split.rs delete mode 100644 tokio/src/net/unix/datagram/split.rs delete mode 100644 tokio/src/net/unix/datagram/split_owned.rs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 756a7677..16da8c95 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -150,11 +150,11 @@ jobs: run: cargo install cargo-hack - name: check --each-feature - run: cargo hack check --all --each-feature -Z avoid-dev-deps + run: cargo hack check --all --each-feature --skip io-driver,io-readiness -Z avoid-dev-deps # Try with unstable feature flags - name: check --each-feature --unstable - run: cargo hack check --all --each-feature -Z avoid-dev-deps + run: cargo hack check --all --each-feature --skip io-driver,io-readiness -Z avoid-dev-deps env: RUSTFLAGS: --cfg tokio_unstable -Dwarnings diff --git a/examples/connect.rs b/examples/connect.rs index 5d0515a7..6e909b25 100644 --- a/examples/connect.rs +++ b/examples/connect.rs @@ -96,7 +96,6 @@ mod udp { use std::error::Error; use std::io; use std::net::SocketAddr; - use tokio::net::udp::{RecvHalf, SendHalf}; use tokio::net::UdpSocket; pub async fn connect( @@ -114,16 +113,15 @@ mod udp { let socket = UdpSocket::bind(&bind_addr).await?; socket.connect(addr).await?; - let (mut r, mut w) = socket.split(); - future::try_join(send(stdin, &mut w), recv(stdout, &mut r)).await?; + future::try_join(send(stdin, &socket), recv(stdout, &socket)).await?; Ok(()) } async fn send( mut stdin: impl Stream> + Unpin, - writer: &mut SendHalf, + writer: &UdpSocket, ) -> Result<(), io::Error> { while let Some(item) = stdin.next().await { let buf = item?; @@ -135,7 +133,7 @@ mod udp { async fn recv( mut stdout: impl Sink + Unpin, - reader: &mut RecvHalf, + reader: &UdpSocket, ) -> Result<(), io::Error> { loop { let mut buf = vec![0; 1024]; diff --git a/examples/echo-udp.rs b/examples/echo-udp.rs index bc688b9b..3027c869 100644 --- a/examples/echo-udp.rs +++ b/examples/echo-udp.rs @@ -26,7 +26,7 @@ struct Server { impl Server { async fn 
run(self) -> Result<(), io::Error> { let Server { - mut socket, + socket, mut buf, mut to_send, } = self; diff --git a/examples/udp-client.rs b/examples/udp-client.rs index a191033d..a394ee66 100644 --- a/examples/udp-client.rs +++ b/examples/udp-client.rs @@ -55,7 +55,7 @@ async fn main() -> Result<(), Box> { } .parse()?; - let mut socket = UdpSocket::bind(local_addr).await?; + let socket = UdpSocket::bind(local_addr).await?; const MAX_DATAGRAM_SIZE: usize = 65_507; socket.connect(&remote_addr).await?; let data = get_stdin_data()?; diff --git a/examples/udp-codec.rs b/examples/udp-codec.rs index 8b64cbc3..f338bd84 100644 --- a/examples/udp-codec.rs +++ b/examples/udp-codec.rs @@ -1,3 +1,8 @@ +fn main() {} + +// Disabled while future of UdpFramed is decided on. +// See https://github.com/tokio-rs/tokio/issues/2830 +/* //! This example leverages `BytesCodec` to create a UDP client and server which //! speak a custom protocol. //! @@ -78,3 +83,4 @@ async fn pong(socket: &mut UdpFramed) -> Result<(), io::Error> { Ok(()) } +*/ diff --git a/tokio-util/Cargo.toml b/tokio-util/Cargo.toml index 85b4e592..45daa2b1 100644 --- a/tokio-util/Cargo.toml +++ b/tokio-util/Cargo.toml @@ -25,11 +25,10 @@ publish = false default = [] # Shorthand for enabling everything -full = ["codec", "udp", "compat", "io"] +full = ["codec", "compat", "io"] compat = ["futures-io",] codec = ["tokio/stream"] -udp = ["tokio/udp"] io = [] [dependencies] diff --git a/tokio-util/src/cfg.rs b/tokio-util/src/cfg.rs index 2efa5f09..f9176747 100644 --- a/tokio-util/src/cfg.rs +++ b/tokio-util/src/cfg.rs @@ -18,6 +18,7 @@ macro_rules! cfg_compat { } } +/* macro_rules! cfg_udp { ($($item:item)*) => { $( @@ -27,6 +28,7 @@ macro_rules! cfg_udp { )* } } +*/ macro_rules! cfg_io { ($($item:item)*) => { diff --git a/tokio-util/src/lib.rs b/tokio-util/src/lib.rs index 24a8af95..b96d9044 100644 --- a/tokio-util/src/lib.rs +++ b/tokio-util/src/lib.rs @@ -30,9 +30,14 @@ cfg_codec! { pub mod codec; } +/* +Disabled due to removal of poll_ functions on UdpSocket. + +See https://github.com/tokio-rs/tokio/issues/2830 cfg_udp! { pub mod udp; } +*/ cfg_compat! 
{ pub mod compat; diff --git a/tokio-util/tests/udp.rs b/tokio-util/tests/udp.rs index 4820ac72..99763854 100644 --- a/tokio-util/tests/udp.rs +++ b/tokio-util/tests/udp.rs @@ -1,3 +1,4 @@ +/* #![warn(rust_2018_idioms)] use tokio::{net::UdpSocket, stream::StreamExt}; @@ -100,3 +101,4 @@ async fn send_framed_lines_codec() -> std::io::Result<()> { Ok(()) } +*/ diff --git a/tokio/Cargo.toml b/tokio/Cargo.toml index b1d943e3..6df368eb 100644 --- a/tokio/Cargo.toml +++ b/tokio/Cargo.toml @@ -33,7 +33,6 @@ full = [ "blocking", "dns", "fs", - "io-driver", "io-util", "io-std", "macros", @@ -51,7 +50,8 @@ full = [ blocking = ["rt-core"] dns = ["rt-core"] fs = ["rt-core", "io-util"] -io-driver = ["mio", "lazy_static"] +io-driver = ["mio", "lazy_static"] # internal only +io-readiness = [] # internal only io-util = ["memchr"] # stdin, stdout, stderr io-std = ["rt-core"] @@ -85,8 +85,8 @@ sync = ["fnv"] test-util = [] tcp = ["io-driver", "iovec"] time = ["slab"] -udp = ["io-driver"] -uds = ["io-driver", "mio-uds", "libc"] +udp = ["io-driver", "io-readiness"] +uds = ["io-driver", "io-readiness", "mio-uds", "libc"] [dependencies] tokio-macros = { version = "0.3.0", path = "../tokio-macros", optional = true } diff --git a/tokio/src/io/driver/mod.rs b/tokio/src/io/driver/mod.rs index d8c253ab..30b30203 100644 --- a/tokio/src/io/driver/mod.rs +++ b/tokio/src/io/driver/mod.rs @@ -12,14 +12,13 @@ use mio::event::Evented; use std::fmt; use std::io; use std::sync::{Arc, Weak}; -use std::task::Waker; use std::time::Duration; /// I/O driver, backed by Mio pub(crate) struct Driver { /// Tracks the number of times `turn` is called. It is safe for this to wrap /// as it is mostly used to determine when to call `compact()` - tick: u16, + tick: u8, /// Reuse the `mio::Events` value across calls to poll. events: Option, @@ -40,6 +39,11 @@ pub(crate) struct Handle { inner: Weak, } +pub(crate) struct ReadyEvent { + tick: u8, + readiness: mio::Ready, +} + pub(super) struct Inner { /// The underlying system event queue. io: mio::Poll, @@ -57,6 +61,11 @@ pub(super) enum Direction { Write, } +enum Tick { + Set(u8), + Clear(u8), +} + // TODO: Don't use a fake token. Instead, reserve a slot entry for the wakeup // token. const TOKEN_WAKEUP: mio::Token = mio::Token(1 << 31); @@ -122,11 +131,11 @@ impl Driver { fn turn(&mut self, max_wait: Option) -> io::Result<()> { // How often to call `compact()` on the resource slab - const COMPACT_INTERVAL: u16 = 256; + const COMPACT_INTERVAL: u8 = 255; self.tick = self.tick.wrapping_add(1); - if self.tick % COMPACT_INTERVAL == 0 { + if self.tick == COMPACT_INTERVAL { self.resources.compact(); } @@ -160,9 +169,6 @@ impl Driver { } fn dispatch(&mut self, token: mio::Token, ready: mio::Ready) { - let mut rd = None; - let mut wr = None; - let addr = slab::Address::from_usize(ADDRESS.unpack(token.0)); let io = match self.resources.get(addr) { @@ -170,29 +176,15 @@ impl Driver { None => return, }; - if io - .set_readiness(Some(token.0), |curr| curr | ready.as_usize()) - .is_err() - { + let set = io.set_readiness(Some(token.0), Tick::Set(self.tick), |curr| { + curr | ready.as_usize() + }); + if set.is_err() { // token no longer valid! 
return; } - if ready.is_writable() || platform::is_hup(ready) || platform::is_error(ready) { - wr = io.writer.take_waker(); - } - - if !(ready & (!mio::Ready::writable())).is_empty() { - rd = io.reader.take_waker(); - } - - if let Some(w) = rd { - w.wake(); - } - - if let Some(w) = wr { - w.wake(); - } + io.wake(ready); } } @@ -202,8 +194,7 @@ impl Drop for Driver { // If a task is waiting on the I/O resource, notify it. The task // will then attempt to use the I/O resource and fail due to the // driver being shutdown. - io.reader.wake(); - io.writer.wake(); + io.wake(mio::Ready::all()); }) } } @@ -310,16 +301,6 @@ impl Inner { pub(super) fn deregister_source(&self, source: &dyn Evented) -> io::Result<()> { self.io.deregister(source) } - - /// Registers interest in the I/O resource associated with `token`. - pub(super) fn register(&self, io: &slab::Ref, dir: Direction, w: Waker) { - let waker = match dir { - Direction::Read => &io.reader, - Direction::Write => &io.writer, - }; - - waker.register(w); - } } impl Direction { diff --git a/tokio/src/io/driver/scheduled_io.rs b/tokio/src/io/driver/scheduled_io.rs index 566f7daf..48c56a19 100644 --- a/tokio/src/io/driver/scheduled_io.rs +++ b/tokio/src/io/driver/scheduled_io.rs @@ -1,8 +1,21 @@ -use crate::loom::future::AtomicWaker; +use super::{platform, Direction, ReadyEvent, Tick}; use crate::loom::sync::atomic::AtomicUsize; +use crate::loom::sync::Mutex; +use crate::util::bit; use crate::util::slab::Entry; use std::sync::atomic::Ordering::{AcqRel, Acquire, Release}; +use std::task::{Context, Poll, Waker}; + +cfg_io_readiness! { + use crate::util::linked_list::{self, LinkedList}; + + use std::cell::UnsafeCell; + use std::future::Future; + use std::marker::PhantomPinned; + use std::pin::Pin; + use std::ptr::NonNull; +} /// Stored in the I/O driver resource slab. #[derive(Debug)] @@ -10,19 +23,84 @@ pub(crate) struct ScheduledIo { /// Packs the resource's readiness with the resource's generation. readiness: AtomicUsize, - /// Task waiting on read readiness - pub(crate) reader: AtomicWaker, + waiters: Mutex, +} + +#[cfg(feature = "io-readiness")] +type WaitList = LinkedList::Target>; + +#[derive(Debug, Default)] +struct Waiters { + #[cfg(feature = "io-readiness")] + /// List of all current waiters + list: WaitList, + + /// Waker used for AsyncRead + reader: Option, + + /// Waker used for AsyncWrite + writer: Option, +} + +cfg_io_readiness! { + #[derive(Debug)] + struct Waiter { + pointers: linked_list::Pointers, + + /// The waker for this task + waker: Option, + + /// The interest this waiter is waiting on + interest: mio::Ready, + + is_ready: bool, + + /// Should never be `!Unpin` + _p: PhantomPinned, + } + + /// Future returned by `readiness()` + struct Readiness<'a> { + scheduled_io: &'a ScheduledIo, + + state: State, + + /// Entry in the waiter `LinkedList`. + waiter: UnsafeCell, + } + + enum State { + Init, + Waiting, + Done, + } +} + +// The `ScheduledIo::readiness` (`AtomicUsize`) is packed full of goodness. 
+// +// | reserved | generation | driver tick | readinesss | +// |----------+------------+--------------+------------| +// | 1 bit | 7 bits + 8 bits + 16 bits | + +const READINESS: bit::Pack = bit::Pack::least_significant(16); + +const TICK: bit::Pack = READINESS.then(8); + +const GENERATION: bit::Pack = TICK.then(7); - /// Task waiting on write readiness - pub(crate) writer: AtomicWaker, +#[test] +fn test_generations_assert_same() { + assert_eq!(super::GENERATION, GENERATION); } +// ===== impl ScheduledIo ===== + impl Entry for ScheduledIo { fn reset(&self) { let state = self.readiness.load(Acquire); - let generation = super::GENERATION.unpack(state); - let next = super::GENERATION.pack_lossy(generation + 1, 0); + let generation = GENERATION.unpack(state); + let next = GENERATION.pack_lossy(generation + 1, 0); self.readiness.store(next, Release); } @@ -32,15 +110,14 @@ impl Default for ScheduledIo { fn default() -> ScheduledIo { ScheduledIo { readiness: AtomicUsize::new(0), - reader: AtomicWaker::new(), - writer: AtomicWaker::new(), + waiters: Mutex::new(Default::default()), } } } impl ScheduledIo { pub(crate) fn generation(&self) -> usize { - super::GENERATION.unpack(self.readiness.load(Acquire)) + GENERATION.unpack(self.readiness.load(Acquire)) } /// Sets the readiness on this `ScheduledIo` by invoking the given closure on @@ -48,6 +125,8 @@ impl ScheduledIo { /// /// # Arguments /// - `token`: the token for this `ScheduledIo`. + /// - `tick`: whether setting the tick or trying to clear readiness for a + /// specific tick. /// - `f`: a closure returning a new readiness value given the previous /// readiness. /// @@ -57,51 +136,330 @@ impl ScheduledIo { /// generation, then the corresponding IO resource has been removed and /// replaced with a new resource. In that case, this method returns `Err`. /// Otherwise, this returns the previous readiness. - pub(crate) fn set_readiness( + pub(super) fn set_readiness( &self, token: Option, + tick: Tick, f: impl Fn(usize) -> usize, ) -> Result { let mut current = self.readiness.load(Acquire); loop { - let current_generation = super::GENERATION.unpack(current); + let current_generation = GENERATION.unpack(current); if let Some(token) = token { // Check that the generation for this access is still the // current one. - if super::GENERATION.unpack(token) != current_generation { + if GENERATION.unpack(token) != current_generation { return Err(()); } } - // Mask out the generation bits so that the modifying function - // doesn't see them. + // Mask out the tick/generation bits so that the modifying + // function doesn't see them. let current_readiness = current & mio::Ready::all().as_usize(); - let new = f(current_readiness); + let mut new = f(current_readiness); debug_assert!( - new <= super::ADDRESS.max_value(), - "new readiness value would overwrite generation bits!" + new <= READINESS.max_value(), + "new readiness value would overwrite tick/generation bits!" ); - match self.readiness.compare_exchange( - current, - super::GENERATION.pack(current_generation, new), - AcqRel, - Acquire, - ) { + match tick { + Tick::Set(t) => { + new = TICK.pack(t as usize, new); + } + Tick::Clear(t) => { + if TICK.unpack(current) as u8 != t { + // Trying to clear readiness with an old event! + return Err(()); + } + new = TICK.pack(t as usize, new); + } + } + + new = GENERATION.pack(current_generation, new); + + match self + .readiness + .compare_exchange(current, new, AcqRel, Acquire) + { Ok(_) => return Ok(current), // we lost the race, retry! 
Err(actual) => current = actual, } } } + + pub(super) fn wake(&self, ready: mio::Ready) { + let mut waiters = self.waiters.lock().unwrap(); + + // check for AsyncRead slot + if !(ready & (!mio::Ready::writable())).is_empty() { + if let Some(waker) = waiters.reader.take() { + waker.wake(); + } + } + + // check for AsyncWrite slot + if ready.is_writable() || platform::is_hup(ready) || platform::is_error(ready) { + if let Some(waker) = waiters.writer.take() { + waker.wake(); + } + } + + #[cfg(feature = "io-readiness")] + { + // check list of waiters + for waiter in waiters + .list + .drain_filter(|w| !(w.interest & ready).is_empty()) + { + let waiter = unsafe { &mut *waiter.as_ptr() }; + if let Some(waker) = waiter.waker.take() { + waiter.is_ready = true; + waker.wake(); + } + } + } + } + + /// Poll version of checking readiness for a certain direction. + /// + /// These are to support `AsyncRead` and `AsyncWrite` polling methods, + /// which cannot use the `async fn` version. This uses reserved reader + /// and writer slots. + pub(in crate::io) fn poll_readiness( + &self, + cx: &mut Context<'_>, + direction: Direction, + ) -> Poll { + let curr = self.readiness.load(Acquire); + + let ready = direction.mask() & mio::Ready::from_usize(READINESS.unpack(curr)); + + if ready.is_empty() { + // Update the task info + let mut waiters = self.waiters.lock().unwrap(); + let slot = match direction { + Direction::Read => &mut waiters.reader, + Direction::Write => &mut waiters.writer, + }; + *slot = Some(cx.waker().clone()); + + // Try again, in case the readiness was changed while we were + // taking the waiters lock + let curr = self.readiness.load(Acquire); + let ready = direction.mask() & mio::Ready::from_usize(READINESS.unpack(curr)); + if ready.is_empty() { + Poll::Pending + } else { + Poll::Ready(ReadyEvent { + tick: TICK.unpack(curr) as u8, + readiness: ready, + }) + } + } else { + Poll::Ready(ReadyEvent { + tick: TICK.unpack(curr) as u8, + readiness: ready, + }) + } + } + + pub(crate) fn clear_readiness(&self, event: ReadyEvent) { + // This consumes the current readiness state **except** for HUP and + // error. HUP and error are excluded because a) they are final states + // and never transition out and b) both the read AND the write + // directions need to be able to obvserve these states. + // + // # Platform-specific behavior + // + // HUP and error readiness are platform-specific. On epoll platforms, + // HUP has specific conditions that must be met by both peers of a + // connection in order to be triggered. + // + // On epoll platforms, `EPOLLERR` is signaled through + // `UnixReady::error()` and is important to be observable by both read + // AND write. A specific case that `EPOLLERR` occurs is when the read + // end of a pipe is closed. When this occurs, a peer blocked by + // writing to the pipe should be notified. + let mask_no_hup = (event.readiness - platform::hup() - platform::error()).as_usize(); + + // result isn't important + let _ = self.set_readiness(None, Tick::Clear(event.tick), |curr| curr & (!mask_no_hup)); + } } impl Drop for ScheduledIo { fn drop(&mut self) { - self.writer.wake(); - self.reader.wake(); + self.wake(mio::Ready::all()); + } +} + +unsafe impl Send for ScheduledIo {} +unsafe impl Sync for ScheduledIo {} + +cfg_io_readiness! 
{ + impl ScheduledIo { + /// An async version of `poll_readiness` which uses a linked list of wakers + pub(crate) async fn readiness(&self, interest: mio::Ready) -> ReadyEvent { + self.readiness_fut(interest).await + } + + // This is in a separate function so that the borrow checker doesn't think + // we are borrowing the `UnsafeCell` possibly over await boundaries. + // + // Go figure. + fn readiness_fut(&self, interest: mio::Ready) -> Readiness<'_> { + Readiness { + scheduled_io: self, + state: State::Init, + waiter: UnsafeCell::new(Waiter { + pointers: linked_list::Pointers::new(), + waker: None, + is_ready: false, + interest, + _p: PhantomPinned, + }), + } + } + } + + unsafe impl linked_list::Link for Waiter { + type Handle = NonNull; + type Target = Waiter; + + fn as_raw(handle: &NonNull) -> NonNull { + *handle + } + + unsafe fn from_raw(ptr: NonNull) -> NonNull { + ptr + } + + unsafe fn pointers(mut target: NonNull) -> NonNull> { + NonNull::from(&mut target.as_mut().pointers) + } + } + + // ===== impl Readiness ===== + + impl Future for Readiness<'_> { + type Output = ReadyEvent; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + use std::sync::atomic::Ordering::SeqCst; + + let (scheduled_io, state, waiter) = unsafe { + let me = self.get_unchecked_mut(); + (&me.scheduled_io, &mut me.state, &me.waiter) + }; + + loop { + match *state { + State::Init => { + // Optimistically check existing readiness + let curr = scheduled_io.readiness.load(SeqCst); + let readiness = mio::Ready::from_usize(READINESS.unpack(curr)); + + // Safety: `waiter.interest` never changes + let interest = unsafe { (*waiter.get()).interest }; + + if readiness.contains(interest) { + // Currently ready! + let tick = TICK.unpack(curr) as u8; + *state = State::Done; + return Poll::Ready(ReadyEvent { readiness, tick }); + } + + // Wasn't ready, take the lock (and check again while locked). + let mut waiters = scheduled_io.waiters.lock().unwrap(); + + let curr = scheduled_io.readiness.load(SeqCst); + let readiness = mio::Ready::from_usize(READINESS.unpack(curr)); + + if readiness.contains(interest) { + // Currently ready! + let tick = TICK.unpack(curr) as u8; + *state = State::Done; + return Poll::Ready(ReadyEvent { readiness, tick }); + } + + // Not ready even after locked, insert into list... + + // Safety: called while locked + unsafe { + (*waiter.get()).waker = Some(cx.waker().clone()); + } + + // Insert the waiter into the linked list + // + // safety: pointers from `UnsafeCell` are never null. + waiters + .list + .push_front(unsafe { NonNull::new_unchecked(waiter.get()) }); + *state = State::Waiting; + } + State::Waiting => { + // Currently in the "Waiting" state, implying the caller has + // a waiter stored in the waiter list (guarded by + // `notify.waiters`). In order to access the waker fields, + // we must hold the lock. + + let waiters = scheduled_io.waiters.lock().unwrap(); + + // Safety: called while locked + let w = unsafe { &mut *waiter.get() }; + + if w.is_ready { + // Our waker has been notified. + *state = State::Done; + } else { + // Update the waker, if necessary. + if !w.waker.as_ref().unwrap().will_wake(cx.waker()) { + w.waker = Some(cx.waker().clone()); + } + + return Poll::Pending; + } + + // Explicit drop of the lock to indicate the scope that the + // lock is held. Because holding the lock is required to + // ensure safe access to fields not held within the lock, it + // is helpful to visualize the scope of the critical + // section. 
+ drop(waiters); + } + State::Done => { + let tick = TICK.unpack(scheduled_io.readiness.load(Acquire)) as u8; + + // Safety: State::Done means it is no longer shared + let w = unsafe { &mut *waiter.get() }; + + return Poll::Ready(ReadyEvent { + tick, + readiness: w.interest, + }); + } + } + } + } + } + + impl Drop for Readiness<'_> { + fn drop(&mut self) { + let mut waiters = self.scheduled_io.waiters.lock().unwrap(); + + // Safety: `waiter` is only ever stored in `waiters` + unsafe { + waiters + .list + .remove(NonNull::new_unchecked(self.waiter.get())) + }; + } } + + unsafe impl Send for Readiness<'_> {} + unsafe impl Sync for Readiness<'_> {} } diff --git a/tokio/src/io/mod.rs b/tokio/src/io/mod.rs index e81054bf..1ca3fe69 100644 --- a/tokio/src/io/mod.rs +++ b/tokio/src/io/mod.rs @@ -207,10 +207,10 @@ cfg_io_driver! { pub(crate) mod driver; mod poll_evented; - pub use poll_evented::PollEvented; + #[cfg(not(loom))] + pub(crate) use poll_evented::PollEvented; mod registration; - pub use registration::Registration; } cfg_io_std! { diff --git a/tokio/src/io/poll_evented.rs b/tokio/src/io/poll_evented.rs index 9054c3b8..2c943ea4 100644 --- a/tokio/src/io/poll_evented.rs +++ b/tokio/src/io/poll_evented.rs @@ -1,13 +1,12 @@ -use crate::io::driver::platform; -use crate::io::{AsyncRead, AsyncWrite, ReadBuf, Registration}; +use crate::io::driver::{Direction, Handle, ReadyEvent}; +use crate::io::registration::Registration; +use crate::io::{AsyncRead, AsyncWrite, ReadBuf}; use mio::event::Evented; use std::fmt; use std::io::{self, Read, Write}; use std::marker::Unpin; use std::pin::Pin; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering::Relaxed; use std::task::{Context, Poll}; cfg_io_driver! { @@ -53,37 +52,6 @@ cfg_io_driver! { /// [`TcpListener`] implements poll_accept by using [`poll_read_ready`] and /// [`clear_read_ready`]. /// - /// ```rust - /// use tokio::io::PollEvented; - /// - /// use futures::ready; - /// use mio::Ready; - /// use mio::net::{TcpStream, TcpListener}; - /// use std::io; - /// use std::task::{Context, Poll}; - /// - /// struct MyListener { - /// poll_evented: PollEvented, - /// } - /// - /// impl MyListener { - /// pub fn poll_accept(&mut self, cx: &mut Context<'_>) -> Poll> { - /// let ready = Ready::readable(); - /// - /// ready!(self.poll_evented.poll_read_ready(cx, ready))?; - /// - /// match self.poll_evented.get_ref().accept() { - /// Ok((socket, _)) => Poll::Ready(Ok(socket)), - /// Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - /// self.poll_evented.clear_read_ready(cx, ready)?; - /// Poll::Pending - /// } - /// Err(e) => Poll::Ready(Err(e)), - /// } - /// } - /// } - /// ``` - /// /// ## Platform-specific events /// /// `PollEvented` also allows receiving platform-specific `mio::Ready` events. @@ -101,66 +69,14 @@ cfg_io_driver! { /// [`clear_write_ready`]: method@Self::clear_write_ready /// [`poll_read_ready`]: method@Self::poll_read_ready /// [`poll_write_ready`]: method@Self::poll_write_ready - pub struct PollEvented { + pub(crate) struct PollEvented { io: Option, - inner: Inner, + registration: Registration, } } -struct Inner { - registration: Registration, - - /// Currently visible read readiness - read_readiness: AtomicUsize, - - /// Currently visible write readiness - write_readiness: AtomicUsize, -} - // ===== impl PollEvented ===== -macro_rules! poll_ready { - ($me:expr, $mask:expr, $cache:ident, $take:ident, $poll:expr) => {{ - // Load cached & encoded readiness. 
- let mut cached = $me.inner.$cache.load(Relaxed); - let mask = $mask | platform::hup() | platform::error(); - - // See if the current readiness matches any bits. - let mut ret = mio::Ready::from_usize(cached) & $mask; - - if ret.is_empty() { - // Readiness does not match, consume the registration's readiness - // stream. This happens in a loop to ensure that the stream gets - // drained. - loop { - let ready = match $poll? { - Poll::Ready(v) => v, - Poll::Pending => return Poll::Pending, - }; - cached |= ready.as_usize(); - - // Update the cache store - $me.inner.$cache.store(cached, Relaxed); - - ret |= ready & mask; - - if !ret.is_empty() { - return Poll::Ready(Ok(ret)); - } - } - } else { - // Check what's new with the registration stream. This will not - // request to be notified - if let Some(ready) = $me.inner.registration.$take()? { - cached |= ready.as_usize(); - $me.inner.$cache.store(cached, Relaxed); - } - - Poll::Ready(Ok(mio::Ready::from_usize(cached))) - } - }}; -} - impl PollEvented where E: Evented, @@ -174,7 +90,8 @@ where /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. - pub fn new(io: E) -> io::Result { + #[cfg_attr(feature = "signal", allow(unused))] + pub(crate) fn new(io: E) -> io::Result { PollEvented::new_with_ready(io, mio::Ready::all()) } @@ -202,27 +119,39 @@ where /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. - pub fn new_with_ready(io: E, ready: mio::Ready) -> io::Result { - let registration = Registration::new_with_ready(&io, ready)?; + #[cfg_attr(feature = "signal", allow(unused))] + pub(crate) fn new_with_ready(io: E, ready: mio::Ready) -> io::Result { + Self::new_with_ready_and_handle(io, ready, Handle::current()) + } + + pub(crate) fn new_with_ready_and_handle( + io: E, + ready: mio::Ready, + handle: Handle, + ) -> io::Result { + let registration = Registration::new_with_ready_and_handle(&io, ready, handle)?; Ok(Self { io: Some(io), - inner: Inner { - registration, - read_readiness: AtomicUsize::new(0), - write_readiness: AtomicUsize::new(0), - }, + registration, }) } /// Returns a shared reference to the underlying I/O object this readiness /// stream is wrapping. - pub fn get_ref(&self) -> &E { + #[cfg(any( + feature = "process", + feature = "tcp", + feature = "udp", + feature = "uds", + feature = "signal" + ))] + pub(crate) fn get_ref(&self) -> &E { self.io.as_ref().unwrap() } /// Returns a mutable reference to the underlying I/O object this readiness /// stream is wrapping. - pub fn get_mut(&mut self) -> &mut E { + pub(crate) fn get_mut(&mut self) -> &mut E { self.io.as_mut().unwrap() } @@ -234,12 +163,17 @@ where /// Note that deregistering does not guarantee that the I/O resource can be /// registered with a different reactor. Some I/O resource types can only be /// associated with a single reactor instance for their lifetime. 
- pub fn into_inner(mut self) -> io::Result { + #[cfg(any(feature = "tcp", feature = "udp", feature = "uds"))] + pub(crate) fn into_inner(mut self) -> io::Result { let io = self.io.take().unwrap(); - self.inner.registration.deregister(&io)?; + self.registration.deregister(&io)?; Ok(io) } + pub(crate) fn clear_readiness(&self, event: ReadyEvent) { + self.registration.clear_readiness(event); + } + /// Checks the I/O resource's read readiness state. /// /// The mask argument allows specifying what readiness to notify on. This @@ -266,51 +200,8 @@ where /// /// This method may not be called concurrently. It takes `&self` to allow /// calling it concurrently with `poll_write_ready`. - pub fn poll_read_ready( - &self, - cx: &mut Context<'_>, - mask: mio::Ready, - ) -> Poll> { - assert!(!mask.is_writable(), "cannot poll for write readiness"); - poll_ready!( - self, - mask, - read_readiness, - take_read_ready, - self.inner.registration.poll_read_ready(cx) - ) - } - - /// Clears the I/O resource's read readiness state and registers the current - /// task to be notified once a read readiness event is received. - /// - /// After calling this function, `poll_read_ready` will return - /// `Poll::Pending` until a new read readiness event has been received. - /// - /// The `mask` argument specifies the readiness bits to clear. This may not - /// include `writable` or `hup`. - /// - /// # Panics - /// - /// This function panics if: - /// - /// * `ready` includes writable or HUP - /// * called from outside of a task context. - pub fn clear_read_ready(&self, cx: &mut Context<'_>, ready: mio::Ready) -> io::Result<()> { - // Cannot clear write readiness - assert!(!ready.is_writable(), "cannot clear write readiness"); - assert!(!platform::is_hup(ready), "cannot clear HUP readiness"); - - self.inner - .read_readiness - .fetch_and(!ready.as_usize(), Relaxed); - - if self.poll_read_ready(cx, ready)?.is_ready() { - // Notify the current task - cx.waker().wake_by_ref(); - } - - Ok(()) + pub(crate) fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll> { + self.registration.poll_readiness(cx, Direction::Read) } /// Checks the I/O resource's write readiness state. @@ -337,41 +228,35 @@ where /// /// This method may not be called concurrently. It takes `&self` to allow /// calling it concurrently with `poll_read_ready`. - pub fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll> { - poll_ready!( - self, - mio::Ready::writable(), - write_readiness, - take_write_ready, - self.inner.registration.poll_write_ready(cx) - ) + pub(crate) fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll> { + self.registration.poll_readiness(cx, Direction::Write) } +} - /// Resets the I/O resource's write readiness state and registers the current - /// task to be notified once a write readiness event is received. - /// - /// This only clears writable readiness. HUP (on platforms that support HUP) - /// cannot be cleared as it is a final state. - /// - /// After calling this function, `poll_write_ready(Ready::writable())` will - /// return `NotReady` until a new write readiness event has been received. - /// - /// # Panics - /// - /// This function will panic if called from outside of a task context. - pub fn clear_write_ready(&self, cx: &mut Context<'_>) -> io::Result<()> { - let ready = mio::Ready::writable(); +cfg_io_readiness! 
{ + impl PollEvented + where + E: Evented, + { + pub(crate) async fn readiness(&self, interest: mio::Ready) -> io::Result { + self.registration.readiness(interest).await + } - self.inner - .write_readiness - .fetch_and(!ready.as_usize(), Relaxed); + pub(crate) async fn async_io(&self, interest: mio::Ready, mut op: F) -> io::Result + where + F: FnMut(&E) -> io::Result, + { + loop { + let event = self.readiness(interest).await?; - if self.poll_write_ready(cx)?.is_ready() { - // Notify the current task - cx.waker().wake_by_ref(); + match op(self.get_ref()) { + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.clear_readiness(event); + } + x => return x, + } + } } - - Ok(()) } } @@ -386,20 +271,22 @@ where cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { - ready!(self.poll_read_ready(cx, mio::Ready::readable()))?; + loop { + let ev = ready!(self.poll_read_ready(cx))?; - // We can't assume the `Read` won't look at the read buffer, - // so we have to force initialization here. - let r = (*self).get_mut().read(buf.initialize_unfilled()); + // We can't assume the `Read` won't look at the read buffer, + // so we have to force initialization here. + let r = (*self).get_mut().read(buf.initialize_unfilled()); - if is_wouldblock(&r) { - self.clear_read_ready(cx, mio::Ready::readable())?; - return Poll::Pending; - } + if is_wouldblock(&r) { + self.clear_readiness(ev); + continue; + } - Poll::Ready(r.map(|n| { - buf.add_filled(n); - })) + return Poll::Ready(r.map(|n| { + buf.add_filled(n); + })); + } } } @@ -412,29 +299,33 @@ where cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { - ready!(self.poll_write_ready(cx))?; + loop { + let ev = ready!(self.poll_write_ready(cx))?; - let r = (*self).get_mut().write(buf); + let r = (*self).get_mut().write(buf); - if is_wouldblock(&r) { - self.clear_write_ready(cx)?; - return Poll::Pending; - } + if is_wouldblock(&r) { + self.clear_readiness(ev); + continue; + } - Poll::Ready(r) + return Poll::Ready(r); + } } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - ready!(self.poll_write_ready(cx))?; + loop { + let ev = ready!(self.poll_write_ready(cx))?; - let r = (*self).get_mut().flush(); + let r = (*self).get_mut().flush(); - if is_wouldblock(&r) { - self.clear_write_ready(cx)?; - return Poll::Pending; - } + if is_wouldblock(&r) { + self.clear_readiness(ev); + continue; + } - Poll::Ready(r) + return Poll::Ready(r); + } } fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { @@ -459,7 +350,7 @@ impl Drop for PollEvented { fn drop(&mut self) { if let Some(io) = self.io.take() { // Ignore errors - let _ = self.inner.registration.deregister(&io); + let _ = self.registration.deregister(&io); } } } diff --git a/tokio/src/io/registration.rs b/tokio/src/io/registration.rs index b805d2b9..e4ec096f 100644 --- a/tokio/src/io/registration.rs +++ b/tokio/src/io/registration.rs @@ -1,4 +1,4 @@ -use crate::io::driver::{platform, Direction, Handle, ScheduledIo}; +use crate::io::driver::{Direction, Handle, ReadyEvent, ScheduledIo}; use crate::util::slab; use mio::{self, Evented}; @@ -38,7 +38,7 @@ cfg_io_driver! { /// [`poll_read_ready`]: method@Self::poll_read_ready` /// [`poll_write_ready`]: method@Self::poll_write_ready` #[derive(Debug)] - pub struct Registration { + pub(crate) struct Registration { /// Handle to the associated driver. handle: Handle, @@ -53,28 +53,6 @@ unsafe impl Sync for Registration {} // ===== impl Registration ===== impl Registration { - /// Registers the I/O resource with the default reactor. 
- /// - /// # Return - /// - /// - `Ok` if the registration happened successfully - /// - `Err` if an error was encountered during registration - /// - /// - /// # Panics - /// - /// This function panics if thread-local runtime is not set. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. - pub fn new(io: &T) -> io::Result - where - T: Evented, - { - Registration::new_with_ready(io, mio::Ready::all()) - } - /// Registers the I/O resource with the default reactor, for a specific `mio::Ready` state. /// `new_with_ready` should be used over `new` when you need control over the readiness state, /// such as when a file descriptor only allows reads. This does not add `hup` or `error` so if @@ -96,23 +74,6 @@ impl Registration { /// /// - `Ok` if the registration happened successfully /// - `Err` if an error was encountered during registration - /// - /// - /// # Panics - /// - /// This function panics if thread-local runtime is not set. - /// - /// The runtime is usually set implicitly when this function is called - /// from a future driven by a tokio runtime, otherwise runtime can be set - /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. - pub fn new_with_ready(io: &T, ready: mio::Ready) -> io::Result - where - T: Evented, - { - Self::new_with_ready_and_handle(io, ready, Handle::current()) - } - - /// Same as `new_with_ready` but also accepts an explicit handle. pub(crate) fn new_with_ready_and_handle( io: &T, ready: mio::Ready, @@ -149,7 +110,7 @@ impl Registration { /// no longer result in notifications getting sent for this registration. /// /// `Err` is returned if an error is encountered. - pub fn deregister(&mut self, io: &T) -> io::Result<()> + pub(super) fn deregister(&mut self, io: &T) -> io::Result<()> where T: Evented, { @@ -160,192 +121,36 @@ impl Registration { inner.deregister_source(io) } - /// Polls for events on the I/O resource's read readiness stream. - /// - /// If the I/O resource receives a new read readiness event since the last - /// call to `poll_read_ready`, it is returned. If it has not, the current - /// task is notified once a new event is received. - /// - /// All events except `HUP` are [edge-triggered]. Once `HUP` is returned, - /// the function will always return `Ready(HUP)`. This should be treated as - /// the end of the readiness stream. - /// - /// # Return value - /// - /// There are several possible return values: - /// - /// * `Poll::Ready(Ok(readiness))` means that the I/O resource has received - /// a new readiness event. The readiness value is included. - /// - /// * `Poll::Pending` means that no new readiness events have been received - /// since the last call to `poll_read_ready`. - /// - /// * `Poll::Ready(Err(err))` means that the registration has encountered an - /// error. This could represent a permanent internal error for example. - /// - /// [edge-triggered]: struct@mio::Poll#edge-triggered-and-level-triggered - /// - /// # Panics - /// - /// This function will panic if called from outside of a task context. 
- pub fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll> { - // Keep track of task budget - let coop = ready!(crate::coop::poll_proceed(cx)); - - let v = self.poll_ready(Direction::Read, Some(cx)).map_err(|e| { - coop.made_progress(); - e - })?; - match v { - Some(v) => { - coop.made_progress(); - Poll::Ready(Ok(v)) - } - None => Poll::Pending, - } - } - - /// Consume any pending read readiness event. - /// - /// This function is identical to [`poll_read_ready`] **except** that it - /// will not notify the current task when a new event is received. As such, - /// it is safe to call this function from outside of a task context. - /// - /// [`poll_read_ready`]: method@Self::poll_read_ready - pub fn take_read_ready(&self) -> io::Result> { - self.poll_ready(Direction::Read, None) - } - - /// Polls for events on the I/O resource's write readiness stream. - /// - /// If the I/O resource receives a new write readiness event since the last - /// call to `poll_write_ready`, it is returned. If it has not, the current - /// task is notified once a new event is received. - /// - /// All events except `HUP` are [edge-triggered]. Once `HUP` is returned, - /// the function will always return `Ready(HUP)`. This should be treated as - /// the end of the readiness stream. - /// - /// # Return value - /// - /// There are several possible return values: - /// - /// * `Poll::Ready(Ok(readiness))` means that the I/O resource has received - /// a new readiness event. The readiness value is included. - /// - /// * `Poll::Pending` means that no new readiness events have been received - /// since the last call to `poll_write_ready`. - /// - /// * `Poll::Ready(Err(err))` means that the registration has encountered an - /// error. This could represent a permanent internal error for example. - /// - /// [edge-triggered]: struct@mio::Poll#edge-triggered-and-level-triggered - /// - /// # Panics - /// - /// This function will panic if called from outside of a task context. - pub fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll> { - // Keep track of task budget - let coop = ready!(crate::coop::poll_proceed(cx)); - - let v = self.poll_ready(Direction::Write, Some(cx)).map_err(|e| { - coop.made_progress(); - e - })?; - match v { - Some(v) => { - coop.made_progress(); - Poll::Ready(Ok(v)) - } - None => Poll::Pending, - } - } - - /// Consumes any pending write readiness event. - /// - /// This function is identical to [`poll_write_ready`] **except** that it - /// will not notify the current task when a new event is received. As such, - /// it is safe to call this function from outside of a task context. - /// - /// [`poll_write_ready`]: method@Self::poll_write_ready - pub fn take_write_ready(&self) -> io::Result> { - self.poll_ready(Direction::Write, None) + pub(super) fn clear_readiness(&self, event: ReadyEvent) { + self.shared.clear_readiness(event); } /// Polls for events on the I/O resource's `direction` readiness stream. /// /// If called with a task context, notify the task when a new event is /// received. 
- fn poll_ready( + pub(super) fn poll_readiness( &self, + cx: &mut Context<'_>, direction: Direction, - cx: Option<&mut Context<'_>>, - ) -> io::Result> { - let inner = match self.handle.inner() { - Some(inner) => inner, - None => return Err(io::Error::new(io::ErrorKind::Other, "reactor gone")), - }; - - // If the task should be notified about new events, ensure that it has - // been registered - if let Some(ref cx) = cx { - inner.register(&self.shared, direction, cx.waker().clone()) + ) -> Poll> { + if self.handle.inner().is_none() { + return Poll::Ready(Err(io::Error::new(io::ErrorKind::Other, "reactor gone"))); } - let mask = direction.mask(); - let mask_no_hup = (mask - platform::hup() - platform::error()).as_usize(); - - // This consumes the current readiness state **except** for HUP and - // error. HUP and error are excluded because a) they are final states - // and never transitition out and b) both the read AND the write - // directions need to be able to obvserve these states. - // - // # Platform-specific behavior - // - // HUP and error readiness are platform-specific. On epoll platforms, - // HUP has specific conditions that must be met by both peers of a - // connection in order to be triggered. - // - // On epoll platforms, `EPOLLERR` is signaled through - // `UnixReady::error()` and is important to be observable by both read - // AND write. A specific case that `EPOLLERR` occurs is when the read - // end of a pipe is closed. When this occurs, a peer blocked by - // writing to the pipe should be notified. - let curr_ready = self - .shared - .set_readiness(None, |curr| curr & (!mask_no_hup)) - .unwrap_or_else(|_| unreachable!()); - - let mut ready = mask & mio::Ready::from_usize(curr_ready); - - if ready.is_empty() { - if let Some(cx) = cx { - // Update the task info - match direction { - Direction::Read => self.shared.reader.register_by_ref(cx.waker()), - Direction::Write => self.shared.writer.register_by_ref(cx.waker()), - } - - // Try again - let curr_ready = self - .shared - .set_readiness(None, |curr| curr & (!mask_no_hup)) - .unwrap(); - ready = mask & mio::Ready::from_usize(curr_ready); - } - } - - if ready.is_empty() { - Ok(None) - } else { - Ok(Some(ready)) - } + // Keep track of task budget + let coop = ready!(crate::coop::poll_proceed(cx)); + let ev = ready!(self.shared.poll_readiness(cx, direction)); + coop.made_progress(); + Poll::Ready(Ok(ev)) } } -impl Drop for Registration { - fn drop(&mut self) { - drop(self.shared.reader.take_waker()); - drop(self.shared.writer.take_waker()); +cfg_io_readiness! { + impl Registration { + pub(super) async fn readiness(&self, interest: mio::Ready) -> io::Result { + // TODO: does this need to return a `Result`? + Ok(self.shared.readiness(interest).await) + } } } diff --git a/tokio/src/lib.rs b/tokio/src/lib.rs index b31b478d..f057cedc 100644 --- a/tokio/src/lib.rs +++ b/tokio/src/lib.rs @@ -77,7 +77,6 @@ //! - `rt-core`: Enables `tokio::spawn` and the basic (single-threaded) scheduler. //! - `rt-threaded`: Enables the heavier, multi-threaded, work-stealing scheduler. //! - `rt-util`: Enables non-scheduler utilities. -//! - `io-driver`: Enables the `mio` based IO driver. //! - `io-util`: Enables the IO based `Ext` traits. //! - `io-std`: Enable `Stdout`, `Stdin` and `Stderr` types. //! - `net`: Enables `tokio::net` types such as `TcpStream`, `UnixStream` and `UdpSocket`. @@ -269,8 +268,7 @@ //! the [`AsyncRead`], [`AsyncWrite`], and [`AsyncBufRead`] traits. In addition, //! 
when the "io-util" feature flag is enabled, it also provides combinators and //! functions for working with these traits, forming as an asynchronous -//! counterpart to [`std::io`]. When the "io-driver" feature flag is enabled, it -//! also provides utilities for library authors implementing I/O resources. +//! counterpart to [`std::io`]. //! //! Tokio also includes APIs for performing various kinds of I/O and interacting //! with the operating system asynchronously. These include: diff --git a/tokio/src/macros/cfg.rs b/tokio/src/macros/cfg.rs index ff9f9481..1336c63d 100644 --- a/tokio/src/macros/cfg.rs +++ b/tokio/src/macros/cfg.rs @@ -129,7 +129,6 @@ macro_rules! cfg_io_driver { ($($item:item)*) => { $( #[cfg(feature = "io-driver")] - #[cfg_attr(docsrs, doc(cfg(feature = "io-driver")))] $item )* } @@ -144,6 +143,15 @@ macro_rules! cfg_not_io_driver { } } +macro_rules! cfg_io_readiness { + ($($item:item)*) => { + $( + #[cfg(feature = "io-readiness")] + $item + )* + } +} + macro_rules! cfg_io_std { ($($item:item)*) => { $( diff --git a/tokio/src/net/tcp/listener.rs b/tokio/src/net/tcp/listener.rs index 0d7bbdbb..323b8bca 100644 --- a/tokio/src/net/tcp/listener.rs +++ b/tokio/src/net/tcp/listener.rs @@ -205,15 +205,16 @@ impl TcpListener { &mut self, cx: &mut Context<'_>, ) -> Poll> { - ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?; + loop { + let ev = ready!(self.io.poll_read_ready(cx))?; - match self.io.get_ref().accept_std() { - Ok(pair) => Poll::Ready(Ok(pair)), - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io.clear_read_ready(cx, mio::Ready::readable())?; - Poll::Pending + match self.io.get_ref().accept_std() { + Ok(pair) => return Poll::Ready(Ok(pair)), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_readiness(ev); + } + Err(e) => return Poll::Ready(Err(e)), } - Err(e) => Poll::Ready(Err(e)), } } @@ -411,11 +412,6 @@ impl TryFrom for mio::net::TcpListener { type Error = io::Error; /// Consumes value, returning the mio I/O object. - /// - /// See [`PollEvented::into_inner`] for more details about - /// resource deregistration that happens during the call. - /// - /// [`PollEvented::into_inner`]: crate::io::PollEvented::into_inner fn try_from(value: TcpListener) -> Result { value.io.into_inner() } diff --git a/tokio/src/net/tcp/stream.rs b/tokio/src/net/tcp/stream.rs index e0348724..f4f705b4 100644 --- a/tokio/src/net/tcp/stream.rs +++ b/tokio/src/net/tcp/stream.rs @@ -299,15 +299,16 @@ impl TcpStream { cx: &mut Context<'_>, buf: &mut [u8], ) -> Poll> { - ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?; + loop { + let ev = ready!(self.io.poll_read_ready(cx))?; - match self.io.get_ref().peek(buf) { - Ok(ret) => Poll::Ready(Ok(ret)), - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io.clear_read_ready(cx, mio::Ready::readable())?; - Poll::Pending + match self.io.get_ref().peek(buf) { + Ok(ret) => return Poll::Ready(Ok(ret)), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_readiness(ev); + } + Err(e) => return Poll::Ready(Err(e)), } - Err(e) => Poll::Ready(Err(e)), } } @@ -703,26 +704,28 @@ impl TcpStream { cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { - ready!(self.io.poll_read_ready(cx, mio::Ready::readable()))?; - - // Safety: `TcpStream::read` will not peak at the maybe uinitialized bytes. 
- let b = - unsafe { &mut *(buf.unfilled_mut() as *mut [std::mem::MaybeUninit] as *mut [u8]) }; - match self.io.get_ref().read(b) { - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io.clear_read_ready(cx, mio::Ready::readable())?; - Poll::Pending - } - Ok(n) => { - // Safety: We trust `TcpStream::read` to have filled up `n` bytes - // in the buffer. - unsafe { - buf.assume_init(n); + loop { + let ev = ready!(self.io.poll_read_ready(cx))?; + + // Safety: `TcpStream::read` will not peek at the maybe uinitialized bytes. + let b = unsafe { + &mut *(buf.unfilled_mut() as *mut [std::mem::MaybeUninit] as *mut [u8]) + }; + match self.io.get_ref().read(b) { + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { + self.io.clear_readiness(ev); + } + Ok(n) => { + // Safety: We trust `TcpStream::read` to have filled up `n` bytes + // in the buffer. + unsafe { + buf.assume_init(n); + } + buf.add_filled(n); + return Poll::Ready(Ok(())); } - buf.add_filled(n); - Poll::Ready(Ok(())) + Err(e) => return Poll::Ready(Err(e)), } - Err(e) => Poll::Ready(Err(e)), } } @@ -731,14 +734,15 @@ impl TcpStream { cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { - ready!(self.io.poll_write_ready(cx))?; + loop { + let ev = ready!(self.io.poll_write_ready(cx))?; - match self.io.get_ref().write(buf) { - Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { - self.io.clear_write_ready(cx)?; -