diff --git a/Cargo.toml b/Cargo.toml index 45662bd8..3bc9084a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,6 +48,7 @@ std = [ "once_cell", "pin-utils", "slab", + "piper", ] alloc = [ "futures-core/alloc", @@ -76,6 +77,7 @@ pin-project-lite = { version = "0.1.4", optional = true } pin-utils = { version = "0.1.0-alpha.4", optional = true } slab = { version = "0.4.2", optional = true } smol = { path = "../smol", optional = true } +piper = { git = "https://github.com/stjepang/piper.git", branch = "master", optional = true } [dev-dependencies] femme = "1.3.0" diff --git a/src/net/tcp/listener.rs b/src/net/tcp/listener.rs index 9e15d40f..290da0d1 100644 --- a/src/net/tcp/listener.rs +++ b/src/net/tcp/listener.rs @@ -1,13 +1,13 @@ use std::future::Future; use std::net::SocketAddr; use std::pin::Pin; -use std::sync::Arc; -use crate::future; +use smol::Async; + use crate::io; -use crate::rt::Watcher; use crate::net::{TcpStream, ToSocketAddrs}; use crate::stream::Stream; +use crate::sync::Arc; use crate::task::{Context, Poll}; /// A TCP socket server, listening for connections. @@ -49,7 +49,7 @@ use crate::task::{Context, Poll}; /// ``` #[derive(Debug)] pub struct TcpListener { - watcher: Watcher, + watcher: Async, } impl TcpListener { @@ -79,11 +79,9 @@ impl TcpListener { let addrs = addrs.to_socket_addrs().await?; for addr in addrs { - match mio::net::TcpListener::bind(&addr) { - Ok(mio_listener) => { - return Ok(TcpListener { - watcher: Watcher::new(mio_listener), - }); + match Async::::bind(&addr) { + Ok(listener) => { + return Ok(TcpListener { watcher: listener }); } Err(err) => last_err = Some(err), } @@ -114,13 +112,9 @@ impl TcpListener { /// # Ok(()) }) } /// ``` pub async fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> { - let (io, addr) = - future::poll_fn(|cx| self.watcher.poll_read_with(cx, |inner| inner.accept_std())) - .await?; - - let mio_stream = mio::net::TcpStream::from_stream(io)?; + let (stream, addr) = self.watcher.accept().await?; let stream = TcpStream { - watcher: Arc::new(Watcher::new(mio_stream)), + watcher: Arc::new(stream), }; Ok((stream, addr)) } @@ -206,9 +200,8 @@ impl<'a> Stream for Incoming<'a> { impl From for TcpListener { /// Converts a `std::net::TcpListener` into its asynchronous equivalent. fn from(listener: std::net::TcpListener) -> TcpListener { - let mio_listener = mio::net::TcpListener::from_std(listener).unwrap(); TcpListener { - watcher: Watcher::new(mio_listener), + watcher: Async::new(listener).expect("TcpListener is known to be good"), } } } @@ -230,29 +223,29 @@ cfg_unix! { impl IntoRawFd for TcpListener { fn into_raw_fd(self) -> RawFd { - self.watcher.into_inner().into_raw_fd() + self.watcher.into_raw_fd() } } } cfg_windows! 
{
-    // use crate::os::windows::io::{AsRawHandle, FromRawHandle, IntoRawHandle, RawHandle};
-    //
-    // impl AsRawSocket for TcpListener {
-    //     fn as_raw_socket(&self) -> RawSocket {
-    //         self.raw_socket
-    //     }
-    // }
-    //
-    // impl FromRawSocket for TcpListener {
-    //     unsafe fn from_raw_socket(handle: RawSocket) -> TcpListener {
-    //         net::TcpListener::from_raw_socket(handle).try_into().unwrap()
-    //     }
-    // }
-    //
-    // impl IntoRawSocket for TcpListener {
-    //     fn into_raw_socket(self) -> RawSocket {
-    //         self.raw_socket
-    //     }
-    // }
+    use crate::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket};
+
+    impl AsRawSocket for TcpListener {
+        fn as_raw_socket(&self) -> RawSocket {
+            self.watcher.as_raw_socket()
+        }
+    }
+
+    impl FromRawSocket for TcpListener {
+        unsafe fn from_raw_socket(handle: RawSocket) -> TcpListener {
+            std::net::TcpListener::from_raw_socket(handle).into()
+        }
+    }
+
+    impl IntoRawSocket for TcpListener {
+        fn into_raw_socket(self) -> RawSocket {
+            self.watcher.into_raw_socket()
+        }
+    }
 }
diff --git a/src/net/tcp/stream.rs b/src/net/tcp/stream.rs
index 1f50e8f1..1d2d0ce1 100644
--- a/src/net/tcp/stream.rs
+++ b/src/net/tcp/stream.rs
@@ -1,12 +1,12 @@
-use std::io::{IoSlice, IoSliceMut, Read as _, Write as _};
+use std::io::{IoSlice, IoSliceMut};
 use std::net::SocketAddr;
 use std::pin::Pin;
-use std::sync::Arc;
 
-use crate::future;
+use smol::Async;
+
 use crate::io::{self, Read, Write};
-use crate::rt::Watcher;
 use crate::net::ToSocketAddrs;
+use crate::sync::Arc;
 use crate::task::{Context, Poll};
 
 /// A TCP stream between a local and a remote socket.
@@ -47,7 +47,7 @@ use crate::task::{Context, Poll};
 /// ```
 #[derive(Debug, Clone)]
 pub struct TcpStream {
-    pub(super) watcher: Arc<Watcher<mio::net::TcpStream>>,
+    pub(super) watcher: Arc<Async<std::net::TcpStream>>,
 }
 
 impl TcpStream {
@@ -75,28 +75,16 @@ impl TcpStream {
         let addrs = addrs.to_socket_addrs().await?;
 
         for addr in addrs {
-            // mio's TcpStream::connect is non-blocking and may just be in progress
-            // when it returns with `Ok`. We therefore wait for write readiness to
-            // be sure the connection has either been established or there was an
-            // error which we check for afterwards.
-            let watcher = match mio::net::TcpStream::connect(&addr) {
-                Ok(s) => Watcher::new(s),
+            match Async::<std::net::TcpStream>::connect(&addr).await {
+                Ok(stream) => {
+                    return Ok(TcpStream {
+                        watcher: Arc::new(stream),
+                    });
+                }
                 Err(e) => {
                     last_err = Some(e);
                     continue;
                 }
-            };
-
-            future::poll_fn(|cx| watcher.poll_write_ready(cx)).await;
-
-            match watcher.get_ref().take_error() {
-                Ok(None) => {
-                    return Ok(TcpStream {
-                        watcher: Arc::new(watcher),
-                    });
-                }
-                Ok(Some(e)) => last_err = Some(e),
-                Err(e) => last_err = Some(e),
             }
         }
 
@@ -214,7 +202,7 @@ impl TcpStream {
     /// # Ok(()) }) }
     /// ```
    pub async fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
-        future::poll_fn(|cx| self.watcher.poll_read_with(cx, |inner| inner.peek(buf))).await
+        self.watcher.peek(buf).await
    }
 
     /// Gets the value of the `TCP_NODELAY` option on this socket.
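For orientation, the shape of the new connect path can be exercised on its own. The sketch below is illustrative only (the helper name is invented); it leans on the same `Async::<std::net::TcpStream>::connect` call used in the hunk above, which resolves once the connection is established or has failed, so the old write-readiness/`take_error` dance is no longer needed.

use std::io;
use std::net::{SocketAddr, TcpStream};

use smol::Async;

// Hypothetical helper mirroring the rewritten `TcpStream::connect` loop above.
async fn try_connect(addrs: &[SocketAddr]) -> io::Result<Async<TcpStream>> {
    let mut last_err = None;
    for addr in addrs {
        // `connect` registers the socket with smol's reactor and completes
        // once the connection is ready, surfacing any connection error directly.
        match Async::<TcpStream>::connect(addr).await {
            Ok(stream) => return Ok(stream),
            Err(err) => last_err = Some(err),
        }
    }
    Err(last_err.unwrap_or_else(|| {
        io::Error::new(io::ErrorKind::InvalidInput, "could not connect to any address")
    }))
}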
@@ -317,7 +305,7 @@ impl Read for &TcpStream {
         cx: &mut Context<'_>,
         buf: &mut [u8],
     ) -> Poll<io::Result<usize>> {
-        self.watcher.poll_read_with(cx, |mut inner| inner.read(buf))
+        Pin::new(&mut &*self.watcher).poll_read(cx, buf)
     }
 }
 
@@ -353,26 +341,23 @@ impl Write for &TcpStream {
         cx: &mut Context<'_>,
         buf: &[u8],
     ) -> Poll<io::Result<usize>> {
-        self.watcher
-            .poll_write_with(cx, |mut inner| inner.write(buf))
+        Pin::new(&mut &*self.watcher).poll_write(cx, buf)
     }
 
     fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
-        self.watcher.poll_write_with(cx, |mut inner| inner.flush())
+        Pin::new(&mut &*self.watcher).poll_flush(cx)
     }
 
-    fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
-        self.shutdown(std::net::Shutdown::Write)?;
-        Poll::Ready(Ok(()))
+    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
+        Pin::new(&mut &*self.watcher).poll_close(cx)
     }
 }
 
 impl From<std::net::TcpStream> for TcpStream {
     /// Converts a `std::net::TcpStream` into its asynchronous equivalent.
     fn from(stream: std::net::TcpStream) -> TcpStream {
-        let mio_stream = mio::net::TcpStream::from_stream(stream).unwrap();
         TcpStream {
-            watcher: Arc::new(Watcher::new(mio_stream)),
+            watcher: Arc::new(Async::new(stream).expect("TcpStream is known to be good")),
         }
     }
 }
@@ -403,23 +388,23 @@ cfg_unix! {
 }
 
 cfg_windows! {
-    // use crate::os::windows::io::{AsRawHandle, FromRawHandle, IntoRawHandle, RawHandle};
-    //
-    // impl AsRawSocket for TcpStream {
-    //     fn as_raw_socket(&self) -> RawSocket {
-    //         self.raw_socket
-    //     }
-    // }
-    //
-    // impl FromRawSocket for TcpStream {
-    //     unsafe fn from_raw_socket(handle: RawSocket) -> TcpStream {
-    //         net::TcpStream::from_raw_socket(handle).try_into().unwrap()
-    //     }
-    // }
-    //
-    // impl IntoRawSocket for TcpListener {
-    //     fn into_raw_socket(self) -> RawSocket {
-    //         self.raw_socket
-    //     }
-    // }
+    use crate::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket};
+
+    impl AsRawSocket for TcpStream {
+        fn as_raw_socket(&self) -> RawSocket {
+            self.watcher.as_raw_socket()
+        }
+    }
+
+    impl FromRawSocket for TcpStream {
+        unsafe fn from_raw_socket(handle: RawSocket) -> TcpStream {
+            std::net::TcpStream::from_raw_socket(handle).into()
+        }
+    }
+
+    impl IntoRawSocket for TcpStream {
+        fn into_raw_socket(self) -> RawSocket {
+            self.watcher.into_raw_socket()
+        }
+    }
 }
diff --git a/src/net/udp/mod.rs b/src/net/udp/mod.rs
index 774478d3..3bc9ad77 100644
--- a/src/net/udp/mod.rs
+++ b/src/net/udp/mod.rs
@@ -2,9 +2,9 @@ use std::io;
 use std::net::SocketAddr;
 use std::net::{Ipv4Addr, Ipv6Addr};
 
-use crate::future;
+use smol::Async;
+
 use crate::net::ToSocketAddrs;
-use crate::rt::Watcher;
 use crate::utils::Context as _;
 
 /// A UDP socket.
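The `Pin::new(&mut &*self.watcher)` forwarding above (and the analogous code in the Unix socket files below) relies on one property: smol's `Async<T>`, including `&Async<T>`, already implements the async I/O traits whenever `&T` implements the blocking ones. A condensed sketch of that delegation shape, written against the `futures` crate's trait names directly (an assumption here; async-std re-exports them as its own `Read`/`Write`):

use std::net::TcpStream;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};

use futures::io::AsyncRead;
use smol::Async;

// Stand-in for the `TcpStream` wrapper used in this patch.
struct Stream {
    watcher: Arc<Async<TcpStream>>,
}

impl AsyncRead for &Stream {
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut [u8],
    ) -> Poll<std::io::Result<usize>> {
        // `&Async<TcpStream>` is itself `AsyncRead`, so the shared handle can
        // be polled directly; no hand-rolled readiness bookkeeping is needed.
        Pin::new(&mut &*self.watcher).poll_read(cx, buf)
    }
}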
@@ -45,7 +45,7 @@ use crate::utils::Context as _; /// ``` #[derive(Debug)] pub struct UdpSocket { - watcher: Watcher, + watcher: Async, } impl UdpSocket { @@ -69,16 +69,12 @@ impl UdpSocket { /// ``` pub async fn bind(addrs: A) -> io::Result { let mut last_err = None; - let addrs = addrs - .to_socket_addrs() - .await?; + let addrs = addrs.to_socket_addrs().await?; for addr in addrs { - match mio::net::UdpSocket::bind(&addr) { - Ok(mio_socket) => { - return Ok(UdpSocket { - watcher: Watcher::new(mio_socket), - }); + match Async::::bind(&addr) { + Ok(socket) => { + return Ok(UdpSocket { watcher: socket }); } Err(err) => last_err = Some(err), } @@ -153,12 +149,10 @@ impl UdpSocket { } }; - future::poll_fn(|cx| { - self.watcher - .poll_write_with(cx, |inner| inner.send_to(buf, &addr)) - }) - .await - .context(|| format!("could not send packet to {}", addr)) + self.watcher + .send_to(buf, addr) + .await + .context(|| format!("could not send packet to {}", addr)) } /// Receives data from the socket. @@ -181,22 +175,7 @@ impl UdpSocket { /// # Ok(()) }) } /// ``` pub async fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { - future::poll_fn(|cx| { - self.watcher - .poll_read_with(cx, |inner| inner.recv_from(buf)) - }) - .await - .context(|| { - use std::fmt::Write; - - let mut error = String::from("could not receive data on "); - if let Ok(addr) = self.local_addr() { - let _ = write!(&mut error, "{}", addr); - } else { - error.push_str("socket"); - } - error - }) + self.watcher.recv_from(buf).await } /// Connects the UDP socket to a remote address. @@ -267,19 +246,7 @@ impl UdpSocket { /// # Ok(()) }) } /// ``` pub async fn send(&self, buf: &[u8]) -> io::Result { - future::poll_fn(|cx| self.watcher.poll_write_with(cx, |inner| inner.send(buf))) - .await - .context(|| { - use std::fmt::Write; - - let mut error = String::from("could not send data on "); - if let Ok(addr) = self.local_addr() { - let _ = write!(&mut error, "{}", addr); - } else { - error.push_str("socket"); - } - error - }) + self.watcher.send(buf).await } /// Receives data from the socket. @@ -303,19 +270,7 @@ impl UdpSocket { /// # Ok(()) }) } /// ``` pub async fn recv(&self, buf: &mut [u8]) -> io::Result { - future::poll_fn(|cx| self.watcher.poll_read_with(cx, |inner| inner.recv(buf))) - .await - .context(|| { - use std::fmt::Write; - - let mut error = String::from("could not receive data on "); - if let Ok(addr) = self.local_addr() { - let _ = write!(&mut error, "{}", addr); - } else { - error.push_str("socket"); - } - error - }) + self.watcher.recv(buf).await } /// Gets the value of the `SO_BROADCAST` option for this socket. @@ -498,9 +453,8 @@ impl UdpSocket { impl From for UdpSocket { /// Converts a `std::net::UdpSocket` into its asynchronous equivalent. fn from(socket: std::net::UdpSocket) -> UdpSocket { - let mio_socket = mio::net::UdpSocket::from_socket(socket).unwrap(); UdpSocket { - watcher: Watcher::new(mio_socket), + watcher: Async::new(socket).expect("UdpSocket is known to be good"), } } } @@ -522,29 +476,29 @@ cfg_unix! { impl IntoRawFd for UdpSocket { fn into_raw_fd(self) -> RawFd { - self.watcher.into_inner().into_raw_fd() + self.watcher.into_raw_fd() } } } cfg_windows! 
{ - // use crate::os::windows::io::{AsRawHandle, FromRawHandle, IntoRawHandle, RawHandle}; - // - // impl AsRawSocket for UdpSocket { - // fn as_raw_socket(&self) -> RawSocket { - // self.raw_socket - // } - // } - // - // impl FromRawSocket for UdpSocket { - // unsafe fn from_raw_socket(handle: RawSocket) -> UdpSocket { - // net::UdpSocket::from_raw_socket(handle).into() - // } - // } - // - // impl IntoRawSocket for UdpSocket { - // fn into_raw_socket(self) -> RawSocket { - // self.raw_socket - // } - // } + use crate::os::windows::io::{AsRawHandle, FromRawHandle, IntoRawHandle, RawHandle}; + + impl AsRawSocket for UdpSocket { + fn as_raw_socket(&self) -> RawSocket { + self.watcher.as_raw_socket() + } + } + + impl FromRawSocket for UdpSocket { + unsafe fn from_raw_socket(handle: RawSocket) -> UdpSocket { + net::UdpSocket::from_raw_socket(handle).into() + } + } + + impl IntoRawSocket for UdpSocket { + fn into_raw_socket(self) -> RawSocket { + self.watcher.into_raw_socket() + } + } } diff --git a/src/os/unix/net/datagram.rs b/src/os/unix/net/datagram.rs index 5a2d6ec9..6a98736c 100644 --- a/src/os/unix/net/datagram.rs +++ b/src/os/unix/net/datagram.rs @@ -2,16 +2,14 @@ use std::fmt; use std::net::Shutdown; +use std::os::unix::net::UnixDatagram as StdUnixDatagram; -use mio_uds; +use smol::Async; use super::SocketAddr; -use crate::future; use crate::io; -use crate::rt::Watcher; use crate::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; use crate::path::Path; -use crate::task::spawn_blocking; /// A Unix datagram socket. /// @@ -42,13 +40,13 @@ use crate::task::spawn_blocking; /// # Ok(()) }) } /// ``` pub struct UnixDatagram { - watcher: Watcher, + watcher: Async, } impl UnixDatagram { - fn new(socket: mio_uds::UnixDatagram) -> UnixDatagram { + fn new(socket: StdUnixDatagram) -> UnixDatagram { UnixDatagram { - watcher: Watcher::new(socket), + watcher: Async::new(socket).expect("UnixDatagram is known to be good"), } } @@ -67,8 +65,8 @@ impl UnixDatagram { /// ``` pub async fn bind>(path: P) -> io::Result { let path = path.as_ref().to_owned(); - let socket = spawn_blocking(move || mio_uds::UnixDatagram::bind(path)).await?; - Ok(UnixDatagram::new(socket)) + let socket = Async::::bind(path)?; + Ok(UnixDatagram { watcher: socket }) } /// Creates a Unix datagram which is not bound to any address. @@ -85,7 +83,7 @@ impl UnixDatagram { /// # Ok(()) }) } /// ``` pub fn unbound() -> io::Result { - let socket = mio_uds::UnixDatagram::unbound()?; + let socket = StdUnixDatagram::unbound()?; Ok(UnixDatagram::new(socket)) } @@ -105,7 +103,7 @@ impl UnixDatagram { /// # Ok(()) }) } /// ``` pub fn pair() -> io::Result<(UnixDatagram, UnixDatagram)> { - let (a, b) = mio_uds::UnixDatagram::pair()?; + let (a, b) = StdUnixDatagram::pair()?; let a = UnixDatagram::new(a); let b = UnixDatagram::new(b); Ok((a, b)) @@ -197,11 +195,7 @@ impl UnixDatagram { /// # Ok(()) }) } /// ``` pub async fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { - future::poll_fn(|cx| { - self.watcher - .poll_read_with(cx, |inner| inner.recv_from(buf)) - }) - .await + self.watcher.recv_from(buf).await } /// Receives data from the socket. @@ -222,7 +216,7 @@ impl UnixDatagram { /// # Ok(()) }) } /// ``` pub async fn recv(&self, buf: &mut [u8]) -> io::Result { - future::poll_fn(|cx| self.watcher.poll_read_with(cx, |inner| inner.recv(buf))).await + self.watcher.recv(buf).await } /// Sends data on the socket to the specified address. 
@@ -242,11 +236,7 @@ impl UnixDatagram { /// # Ok(()) }) } /// ``` pub async fn send_to>(&self, buf: &[u8], path: P) -> io::Result { - future::poll_fn(|cx| { - self.watcher - .poll_write_with(cx, |inner| inner.send_to(buf, path.as_ref())) - }) - .await + self.watcher.send_to(buf, path.as_ref()).await } /// Sends data on the socket to the socket's peer. @@ -267,7 +257,7 @@ impl UnixDatagram { /// # Ok(()) }) } /// ``` pub async fn send(&self, buf: &[u8]) -> io::Result { - future::poll_fn(|cx| self.watcher.poll_write_with(cx, |inner| inner.send(buf))).await + self.watcher.send(buf).await } /// Shut down the read, write, or both halves of this connection. @@ -312,19 +302,18 @@ impl fmt::Debug for UnixDatagram { } } -impl From for UnixDatagram { +impl From for UnixDatagram { /// Converts a `std::os::unix::net::UnixDatagram` into its asynchronous equivalent. - fn from(datagram: std::os::unix::net::UnixDatagram) -> UnixDatagram { - let mio_datagram = mio_uds::UnixDatagram::from_datagram(datagram).unwrap(); + fn from(datagram: StdUnixDatagram) -> UnixDatagram { UnixDatagram { - watcher: Watcher::new(mio_datagram), + watcher: Async::new(datagram).expect("UnixDatagram is known to be good"), } } } impl AsRawFd for UnixDatagram { fn as_raw_fd(&self) -> RawFd { - self.watcher.get_ref().as_raw_fd() + self.watcher.as_raw_fd() } } @@ -337,6 +326,6 @@ impl FromRawFd for UnixDatagram { impl IntoRawFd for UnixDatagram { fn into_raw_fd(self) -> RawFd { - self.watcher.into_inner().into_raw_fd() + self.watcher.into_raw_fd() } } diff --git a/src/os/unix/net/listener.rs b/src/os/unix/net/listener.rs index 9f6bdcbc..4099bd6f 100644 --- a/src/os/unix/net/listener.rs +++ b/src/os/unix/net/listener.rs @@ -1,20 +1,19 @@ //! Unix-specific networking extensions. use std::fmt; -use std::pin::Pin; use std::future::Future; +use std::os::unix::net::UnixListener as StdUnixListener; +use std::pin::Pin; -use mio_uds; +use smol::Async; use super::SocketAddr; use super::UnixStream; -use crate::future; use crate::io; -use crate::rt::Watcher; use crate::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; use crate::path::Path; use crate::stream::Stream; -use crate::task::{spawn_blocking, Context, Poll}; +use crate::task::{Context, Poll}; /// A Unix domain socket server, listening for connections. /// @@ -50,7 +49,7 @@ use crate::task::{spawn_blocking, Context, Poll}; /// # Ok(()) }) } /// ``` pub struct UnixListener { - watcher: Watcher, + watcher: Async, } impl UnixListener { @@ -69,11 +68,9 @@ impl UnixListener { /// ``` pub async fn bind>(path: P) -> io::Result { let path = path.as_ref().to_owned(); - let listener = spawn_blocking(move || mio_uds::UnixListener::bind(path)).await?; + let listener = Async::::bind(path)?; - Ok(UnixListener { - watcher: Watcher::new(listener), - }) + Ok(UnixListener { watcher: listener }) } /// Accepts a new incoming connection to this listener. @@ -93,29 +90,9 @@ impl UnixListener { /// # Ok(()) }) } /// ``` pub async fn accept(&self) -> io::Result<(UnixStream, SocketAddr)> { - future::poll_fn(|cx| { - let res = futures_core::ready!(self.watcher.poll_read_with(cx, |inner| { - match inner.accept_std() { - // Converting to `WouldBlock` so that the watcher will - // add the waker of this task to a list of readers. - Ok(None) => Err(io::ErrorKind::WouldBlock.into()), - res => res, - } - })); - - match res? 
{ - Some((io, addr)) => { - let mio_stream = mio_uds::UnixStream::from_stream(io)?; - let stream = UnixStream { - watcher: Watcher::new(mio_stream), - }; - Poll::Ready(Ok((stream, addr))) - } - // This should never happen since `None` is converted to `WouldBlock` - None => unreachable!(), - } - }) - .await + let (stream, addr) = self.watcher.accept().await?; + + Ok((UnixStream { watcher: stream }, addr)) } /// Returns a stream of incoming connections. @@ -206,19 +183,18 @@ impl Stream for Incoming<'_> { } } -impl From for UnixListener { +impl From for UnixListener { /// Converts a `std::os::unix::net::UnixListener` into its asynchronous equivalent. - fn from(listener: std::os::unix::net::UnixListener) -> UnixListener { - let mio_listener = mio_uds::UnixListener::from_listener(listener).unwrap(); + fn from(listener: StdUnixListener) -> UnixListener { UnixListener { - watcher: Watcher::new(mio_listener), + watcher: Async::new(listener).expect("UnixListener is known to be good"), } } } impl AsRawFd for UnixListener { fn as_raw_fd(&self) -> RawFd { - self.watcher.get_ref().as_raw_fd() + self.watcher.as_raw_fd() } } @@ -231,6 +207,6 @@ impl FromRawFd for UnixListener { impl IntoRawFd for UnixListener { fn into_raw_fd(self) -> RawFd { - self.watcher.into_inner().into_raw_fd() + self.watcher.into_raw_fd() } } diff --git a/src/os/unix/net/stream.rs b/src/os/unix/net/stream.rs index a1c83f1b..7320c85b 100644 --- a/src/os/unix/net/stream.rs +++ b/src/os/unix/net/stream.rs @@ -1,18 +1,17 @@ //! Unix-specific networking extensions. use std::fmt; -use std::io::{Read as _, Write as _}; use std::net::Shutdown; +use std::os::unix::net::UnixStream as StdUnixStream; use std::pin::Pin; -use mio_uds; +use smol::Async; use super::SocketAddr; use crate::io::{self, Read, Write}; -use crate::rt::Watcher; use crate::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}; use crate::path::Path; -use crate::task::{spawn_blocking, Context, Poll}; +use crate::task::{Context, Poll}; /// A Unix stream socket. /// @@ -38,7 +37,7 @@ use crate::task::{spawn_blocking, Context, Poll}; /// # Ok(()) }) } /// ``` pub struct UnixStream { - pub(super) watcher: Watcher, + pub(super) watcher: Async, } impl UnixStream { @@ -57,15 +56,9 @@ impl UnixStream { /// ``` pub async fn connect>(path: P) -> io::Result { let path = path.as_ref().to_owned(); + let stream = Async::::connect(path).await?; - spawn_blocking(move || { - let std_stream = std::os::unix::net::UnixStream::connect(path)?; - let mio_stream = mio_uds::UnixStream::from_stream(std_stream)?; - Ok(UnixStream { - watcher: Watcher::new(mio_stream), - }) - }) - .await + Ok(UnixStream { watcher: stream }) } /// Creates an unnamed pair of connected sockets. 
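The Unix domain pieces follow the same bind/connect/accept shape as the TCP code. A self-contained sketch (socket path and layout invented for illustration; it assumes the smol 0.1-era `Async` and `smol::run` APIs used throughout this patch):

use std::os::unix::net::{UnixListener, UnixStream};
use std::path::PathBuf;

use smol::Async;

fn main() -> std::io::Result<()> {
    // `smol::run` drives smol's executor and reactor while blocking on the future.
    smol::run(async {
        let path = PathBuf::from("/tmp/async-std-sketch.sock");
        let _ = std::fs::remove_file(&path);

        // Mirrors the new `UnixListener::bind`: no `spawn_blocking` detour.
        let listener = Async::<UnixListener>::bind(path.clone())?;

        // Mirrors `UnixStream::connect`; completes once the connection is made.
        let client = Async::<UnixStream>::connect(path).await?;

        // Mirrors the rewritten `UnixListener::accept`.
        let (server, _addr) = listener.accept().await?;

        drop((client, server));
        Ok(())
    })
}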
@@ -84,13 +77,9 @@ impl UnixStream { /// # Ok(()) }) } /// ``` pub fn pair() -> io::Result<(UnixStream, UnixStream)> { - let (a, b) = mio_uds::UnixStream::pair()?; - let a = UnixStream { - watcher: Watcher::new(a), - }; - let b = UnixStream { - watcher: Watcher::new(b), - }; + let (a, b) = Async::::pair()?; + let a = UnixStream { watcher: a }; + let b = UnixStream { watcher: b }; Ok((a, b)) } @@ -169,7 +158,7 @@ impl Read for &UnixStream { cx: &mut Context<'_>, buf: &mut [u8], ) -> Poll> { - self.watcher.poll_read_with(cx, |mut inner| inner.read(buf)) + Pin::new(&mut &self.watcher).poll_read(cx, buf) } } @@ -197,16 +186,15 @@ impl Write for &UnixStream { cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { - self.watcher - .poll_write_with(cx, |mut inner| inner.write(buf)) + Pin::new(&mut &self.watcher).poll_write(cx, buf) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.watcher.poll_write_with(cx, |mut inner| inner.flush()) + Pin::new(&mut &self.watcher).poll_flush(cx) } - fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut &self.watcher).poll_close(cx) } } @@ -227,19 +215,17 @@ impl fmt::Debug for UnixStream { } } -impl From for UnixStream { +impl From for UnixStream { /// Converts a `std::os::unix::net::UnixStream` into its asynchronous equivalent. - fn from(stream: std::os::unix::net::UnixStream) -> UnixStream { - let mio_stream = mio_uds::UnixStream::from_stream(stream).unwrap(); - UnixStream { - watcher: Watcher::new(mio_stream), - } + fn from(stream: StdUnixStream) -> UnixStream { + let stream = Async::new(stream).expect("UnixStream is known to be good"); + UnixStream { watcher: stream } } } impl AsRawFd for UnixStream { fn as_raw_fd(&self) -> RawFd { - self.watcher.get_ref().as_raw_fd() + self.watcher.as_raw_fd() } } @@ -252,6 +238,6 @@ impl FromRawFd for UnixStream { impl IntoRawFd for UnixStream { fn into_raw_fd(self) -> RawFd { - self.watcher.into_inner().into_raw_fd() + self.watcher.into_raw_fd() } } diff --git a/src/rt/mod.rs b/src/rt/mod.rs index 2149d242..d5d0d610 100644 --- a/src/rt/mod.rs +++ b/src/rt/mod.rs @@ -4,20 +4,20 @@ use std::thread; use once_cell::sync::Lazy; -use crate::utils::abort_on_panic; +use crate::future; -pub use reactor::{Reactor, Watcher}; -pub use runtime::Runtime; - -mod reactor; -mod runtime; +/// Dummy runtime struct. +pub struct Runtime {} /// The global runtime. pub static RUNTIME: Lazy = Lazy::new(|| { - thread::Builder::new() - .name("async-std/runtime".to_string()) - .spawn(|| abort_on_panic(|| RUNTIME.run())) - .expect("cannot start a runtime thread"); - - Runtime::new() + // Create an executor thread pool. + let num_threads = num_cpus::get().max(1); + for _ in 0..num_threads { + thread::Builder::new() + .name("async-std/runtime".to_string()) + .spawn(|| smol::run(future::pending::<()>())) + .expect("cannot start a runtime thread"); + } + Runtime {} }); diff --git a/src/rt/reactor.rs b/src/rt/reactor.rs deleted file mode 100644 index 2a35b72c..00000000 --- a/src/rt/reactor.rs +++ /dev/null @@ -1,354 +0,0 @@ -use std::fmt; -use std::sync::{Arc, Mutex}; -use std::time::Duration; - -use mio::{self, Evented}; -use slab::Slab; - -use crate::io; -use crate::rt::RUNTIME; -use crate::task::{Context, Poll, Waker}; - -/// Data associated with a registered I/O handle. -#[derive(Debug)] -struct Entry { - /// A unique identifier. 
- token: mio::Token, - - /// Tasks that are blocked on reading from this I/O handle. - readers: Mutex, - - /// Tasks that are blocked on writing to this I/O handle. - writers: Mutex, -} - -/// The state of a networking driver. -pub struct Reactor { - /// A mio instance that polls for new events. - poller: mio::Poll, - - /// A list into which mio stores events. - events: Mutex, - - /// A collection of registered I/O handles. - entries: Mutex>>, - - /// Dummy I/O handle that is only used to wake up the polling thread. - notify_reg: (mio::Registration, mio::SetReadiness), - - /// An identifier for the notification handle. - notify_token: mio::Token, -} - -/// The set of `Waker`s interested in read readiness. -#[derive(Debug)] -struct Readers { - /// Flag indicating read readiness. - /// (cf. `Watcher::poll_read_ready`) - ready: bool, - /// The `Waker`s blocked on reading. - wakers: Vec, -} - -/// The set of `Waker`s interested in write readiness. -#[derive(Debug)] -struct Writers { - /// Flag indicating write readiness. - /// (cf. `Watcher::poll_write_ready`) - ready: bool, - /// The `Waker`s blocked on writing. - wakers: Vec, -} - -impl Reactor { - /// Creates a new reactor for polling I/O events. - pub fn new() -> io::Result { - let poller = mio::Poll::new()?; - let notify_reg = mio::Registration::new2(); - - let mut reactor = Reactor { - poller, - events: Mutex::new(mio::Events::with_capacity(1000)), - entries: Mutex::new(Slab::new()), - notify_reg, - notify_token: mio::Token(0), - }; - - // Register a dummy I/O handle for waking up the polling thread. - let entry = reactor.register(&reactor.notify_reg.0)?; - reactor.notify_token = entry.token; - - Ok(reactor) - } - - /// Registers an I/O event source and returns its associated entry. - fn register(&self, source: &dyn Evented) -> io::Result> { - let mut entries = self.entries.lock().unwrap(); - - // Reserve a vacant spot in the slab and use its key as the token value. - let vacant = entries.vacant_entry(); - let token = mio::Token(vacant.key()); - - // Allocate an entry and insert it into the slab. - let entry = Arc::new(Entry { - token, - readers: Mutex::new(Readers { - ready: false, - wakers: Vec::new(), - }), - writers: Mutex::new(Writers { - ready: false, - wakers: Vec::new(), - }), - }); - vacant.insert(entry.clone()); - - // Register the I/O event source in the poller. - let interest = mio::Ready::all(); - let opts = mio::PollOpt::edge(); - self.poller.register(source, token, interest, opts)?; - - Ok(entry) - } - - /// Deregisters an I/O event source associated with an entry. - fn deregister(&self, source: &dyn Evented, entry: &Entry) -> io::Result<()> { - // Deregister the I/O object from the mio instance. - self.poller.deregister(source)?; - - // Remove the entry associated with the I/O object. - self.entries.lock().unwrap().remove(entry.token.0); - - Ok(()) - } - - /// Notifies the reactor so that polling stops blocking. - pub fn notify(&self) -> io::Result<()> { - self.notify_reg.1.set_readiness(mio::Ready::readable()) - } - - /// Waits on the poller for new events and wakes up tasks blocked on I/O handles. - /// - /// Returns `Ok(true)` if at least one new task was woken. - pub fn poll(&self, timeout: Option) -> io::Result { - let mut events = self.events.lock().unwrap(); - - // Block on the poller until at least one new event comes in. - self.poller.poll(&mut events, timeout)?; - - // Lock the entire entry table while we're processing new events. - let entries = self.entries.lock().unwrap(); - - // The number of woken tasks. 
- let mut progress = false; - - for event in events.iter() { - let token = event.token(); - - if token == self.notify_token { - // If this is the notification token, we just need the notification state. - self.notify_reg.1.set_readiness(mio::Ready::empty())?; - } else { - // Otherwise, look for the entry associated with this token. - if let Some(entry) = entries.get(token.0) { - // Set the readiness flags from this I/O event. - let readiness = event.readiness(); - - // Wake up reader tasks blocked on this I/O handle. - let reader_interests = mio::Ready::all() - mio::Ready::writable(); - if !(readiness & reader_interests).is_empty() { - let mut readers = entry.readers.lock().unwrap(); - readers.ready = true; - for w in readers.wakers.drain(..) { - w.wake(); - progress = true; - } - } - - // Wake up writer tasks blocked on this I/O handle. - let writer_interests = mio::Ready::all() - mio::Ready::readable(); - if !(readiness & writer_interests).is_empty() { - let mut writers = entry.writers.lock().unwrap(); - writers.ready = true; - for w in writers.wakers.drain(..) { - w.wake(); - progress = true; - } - } - } - } - } - - Ok(progress) - } -} - -/// An I/O handle powered by the networking driver. -/// -/// This handle wraps an I/O event source and exposes a "futurized" interface on top of it, -/// implementing traits `AsyncRead` and `AsyncWrite`. -pub struct Watcher { - /// Data associated with the I/O handle. - entry: Arc, - - /// The I/O event source. - source: Option, -} - -impl Watcher { - /// Creates a new I/O handle. - /// - /// The provided I/O event source will be kept registered inside the reactor's poller for the - /// lifetime of the returned I/O handle. - pub fn new(source: T) -> Watcher { - Watcher { - entry: RUNTIME - .reactor() - .register(&source) - .expect("cannot register an I/O event source"), - source: Some(source), - } - } - - /// Returns a reference to the inner I/O event source. - pub fn get_ref(&self) -> &T { - self.source.as_ref().unwrap() - } - - /// Polls the inner I/O source for a non-blocking read operation. - /// - /// If the operation returns an error of the `io::ErrorKind::WouldBlock` kind, the current task - /// will be registered for wakeup when the I/O source becomes readable. - pub fn poll_read_with<'a, F, R>(&'a self, cx: &mut Context<'_>, mut f: F) -> Poll> - where - F: FnMut(&'a T) -> io::Result, - { - // If the operation isn't blocked, return its result. - match f(self.source.as_ref().unwrap()) { - Err(err) if err.kind() == io::ErrorKind::WouldBlock => {} - res => return Poll::Ready(res), - } - - // Lock the waker list. - let mut readers = self.entry.readers.lock().unwrap(); - - // Try running the operation again. - match f(self.source.as_ref().unwrap()) { - Err(err) if err.kind() == io::ErrorKind::WouldBlock => {} - res => return Poll::Ready(res), - } - - // Register the task if it isn't registered already. - - if readers.wakers.iter().all(|w| !w.will_wake(cx.waker())) { - readers.wakers.push(cx.waker().clone()); - } - - Poll::Pending - } - - /// Polls the inner I/O source for a non-blocking write operation. - /// - /// If the operation returns an error of the `io::ErrorKind::WouldBlock` kind, the current task - /// will be registered for wakeup when the I/O source becomes writable. - pub fn poll_write_with<'a, F, R>( - &'a self, - cx: &mut Context<'_>, - mut f: F, - ) -> Poll> - where - F: FnMut(&'a T) -> io::Result, - { - // If the operation isn't blocked, return its result. 
- match f(self.source.as_ref().unwrap()) { - Err(err) if err.kind() == io::ErrorKind::WouldBlock => {} - res => return Poll::Ready(res), - } - - // Lock the waker list. - let mut writers = self.entry.writers.lock().unwrap(); - - // Try running the operation again. - match f(self.source.as_ref().unwrap()) { - Err(err) if err.kind() == io::ErrorKind::WouldBlock => {} - res => return Poll::Ready(res), - } - - // Register the task if it isn't registered already. - if writers.wakers.iter().all(|w| !w.will_wake(cx.waker())) { - writers.wakers.push(cx.waker().clone()); - } - - Poll::Pending - } - - /// Polls the inner I/O source until a non-blocking read can be performed. - /// - /// If non-blocking reads are currently not possible, the `Waker` - /// will be saved and notified when it can read non-blocking - /// again. - #[allow(dead_code)] - pub fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<()> { - // Lock the waker list. - let mut readers = self.entry.readers.lock().unwrap(); - if readers.ready { - return Poll::Ready(()); - } - // Register the task if it isn't registered already. - if readers.wakers.iter().all(|w| !w.will_wake(cx.waker())) { - readers.wakers.push(cx.waker().clone()); - } - Poll::Pending - } - - /// Polls the inner I/O source until a non-blocking write can be performed. - /// - /// If non-blocking writes are currently not possible, the `Waker` - /// will be saved and notified when it can write non-blocking - /// again. - pub fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<()> { - // Lock the waker list. - let mut writers = self.entry.writers.lock().unwrap(); - if writers.ready { - return Poll::Ready(()); - } - // Register the task if it isn't registered already. - if writers.wakers.iter().all(|w| !w.will_wake(cx.waker())) { - writers.wakers.push(cx.waker().clone()); - } - Poll::Pending - } - - /// Deregisters and returns the inner I/O source. - /// - /// This method is typically used to convert `Watcher`s to raw file descriptors/handles. - #[allow(dead_code)] - pub fn into_inner(mut self) -> T { - let source = self.source.take().unwrap(); - RUNTIME - .reactor() - .deregister(&source, &self.entry) - .expect("cannot deregister I/O event source"); - source - } -} - -impl Drop for Watcher { - fn drop(&mut self) { - if let Some(ref source) = self.source { - RUNTIME - .reactor() - .deregister(source, &self.entry) - .expect("cannot deregister I/O event source"); - } - } -} - -impl fmt::Debug for Watcher { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Watcher") - .field("entry", &self.entry) - .field("source", &self.source) - .finish() - } -} diff --git a/src/rt/runtime.rs b/src/rt/runtime.rs deleted file mode 100644 index a0d88b98..00000000 --- a/src/rt/runtime.rs +++ /dev/null @@ -1,415 +0,0 @@ -use std::cell::Cell; -use std::io; -use std::iter; -use std::ptr; -use std::sync::atomic::{self, Ordering}; -use std::sync::{Arc, Mutex}; -use std::thread; -use std::time::Duration; - -use crossbeam_deque::{Injector, Steal, Stealer, Worker}; -use crossbeam_utils::thread::scope; -use once_cell::unsync::OnceCell; - -use crate::rt::Reactor; -use crate::sync::Spinlock; -use crate::task::Runnable; -use crate::utils::{abort_on_panic, random}; - -thread_local! { - /// A reference to the current machine, if the current thread runs tasks. - static MACHINE: OnceCell> = OnceCell::new(); - - /// This flag is set to true whenever `task::yield_now()` is invoked. 
- static YIELD_NOW: Cell = Cell::new(false); -} - -struct Scheduler { - /// Set to `true` while a machine is polling the reactor. - polling: bool, - - /// Idle processors. - processors: Vec, - - /// Running machines. - machines: Vec>, -} - -/// An async runtime. -pub struct Runtime { - /// The reactor. - reactor: Reactor, - - /// The global queue of tasks. - injector: Injector, - - /// Handles to local queues for stealing work. - stealers: Vec>, - - /// The scheduler state. - sched: Mutex, -} - -impl Runtime { - /// Creates a new runtime. - pub fn new() -> Runtime { - let cpus = num_cpus::get().max(1); - let processors: Vec<_> = (0..cpus).map(|_| Processor::new()).collect(); - let stealers = processors.iter().map(|p| p.worker.stealer()).collect(); - - Runtime { - reactor: Reactor::new().unwrap(), - injector: Injector::new(), - stealers, - sched: Mutex::new(Scheduler { - processors, - machines: Vec::new(), - polling: false, - }), - } - } - - /// Returns a reference to the reactor. - pub fn reactor(&self) -> &Reactor { - &self.reactor - } - - /// Flushes the task slot so that tasks get run more fairly. - pub fn yield_now(&self) { - YIELD_NOW.with(|flag| flag.set(true)); - } - - /// Schedules a task. - pub fn schedule(&self, task: Runnable) { - MACHINE.with(|machine| { - // If the current thread is a worker thread, schedule it onto the current machine. - // Otherwise, push it into the global task queue. - match machine.get() { - None => { - self.injector.push(task); - self.notify(); - } - Some(m) => m.schedule(&self, task), - } - }); - } - - /// Runs the runtime on the current thread. - pub fn run(&self) { - scope(|s| { - let mut idle = 0; - let mut delay = 0; - - loop { - // Get a list of new machines to start, if any need to be started. - for m in self.make_machines() { - idle = 0; - - s.builder() - .name("async-std/machine".to_string()) - .spawn(move |_| { - abort_on_panic(|| { - let _ = MACHINE.with(|machine| machine.set(m.clone())); - m.run(self); - }) - }) - .expect("cannot start a machine thread"); - } - - // Sleep for a bit longer if the scheduler state hasn't changed in a while. - if idle > 10 { - delay = (delay * 2).min(10_000); - } else { - idle += 1; - delay = 1000; - } - - thread::sleep(Duration::from_micros(delay)); - } - }) - .unwrap(); - } - - /// Returns a list of machines that need to be started. - fn make_machines(&self) -> Vec> { - let mut sched = self.sched.lock().unwrap(); - let mut to_start = Vec::new(); - - // If no machine has been polling the reactor in a while, that means the runtime is - // overloaded with work and we need to start another machine. - if !sched.polling { - if let Some(p) = sched.processors.pop() { - let m = Arc::new(Machine::new(p)); - to_start.push(m.clone()); - sched.machines.push(m); - } - } - - to_start - } - - /// Unparks a thread polling the reactor. - fn notify(&self) { - atomic::fence(Ordering::SeqCst); - self.reactor.notify().unwrap(); - } - - /// Attempts to poll the reactor without blocking on it. - /// - /// Returns `Ok(true)` if at least one new task was woken. - /// - /// This function might not poll the reactor at all so do not rely on it doing anything. Only - /// use for optimization. - fn quick_poll(&self) -> io::Result { - if let Ok(sched) = self.sched.try_lock() { - if !sched.polling { - return self.reactor.poll(Some(Duration::from_secs(0))); - } - } - Ok(false) - } -} - -/// A thread running a processor. -struct Machine { - /// Holds the processor until it gets stolen. 
- processor: Spinlock>, -} - -impl Machine { - /// Creates a new machine running a processor. - fn new(p: Processor) -> Machine { - Machine { - processor: Spinlock::new(Some(p)), - } - } - - /// Schedules a task onto the machine. - fn schedule(&self, rt: &Runtime, task: Runnable) { - match self.processor.lock().as_mut() { - None => { - rt.injector.push(task); - rt.notify(); - } - Some(p) => p.schedule(rt, task), - } - } - - /// Finds the next runnable task. - fn find_task(&self, rt: &Runtime) -> Steal { - let mut retry = false; - - // First try finding a task in the local queue or in the global queue. - if let Some(p) = self.processor.lock().as_mut() { - if let Some(task) = p.pop_task() { - return Steal::Success(task); - } - - match p.steal_from_global(rt) { - Steal::Empty => {} - Steal::Retry => retry = true, - Steal::Success(task) => return Steal::Success(task), - } - } - - // Try polling the reactor, but don't block on it. - let progress = rt.quick_poll().unwrap(); - - // Try finding a task in the local queue, which might hold tasks woken by the reactor. If - // the local queue is still empty, try stealing from other processors. - if let Some(p) = self.processor.lock().as_mut() { - if progress { - if let Some(task) = p.pop_task() { - return Steal::Success(task); - } - } - - match p.steal_from_others(rt) { - Steal::Empty => {} - Steal::Retry => retry = true, - Steal::Success(task) => return Steal::Success(task), - } - } - - if retry { Steal::Retry } else { Steal::Empty } - } - - /// Runs the machine on the current thread. - fn run(&self, rt: &Runtime) { - /// Number of yields when no runnable task is found. - const YIELDS: u32 = 3; - /// Number of short sleeps when no runnable task in found. - const SLEEPS: u32 = 10; - /// Number of runs in a row before the global queue is inspected. - const RUNS: u32 = 64; - - // The number of times the thread found work in a row. - let mut runs = 0; - // The number of times the thread didn't find work in a row. - let mut fails = 0; - - loop { - // Check if `task::yield_now()` was invoked and flush the slot if so. - YIELD_NOW.with(|flag| { - if flag.replace(false) { - if let Some(p) = self.processor.lock().as_mut() { - p.flush_slot(rt); - } - } - }); - - // After a number of runs in a row, do some work to ensure no task is left behind - // indefinitely. Poll the reactor, steal tasks from the global queue, and flush the - // task slot. - if runs >= RUNS { - runs = 0; - rt.quick_poll().unwrap(); - - if let Some(p) = self.processor.lock().as_mut() { - if let Steal::Success(task) = p.steal_from_global(rt) { - p.schedule(rt, task); - } - - p.flush_slot(rt); - } - } - - // Try to find a runnable task. - if let Steal::Success(task) = self.find_task(rt) { - task.run(); - runs += 1; - fails = 0; - continue; - } - - fails += 1; - - // Yield the current thread a few times. - if fails <= YIELDS { - thread::yield_now(); - continue; - } - - // Put the current thread to sleep a few times. - if fails <= YIELDS + SLEEPS { - let opt_p = self.processor.lock().take(); - thread::sleep(Duration::from_micros(10)); - *self.processor.lock() = opt_p; - continue; - } - - let mut sched = rt.sched.lock().unwrap(); - - // One final check for available tasks while the scheduler is locked. 
- if let Some(task) = iter::repeat_with(|| self.find_task(rt)) - .find(|s| !s.is_retry()) - .and_then(|s| s.success()) - { - self.schedule(rt, task); - continue; - } - - // If another thread is already blocked on the reactor, there is no point in keeping - // the current thread around since there is too little work to do. - if sched.polling { - break; - } - - // Take out the machine associated with the current thread. - let m = match sched - .machines - .iter() - .position(|elem| ptr::eq(&**elem, self)) - { - None => break, // The processor was stolen. - Some(pos) => sched.machines.swap_remove(pos), - }; - - // Unlock the schedule poll the reactor until new I/O events arrive. - sched.polling = true; - drop(sched); - rt.reactor.poll(None).unwrap(); - - // Lock the scheduler again and re-register the machine. - sched = rt.sched.lock().unwrap(); - sched.polling = false; - sched.machines.push(m); - - runs = 0; - fails = 0; - } - - // When shutting down the thread, take the processor out if still available. - let opt_p = self.processor.lock().take(); - - // Return the processor to the scheduler and remove the machine. - if let Some(p) = opt_p { - let mut sched = rt.sched.lock().unwrap(); - sched.processors.push(p); - sched.machines.retain(|elem| !ptr::eq(&**elem, self)); - } - } -} - -struct Processor { - /// The local task queue. - worker: Worker, - - /// Contains the next task to run as an optimization that skips the queue. - slot: Option, -} - -impl Processor { - /// Creates a new processor. - fn new() -> Processor { - Processor { - worker: Worker::new_fifo(), - slot: None, - } - } - - /// Schedules a task to run on this processor. - fn schedule(&mut self, rt: &Runtime, task: Runnable) { - match self.slot.replace(task) { - None => {} - Some(task) => { - self.worker.push(task); - rt.notify(); - } - } - } - - /// Flushes a task from the slot into the local queue. - fn flush_slot(&mut self, rt: &Runtime) { - if let Some(task) = self.slot.take() { - self.worker.push(task); - rt.notify(); - } - } - - /// Pops a task from this processor. - fn pop_task(&mut self) -> Option { - self.slot.take().or_else(|| self.worker.pop()) - } - - /// Steals a task from the global queue. - fn steal_from_global(&self, rt: &Runtime) -> Steal { - rt.injector.steal_batch_and_pop(&self.worker) - } - - /// Steals a task from other processors. - fn steal_from_others(&self, rt: &Runtime) -> Steal { - // Pick a random starting point in the list of queues. - let len = rt.stealers.len(); - let start = random(len as u32) as usize; - - // Create an iterator over stealers that starts from the chosen point. - let (l, r) = rt.stealers.split_at(start); - let stealers = r.iter().chain(l.iter()); - - // Try stealing a batch of tasks from each queue. - stealers - .map(|s| s.steal_batch_and_pop(&self.worker)) - .collect() - } -} diff --git a/src/sync/mod.rs b/src/sync/mod.rs index 1531f8c5..217709b9 100644 --- a/src/sync/mod.rs +++ b/src/sync/mod.rs @@ -174,7 +174,10 @@ #![allow(clippy::needless_doctest_main)] #[doc(inline)] -pub use std::sync::{Arc, Weak}; +pub use std::sync::Weak; + +#[doc(inline)] +pub use piper::Arc; pub use mutex::{Mutex, MutexGuard}; pub use rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard}; @@ -194,8 +197,3 @@ cfg_unstable! { pub(crate) mod waker_set; pub(crate) use waker_set::WakerSet; - -cfg_default! 
{ - pub(crate) mod spin_lock; - pub(crate) use spin_lock::Spinlock; -} diff --git a/src/sync/spin_lock.rs b/src/sync/spin_lock.rs deleted file mode 100644 index 854b7e02..00000000 --- a/src/sync/spin_lock.rs +++ /dev/null @@ -1,89 +0,0 @@ -use std::cell::UnsafeCell; -use std::ops::{Deref, DerefMut}; -use std::sync::atomic::{AtomicBool, Ordering}; - -use crossbeam_utils::Backoff; - -/// A simple spinlock. -#[derive(Debug)] -pub struct Spinlock { - locked: AtomicBool, - value: UnsafeCell, -} - -unsafe impl Send for Spinlock {} -unsafe impl Sync for Spinlock {} - -impl Spinlock { - /// Returns a new spinlock initialized with `value`. - pub const fn new(value: T) -> Spinlock { - Spinlock { - locked: AtomicBool::new(false), - value: UnsafeCell::new(value), - } - } - - /// Locks the spinlock. - pub fn lock(&self) -> SpinlockGuard<'_, T> { - let backoff = Backoff::new(); - while self.locked.compare_and_swap(false, true, Ordering::Acquire) { - backoff.snooze(); - } - SpinlockGuard { parent: self } - } -} - -/// A guard holding a spinlock locked. -#[derive(Debug)] -pub struct SpinlockGuard<'a, T> { - parent: &'a Spinlock, -} - -unsafe impl Send for SpinlockGuard<'_, T> {} -unsafe impl Sync for SpinlockGuard<'_, T> {} - -impl<'a, T> Drop for SpinlockGuard<'a, T> { - fn drop(&mut self) { - self.parent.locked.store(false, Ordering::Release); - } -} - -impl<'a, T> Deref for SpinlockGuard<'a, T> { - type Target = T; - - fn deref(&self) -> &T { - unsafe { &*self.parent.value.get() } - } -} - -impl<'a, T> DerefMut for SpinlockGuard<'a, T> { - fn deref_mut(&mut self) -> &mut T { - unsafe { &mut *self.parent.value.get() } - } -} - -#[test] -fn spinlock() { - use std::sync::Arc; - - use crate::sync::{Spinlock}; - use crate::task; - - task::block_on(async { - - let m = Arc::new(Spinlock::new(0)); - let mut tasks = vec![]; - - for _ in 0..10 { - let m = m.clone(); - tasks.push(task::spawn(async move { - *m.lock() += 1; - })); - } - - for t in tasks { - t.await; - } - assert_eq!(*m.lock(), 10); - }) -} diff --git a/src/task/block_on.rs b/src/task/block_on.rs index 4bade5bd..41e0ca01 100644 --- a/src/task/block_on.rs +++ b/src/task/block_on.rs @@ -1,13 +1,8 @@ -use std::cell::Cell; use std::future::Future; -use std::mem::{self, ManuallyDrop}; -use std::sync::Arc; -use std::task::{RawWaker, RawWakerVTable}; -use crossbeam_utils::sync::Parker; use kv_log_macro::trace; -use crate::task::{Context, Poll, Task, Waker}; +use crate::task::Task; /// Spawns a task and blocks the current thread on its result. /// @@ -45,11 +40,11 @@ where parent_task_id: Task::get_current(|t| t.id().0).unwrap_or(0), }); - let future = async move { + let wrapped_future = async move { // Drop task-locals on exit. - defer! { - Task::get_current(|t| unsafe { t.drop_locals() }); - } + // defer! { + // Task::get_current(|t| unsafe { t.drop_locals() }); + // } // Log completion on exit. defer! { @@ -61,70 +56,8 @@ where future.await }; - // Run the future as a task. - unsafe { Task::set_current(&task, || run(future)) } -} - -/// Blocks the current thread on a future's result. -fn run(future: F) -> T -where - F: Future, -{ - thread_local! { - // May hold a pre-allocated parker that can be reused for efficiency. - // - // Note that each invocation of `block` needs its own parker. In particular, if `block` - // recursively calls itself, we must make sure that each recursive call uses a distinct - // parker instance. - static CACHE: Cell>> = Cell::new(None); - } - - // Virtual table for wakers based on `Arc`. 
- static VTABLE: RawWakerVTable = { - unsafe fn clone_raw(ptr: *const ()) -> RawWaker { - let arc = ManuallyDrop::new(Arc::from_raw(ptr as *const Parker)); - #[allow(clippy::redundant_clone)] - mem::forget(arc.clone()); - RawWaker::new(ptr, &VTABLE) - } - - unsafe fn wake_raw(ptr: *const ()) { - let arc = Arc::from_raw(ptr as *const Parker); - arc.unparker().unpark(); - } - - unsafe fn wake_by_ref_raw(ptr: *const ()) { - let arc = ManuallyDrop::new(Arc::from_raw(ptr as *const Parker)); - arc.unparker().unpark(); - } - - unsafe fn drop_raw(ptr: *const ()) { - drop(Arc::from_raw(ptr as *const Parker)) - } - - RawWakerVTable::new(clone_raw, wake_raw, wake_by_ref_raw, drop_raw) - }; - - // Pin the future on the stack. - pin_utils::pin_mut!(future); - - CACHE.with(|cache| { - // Reuse a cached parker or create a new one for this invocation of `block`. - let arc_parker: Arc = cache.take().unwrap_or_else(|| Arc::new(Parker::new())); - let ptr = (&*arc_parker as *const Parker) as *const (); - - // Create a waker and task context. - let waker = unsafe { ManuallyDrop::new(Waker::from_raw(RawWaker::new(ptr, &VTABLE))) }; - let cx = &mut Context::from_waker(&waker); + once_cell::sync::Lazy::force(&crate::rt::RUNTIME); - loop { - if let Poll::Ready(t) = future.as_mut().poll(cx) { - // Save the parker for the next invocation of `block`. - cache.set(Some(arc_parker)); - return t; - } - - arc_parker.park(); - } - }) + // Run the future as a task. + unsafe { Task::set_current(&task, || smol::block_on(wrapped_future)) } } diff --git a/src/task/builder.rs b/src/task/builder.rs index f1fef59e..898308c7 100644 --- a/src/task/builder.rs +++ b/src/task/builder.rs @@ -3,9 +3,7 @@ use std::future::Future; use kv_log_macro::trace; use crate::io; -use crate::rt::RUNTIME; use crate::task::{JoinHandle, Task}; -use crate::utils::abort_on_panic; /// Task builder that configures the settings of a new task. #[derive(Debug, Default)] @@ -42,11 +40,11 @@ impl Builder { parent_task_id: Task::get_current(|t| t.id().0).unwrap_or(0), }); - let future = async move { + let wrapped_future = async move { // Drop task-locals on exit. - defer! { - Task::get_current(|t| unsafe { t.drop_locals() }); - } + // defer! { + // Task::get_current(|t| unsafe { t.drop_locals() }); + // } // Log completion on exit. defer! { @@ -54,25 +52,12 @@ impl Builder { task_id: Task::get_current(|t| t.id().0), }); } - future.await }; - let schedule = move |t| RUNTIME.schedule(Runnable(t)); - let (task, handle) = async_task::spawn(future, schedule, task); - task.schedule(); - Ok(JoinHandle::new(handle)) - } -} - -/// A runnable task. -pub struct Runnable(async_task::Task); + once_cell::sync::Lazy::force(&crate::rt::RUNTIME); -impl Runnable { - /// Runs the task by polling its future once. - pub fn run(self) { - unsafe { - Task::set_current(self.0.tag(), || abort_on_panic(|| self.0.run())); - } + let task = smol::Task::spawn(wrapped_future); + Ok(JoinHandle::new(task)) } } diff --git a/src/task/join_handle.rs b/src/task/join_handle.rs index d929d11f..72fb2e0c 100644 --- a/src/task/join_handle.rs +++ b/src/task/join_handle.rs @@ -12,11 +12,11 @@ use crate::task::{Context, Poll, Task}; /// /// [spawned]: fn.spawn.html #[derive(Debug)] -pub struct JoinHandle(async_task::JoinHandle); +pub struct JoinHandle(smol::Task); impl JoinHandle { /// Creates a new `JoinHandle`. 
- pub(crate) fn new(inner: async_task::JoinHandle) -> JoinHandle { + pub(crate) fn new(inner: smol::Task) -> JoinHandle { JoinHandle(inner) } @@ -36,7 +36,7 @@ impl JoinHandle { /// # /// # }) pub fn task(&self) -> &Task { - self.0.tag() + todo!() } } @@ -44,10 +44,7 @@ impl Future for JoinHandle { type Output = T; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match Pin::new(&mut self.0).poll(cx) { - Poll::Pending => Poll::Pending, - Poll::Ready(None) => panic!("cannot await the result of a panicked task"), - Poll::Ready(Some(val)) => Poll::Ready(val), - } + dbg!("poll joinhandle"); + Pin::new(&mut self.0).poll(cx) } } diff --git a/src/task/mod.rs b/src/task/mod.rs index 56224a36..61917cd0 100644 --- a/src/task/mod.rs +++ b/src/task/mod.rs @@ -141,7 +141,6 @@ cfg_default! { pub use spawn::spawn; pub use task_local::{AccessError, LocalKey}; - pub(crate) use builder::Runnable; pub(crate) use task_local::LocalsMap; mod block_on; diff --git a/src/task/spawn_blocking.rs b/src/task/spawn_blocking.rs index 27143f76..d7b4fd0b 100644 --- a/src/task/spawn_blocking.rs +++ b/src/task/spawn_blocking.rs @@ -1,12 +1,4 @@ -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::thread; -use std::time::Duration; - -use crossbeam_channel::{unbounded, Receiver, Sender}; -use once_cell::sync::Lazy; - -use crate::task::{JoinHandle, Task}; -use crate::utils::abort_on_panic; +use crate::task::JoinHandle; /// Spawns a blocking task. /// @@ -43,80 +35,8 @@ where F: FnOnce() -> T + Send + 'static, T: Send + 'static, { - let schedule = |task| POOL.sender.send(task).unwrap(); - let (task, handle) = async_task::spawn(async { f() }, schedule, Task::new(None)); - task.schedule(); - JoinHandle::new(handle) -} - -type Runnable = async_task::Task; - -struct Pool { - sender: Sender, - receiver: Receiver, -} - -/// The number of sleeping worker threads. -static SLEEPING: AtomicUsize = AtomicUsize::new(0); - -static POOL: Lazy = Lazy::new(|| { - // Start a single worker thread waiting for the first task. - start_thread(); + once_cell::sync::Lazy::force(&crate::rt::RUNTIME); - let (sender, receiver) = unbounded(); - Pool { sender, receiver } -}); - -fn start_thread() { - SLEEPING.fetch_add(1, Ordering::SeqCst); - let timeout = Duration::from_secs(1); - - thread::Builder::new() - .name("async-std/blocking".to_string()) - .spawn(move || { - loop { - let mut task = match POOL.receiver.recv_timeout(timeout) { - Ok(task) => task, - Err(_) => { - // Check whether this is the last sleeping thread. - if SLEEPING.fetch_sub(1, Ordering::SeqCst) == 1 { - // If so, then restart the thread to make sure there is always at least - // one sleeping thread. - if SLEEPING.compare_and_swap(0, 1, Ordering::SeqCst) == 0 { - continue; - } - } - - // Stop the thread. - return; - } - }; - - // If there are no sleeping threads, then start one to make sure there is always at - // least one sleeping thread. - if SLEEPING.fetch_sub(1, Ordering::SeqCst) == 1 { - start_thread(); - } - - loop { - // Run the task. - abort_on_panic(|| task.run()); - - // Try taking another task if there are any available. - task = match POOL.receiver.try_recv() { - Ok(task) => task, - Err(_) => break, - }; - } - - // If there is at least one sleeping thread, stop this thread instead of putting it - // to sleep. 
- if SLEEPING.load(Ordering::SeqCst) > 0 { - return; - } - - SLEEPING.fetch_add(1, Ordering::SeqCst); - } - }) - .expect("cannot start a blocking thread"); + let handle = smol::Task::blocking(async move { f() }); + JoinHandle::new(handle) } diff --git a/src/task/yield_now.rs b/src/task/yield_now.rs index bdb08eb6..2b1fd0b9 100644 --- a/src/task/yield_now.rs +++ b/src/task/yield_now.rs @@ -43,10 +43,6 @@ impl Future for YieldNow { if !self.0 { self.0 = true; cx.waker().wake_by_ref(); - - #[cfg(feature = "default")] - crate::rt::RUNTIME.yield_now(); - Poll::Pending } else { Poll::Ready(()) diff --git a/src/utils.rs b/src/utils.rs index 4bdbd925..13ee16de 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -20,40 +20,6 @@ pub fn abort_on_panic(f: impl FnOnce() -> T) -> T { t } -/// Generates a random number in `0..n`. -#[cfg(any(feature = "unstable", feature = "default"))] -pub fn random(n: u32) -> u32 { - use std::cell::Cell; - use std::num::Wrapping; - - thread_local! { - static RNG: Cell> = { - // Take the address of a local value as seed. - let mut x = 0i32; - let r = &mut x; - let addr = r as *mut i32 as usize; - Cell::new(Wrapping(addr as u32)) - } - } - - RNG.with(|rng| { - // This is the 32-bit variant of Xorshift. - // - // Source: https://en.wikipedia.org/wiki/Xorshift - let mut x = rng.get(); - x ^= x << 13; - x ^= x >> 17; - x ^= x << 5; - rng.set(x); - - // This is a fast alternative to `x % n`. - // - // Author: Daniel Lemire - // Source: https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ - ((u64::from(x.0)).wrapping_mul(u64::from(n)) >> 32) as u32 - }) -} - /// Add additional context to errors pub(crate) trait Context { fn context(self, message: impl Fn() -> String) -> Self; diff --git a/tests/mutex.rs b/tests/mutex.rs index fd1c07b3..ebdd7520 100644 --- a/tests/mutex.rs +++ b/tests/mutex.rs @@ -40,24 +40,33 @@ fn contention() { let tx = Arc::new(tx); let mutex = Arc::new(Mutex::new(0)); - let num_tasks = 10000; + let num_tasks = 10; //000; + let mut handles = Vec::new(); for _ in 0..num_tasks { let tx = tx.clone(); let mutex = mutex.clone(); - task::spawn(async move { + dbg!("spawn"); + handles.push(task::spawn(async move { let mut lock = mutex.lock().await; *lock += 1; tx.unbounded_send(()).unwrap(); drop(lock); - }); + })); } - for _ in 0..num_tasks { + for i in 0..num_tasks { + dbg!(i); rx.next().await.unwrap(); } + for handle in handles.into_iter() { + handle.await; + } + + dbg!("wait"); + let lock = mutex.lock().await; assert_eq!(num_tasks, *lock); });
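Stepping back, the runtime this patch leaves behind reduces to a small bootstrap: a pool of threads that drive smol's executor and reactor forever, plus `smol::block_on` on the calling thread. A condensed, illustrative sketch of that arrangement (names invented; it uses the `futures` crate's `future::pending` where the real code uses `crate::future::pending`, and otherwise only the `smol`, `num_cpus`, and `once_cell` APIs already relied on above):

use std::thread;

use futures::future;
use once_cell::sync::Lazy;

// One executor thread per core; `future::pending` never completes, so each
// thread just keeps running smol's executor and reactor.
static EXECUTOR: Lazy<()> = Lazy::new(|| {
    for _ in 0..num_cpus::get().max(1) {
        thread::Builder::new()
            .name("async-std/runtime".to_string())
            .spawn(|| smol::run(future::pending::<()>()))
            .expect("cannot start a runtime thread");
    }
});

// Counterpart of the new `task::block_on`: make sure the executor threads
// exist, then block the current thread on the future.
fn block_on<T>(fut: impl std::future::Future<Output = T>) -> T {
    Lazy::force(&EXECUTOR);
    smol::block_on(fut)
}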