forked from mirror/async-std

Compare commits: master...fix/schedu

6 Commits

Author | SHA1 | Date
---|---|---
dignifiedquire | 124aa76c4e | 5 years ago
dignifiedquire | 77d3693112 | 5 years ago
dignifiedquire | 00b8366d55 | 5 years ago
dignifiedquire | 6306ad9df1 | 5 years ago
dignifiedquire | 546ad3d287 | 5 years ago
dignifiedquire | bc8677ed09 | 5 years ago
@@ -1,34 +1,23 @@
 //! The runtime.
 
-use std::env;
 use std::thread;
 
 use once_cell::sync::Lazy;
 
-use crate::future;
+use crate::utils::abort_on_panic;
 
-/// Dummy runtime struct.
-pub struct Runtime {}
+pub use reactor::{Reactor, Watcher};
+pub use runtime::Runtime;
+
+mod reactor;
+mod runtime;
 
 /// The global runtime.
 pub static RUNTIME: Lazy<Runtime> = Lazy::new(|| {
-    // Create an executor thread pool.
-    let thread_count = env::var("ASYNC_STD_THREAD_COUNT")
-        .map(|env| {
-            env.parse()
-                .expect("ASYNC_STD_THREAD_COUNT must be a number")
-        })
-        .unwrap_or_else(|_| num_cpus::get())
-        .max(1);
-
-    let thread_name = env::var("ASYNC_STD_THREAD_NAME").unwrap_or("async-std/runtime".to_string());
-
-    for _ in 0..thread_count {
-        thread::Builder::new()
-            .name(thread_name.clone())
-            .spawn(|| crate::task::block_on(future::pending::<()>()))
-            .expect("cannot start a runtime thread");
-    }
-
-    Runtime {}
+    thread::Builder::new()
+        .name("async-std/runtime".to_string())
+        .spawn(|| abort_on_panic(|| RUNTIME.run()))
+        .expect("cannot start a runtime thread");
+
+    Runtime::new()
 });
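The change above removes the old dummy thread pool and makes the global `RUNTIME` spawn a single scheduler thread on first use. As a reading aid, here is a minimal standalone sketch of that lazy-initialization pattern; the `Service`/`GLOBAL` names and the `main` driver are illustrative assumptions, not part of this diff:

```rust
use std::thread;
use std::time::Duration;

use once_cell::sync::Lazy;

struct Service;

impl Service {
    fn new() -> Service {
        Service
    }

    fn run(&self) {
        // The real runtime would drive its scheduler loop here.
        println!("service thread running");
    }
}

static GLOBAL: Lazy<Service> = Lazy::new(|| {
    // This closure runs exactly once, on first access to `GLOBAL`.
    // The spawned thread may dereference `GLOBAL` because `Lazy` blocks
    // concurrent accesses until this initializer has returned.
    thread::Builder::new()
        .name("example/service".to_string())
        .spawn(|| GLOBAL.run())
        .expect("cannot start the service thread");

    Service::new()
});

fn main() {
    // First touch initializes the global and spawns the background thread.
    Lazy::force(&GLOBAL);
    thread::sleep(Duration::from_millis(50));
}
```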
@@ -0,0 +1,354 @@
use std::fmt;
use std::sync::{Arc, Mutex};
use std::time::Duration;

use mio::{self, Evented};
use slab::Slab;

use crate::io;
use crate::rt::RUNTIME;
use crate::task::{Context, Poll, Waker};

/// Data associated with a registered I/O handle.
#[derive(Debug)]
struct Entry {
    /// A unique identifier.
    token: mio::Token,

    /// Tasks that are blocked on reading from this I/O handle.
    readers: Mutex<Readers>,

    /// Tasks that are blocked on writing to this I/O handle.
    writers: Mutex<Writers>,
}

/// The state of a networking driver.
pub struct Reactor {
    /// A mio instance that polls for new events.
    poller: mio::Poll,

    /// A list into which mio stores events.
    events: Mutex<mio::Events>,

    /// A collection of registered I/O handles.
    entries: Mutex<Slab<Arc<Entry>>>,

    /// Dummy I/O handle that is only used to wake up the polling thread.
    notify_reg: (mio::Registration, mio::SetReadiness),

    /// An identifier for the notification handle.
    notify_token: mio::Token,
}

/// The set of `Waker`s interested in read readiness.
#[derive(Debug)]
struct Readers {
    /// Flag indicating read readiness.
    /// (cf. `Watcher::poll_read_ready`)
    ready: bool,
    /// The `Waker`s blocked on reading.
    wakers: Vec<Waker>,
}

/// The set of `Waker`s interested in write readiness.
#[derive(Debug)]
struct Writers {
    /// Flag indicating write readiness.
    /// (cf. `Watcher::poll_write_ready`)
    ready: bool,
    /// The `Waker`s blocked on writing.
    wakers: Vec<Waker>,
}

impl Reactor {
    /// Creates a new reactor for polling I/O events.
    pub fn new() -> io::Result<Reactor> {
        let poller = mio::Poll::new()?;
        let notify_reg = mio::Registration::new2();

        let mut reactor = Reactor {
            poller,
            events: Mutex::new(mio::Events::with_capacity(1000)),
            entries: Mutex::new(Slab::new()),
            notify_reg,
            notify_token: mio::Token(0),
        };

        // Register a dummy I/O handle for waking up the polling thread.
        let entry = reactor.register(&reactor.notify_reg.0)?;
        reactor.notify_token = entry.token;

        Ok(reactor)
    }

    /// Registers an I/O event source and returns its associated entry.
    fn register(&self, source: &dyn Evented) -> io::Result<Arc<Entry>> {
        let mut entries = self.entries.lock().unwrap();

        // Reserve a vacant spot in the slab and use its key as the token value.
        let vacant = entries.vacant_entry();
        let token = mio::Token(vacant.key());

        // Allocate an entry and insert it into the slab.
        let entry = Arc::new(Entry {
            token,
            readers: Mutex::new(Readers {
                ready: false,
                wakers: Vec::new(),
            }),
            writers: Mutex::new(Writers {
                ready: false,
                wakers: Vec::new(),
            }),
        });
        vacant.insert(entry.clone());

        // Register the I/O event source in the poller.
        let interest = mio::Ready::all();
        let opts = mio::PollOpt::edge();
        self.poller.register(source, token, interest, opts)?;

        Ok(entry)
    }

    /// Deregisters an I/O event source associated with an entry.
    fn deregister(&self, source: &dyn Evented, entry: &Entry) -> io::Result<()> {
        // Deregister the I/O object from the mio instance.
        self.poller.deregister(source)?;

        // Remove the entry associated with the I/O object.
        self.entries.lock().unwrap().remove(entry.token.0);

        Ok(())
    }

    /// Notifies the reactor so that polling stops blocking.
    pub fn notify(&self) -> io::Result<()> {
        self.notify_reg.1.set_readiness(mio::Ready::readable())
    }

    /// Waits on the poller for new events and wakes up tasks blocked on I/O handles.
    ///
    /// Returns `Ok(true)` if at least one new task was woken.
    pub fn poll(&self, timeout: Option<Duration>) -> io::Result<bool> {
        let mut events = self.events.lock().unwrap();

        // Block on the poller until at least one new event comes in.
        self.poller.poll(&mut events, timeout)?;

        // Lock the entire entry table while we're processing new events.
        let entries = self.entries.lock().unwrap();

        // The number of woken tasks.
        let mut progress = false;

        for event in events.iter() {
            let token = event.token();

            if token == self.notify_token {
                // If this is the notification token, we just need the notification state.
                self.notify_reg.1.set_readiness(mio::Ready::empty())?;
            } else {
                // Otherwise, look for the entry associated with this token.
                if let Some(entry) = entries.get(token.0) {
                    // Set the readiness flags from this I/O event.
                    let readiness = event.readiness();

                    // Wake up reader tasks blocked on this I/O handle.
                    let reader_interests = mio::Ready::all() - mio::Ready::writable();
                    if !(readiness & reader_interests).is_empty() {
                        let mut readers = entry.readers.lock().unwrap();
                        readers.ready = true;
                        for w in readers.wakers.drain(..) {
                            w.wake();
                            progress = true;
                        }
                    }

                    // Wake up writer tasks blocked on this I/O handle.
                    let writer_interests = mio::Ready::all() - mio::Ready::readable();
                    if !(readiness & writer_interests).is_empty() {
                        let mut writers = entry.writers.lock().unwrap();
                        writers.ready = true;
                        for w in writers.wakers.drain(..) {
                            w.wake();
                            progress = true;
                        }
                    }
                }
            }
        }

        Ok(progress)
    }
}

/// An I/O handle powered by the networking driver.
///
/// This handle wraps an I/O event source and exposes a "futurized" interface on top of it,
/// implementing traits `AsyncRead` and `AsyncWrite`.
pub struct Watcher<T: Evented> {
    /// Data associated with the I/O handle.
    entry: Arc<Entry>,

    /// The I/O event source.
    source: Option<T>,
}

impl<T: Evented> Watcher<T> {
    /// Creates a new I/O handle.
    ///
    /// The provided I/O event source will be kept registered inside the reactor's poller for the
    /// lifetime of the returned I/O handle.
    pub fn new(source: T) -> Watcher<T> {
        Watcher {
            entry: RUNTIME
                .reactor()
                .register(&source)
                .expect("cannot register an I/O event source"),
            source: Some(source),
        }
    }

    /// Returns a reference to the inner I/O event source.
    pub fn get_ref(&self) -> &T {
        self.source.as_ref().unwrap()
    }

    /// Polls the inner I/O source for a non-blocking read operation.
    ///
    /// If the operation returns an error of the `io::ErrorKind::WouldBlock` kind, the current task
    /// will be registered for wakeup when the I/O source becomes readable.
    pub fn poll_read_with<'a, F, R>(&'a self, cx: &mut Context<'_>, mut f: F) -> Poll<io::Result<R>>
    where
        F: FnMut(&'a T) -> io::Result<R>,
    {
        // If the operation isn't blocked, return its result.
        match f(self.source.as_ref().unwrap()) {
            Err(err) if err.kind() == io::ErrorKind::WouldBlock => {}
            res => return Poll::Ready(res),
        }

        // Lock the waker list.
        let mut readers = self.entry.readers.lock().unwrap();

        // Try running the operation again.
        match f(self.source.as_ref().unwrap()) {
            Err(err) if err.kind() == io::ErrorKind::WouldBlock => {}
            res => return Poll::Ready(res),
        }

        // Register the task if it isn't registered already.
        if readers.wakers.iter().all(|w| !w.will_wake(cx.waker())) {
            readers.wakers.push(cx.waker().clone());
        }

        Poll::Pending
    }

    /// Polls the inner I/O source for a non-blocking write operation.
    ///
    /// If the operation returns an error of the `io::ErrorKind::WouldBlock` kind, the current task
    /// will be registered for wakeup when the I/O source becomes writable.
    pub fn poll_write_with<'a, F, R>(
        &'a self,
        cx: &mut Context<'_>,
        mut f: F,
    ) -> Poll<io::Result<R>>
    where
        F: FnMut(&'a T) -> io::Result<R>,
    {
        // If the operation isn't blocked, return its result.
        match f(self.source.as_ref().unwrap()) {
            Err(err) if err.kind() == io::ErrorKind::WouldBlock => {}
            res => return Poll::Ready(res),
        }

        // Lock the waker list.
        let mut writers = self.entry.writers.lock().unwrap();

        // Try running the operation again.
        match f(self.source.as_ref().unwrap()) {
            Err(err) if err.kind() == io::ErrorKind::WouldBlock => {}
            res => return Poll::Ready(res),
        }

        // Register the task if it isn't registered already.
        if writers.wakers.iter().all(|w| !w.will_wake(cx.waker())) {
            writers.wakers.push(cx.waker().clone());
        }

        Poll::Pending
    }

    /// Polls the inner I/O source until a non-blocking read can be performed.
    ///
    /// If non-blocking reads are currently not possible, the `Waker` will be saved and
    /// notified when it can read non-blocking again.
    #[allow(dead_code)]
    pub fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<()> {
        // Lock the waker list.
        let mut readers = self.entry.readers.lock().unwrap();
        if readers.ready {
            return Poll::Ready(());
        }
        // Register the task if it isn't registered already.
        if readers.wakers.iter().all(|w| !w.will_wake(cx.waker())) {
            readers.wakers.push(cx.waker().clone());
        }
        Poll::Pending
    }

    /// Polls the inner I/O source until a non-blocking write can be performed.
    ///
    /// If non-blocking writes are currently not possible, the `Waker` will be saved and
    /// notified when it can write non-blocking again.
    pub fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<()> {
        // Lock the waker list.
        let mut writers = self.entry.writers.lock().unwrap();
        if writers.ready {
            return Poll::Ready(());
        }
        // Register the task if it isn't registered already.
        if writers.wakers.iter().all(|w| !w.will_wake(cx.waker())) {
            writers.wakers.push(cx.waker().clone());
        }
        Poll::Pending
    }

    /// Deregisters and returns the inner I/O source.
    ///
    /// This method is typically used to convert `Watcher`s to raw file descriptors/handles.
    #[allow(dead_code)]
    pub fn into_inner(mut self) -> T {
        let source = self.source.take().unwrap();
        RUNTIME
            .reactor()
            .deregister(&source, &self.entry)
            .expect("cannot deregister I/O event source");
        source
    }
}

impl<T: Evented> Drop for Watcher<T> {
    fn drop(&mut self) {
        if let Some(ref source) = self.source {
            RUNTIME
                .reactor()
                .deregister(source, &self.entry)
                .expect("cannot deregister I/O event source");
        }
    }
}

impl<T: Evented + fmt::Debug> fmt::Debug for Watcher<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Watcher")
            .field("entry", &self.entry)
            .field("source", &self.source)
            .finish()
    }
}
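For orientation, here is a sketch of how an I/O type elsewhere in the crate can sit on top of `Watcher`. The `MyTcpStream` name and the `futures_io` import are assumptions for illustration; this is not a file in this diff:

```rust
use std::io::Read;
use std::pin::Pin;
use std::task::{Context, Poll};

use futures_io::AsyncRead;
use mio::net::TcpStream;

use crate::rt::Watcher;

struct MyTcpStream {
    watcher: Watcher<TcpStream>,
}

impl AsyncRead for MyTcpStream {
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut [u8],
    ) -> Poll<std::io::Result<usize>> {
        // Run the non-blocking read; on `WouldBlock` the watcher stores the
        // task's waker so the reactor wakes it once the socket is readable.
        self.watcher.poll_read_with(cx, |mut source| source.read(buf))
    }
}
```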
@@ -0,0 +1,646 @@
use std::cell::Cell;
use std::cell::RefCell;
use std::io;
use std::iter;
use std::ptr;
use std::sync::atomic::{self, Ordering};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;

use crossbeam_deque::{Injector, Steal, Stealer, Worker};
use crossbeam_utils::{
    sync::{Parker, Unparker},
    thread::scope,
};
use once_cell::sync::Lazy;

use crate::rt::Reactor;
use crate::sync::Spinlock;
use crate::task::Runnable;
use crate::utils::{abort_on_panic, random};

thread_local! {
    /// A reference to the current machine, if the current thread runs tasks.
    static MACHINE: RefCell<Option<Arc<Machine>>> = RefCell::new(None);

    /// This flag is set to true whenever `task::yield_now()` is invoked.
    static YIELD_NOW: Cell<bool> = Cell::new(false);
}

/// Maximum number of OS threads = processors = machines.
static MAXPROCS: Lazy<usize> = Lazy::new(|| num_cpus::get().max(1));

/// Minimum number of machines that are kept executing, to avoid starvation.
const MIN_MACHINES: usize = 2;

struct Scheduler {
    /// Set to `true` while a machine is polling the reactor.
    polling: bool,

    progress: bool,

    /// Available threads.
    threads: Vec<ThreadState>,

    /// Idle processors.
    processors: Vec<Processor>,

    /// Running machines.
    machines: Vec<Arc<Machine>>,
}

impl Scheduler {
    /// Get the next machine that has no work yet, if there is any.
    fn next_idle_machine(&self) -> Option<Arc<Machine>> {
        self.machines
            .iter()
            .find(|m| !m.has_work())
            .map(|m| m.clone())
    }
}

struct ThreadState {
    unparker: Unparker,
    parked: Arc<atomic::AtomicBool>,
    /// Used to transfer the machine into the thread.
    machine_sender: crossbeam_channel::Sender<Arc<Machine>>,
}

/// An async runtime.
pub struct Runtime {
    /// The reactor.
    reactor: Reactor,

    /// The global queue of tasks.
    injector: Injector<Runnable>,

    /// Handles to local queues for stealing work.
    stealers: Vec<Stealer<Runnable>>,

    /// The scheduler state.
    sched: Mutex<Scheduler>,

    #[cfg(feature = "tracing")]
    poll_count: atomic::AtomicUsize,
}

impl Runtime {
    /// Creates a new runtime.
    pub fn new() -> Runtime {
        let processors: Vec<_> = (0..*MAXPROCS).map(|_| Processor::new()).collect();
        let stealers = processors.iter().map(|p| p.worker.stealer()).collect();
        let threads = Vec::with_capacity(*MAXPROCS);

        Runtime {
            reactor: Reactor::new().unwrap(),
            injector: Injector::new(),
            stealers,
            #[cfg(feature = "tracing")]
            poll_count: atomic::AtomicUsize::new(0),
            sched: Mutex::new(Scheduler {
                processors,
                machines: Vec::with_capacity(*MAXPROCS),
                threads,
                polling: false,
                progress: false,
            }),
        }
    }

    /// Returns a reference to the reactor.
    pub fn reactor(&self) -> &Reactor {
        &self.reactor
    }

    /// Flushes the task slot so that tasks get run more fairly.
    pub fn yield_now(&self) {
        YIELD_NOW.with(|flag| flag.set(true));
    }

    /// Schedules a task.
    pub fn schedule(&self, task: Runnable) {
        MACHINE.with(|machine| {
            // If the current thread is a worker thread, schedule it onto the current machine.
            // Otherwise, push it into the global task queue.
            match &*machine.borrow() {
                None => {
                    self.injector.push(task);
                    self.notify();
                }
                Some(m) => m.schedule(&self, task),
            }
        });
    }

    /// Runs the runtime on the current thread.
    pub fn run(&self) {
        scope(|s| {
            let mut idle = 0;
            let mut delay = 0;

            #[cfg(feature = "tracing")]
            s.builder()
                .name("async-std/trace".to_string())
                .spawn(|_| {
                    use log_update::LogUpdate;
                    use std::io::stdout;
                    let mut log_update = LogUpdate::new(stdout()).unwrap();

                    loop {
                        let (thread_list, machine_list, processor_list, polling) = {
                            let sched = self.sched.lock().unwrap();
                            let thread_list = sched
                                .threads
                                .iter()
                                .map(|t| {
                                    if t.parked.load(Ordering::Relaxed) {
                                        "_"
                                    } else {
                                        "|"
                                    }
                                })
                                .fold(String::new(), |mut s, curr| {
                                    s += " ";
                                    s += curr;
                                    s
                                });
                            let machine_list = sched
                                .machines
                                .iter()
                                .map(|m| match &*m.processor.lock() {
                                    Some(p) => {
                                        let len = p.worker.len() + p.slot.is_some() as usize;
                                        len.to_string()
                                    }
                                    None => "_".to_string(),
                                })
                                .fold(String::new(), |mut s, curr| {
                                    s += " ";
                                    s += &curr;
                                    s
                                });
                            let processor_list = sched
                                .processors
                                .iter()
                                .map(|p| {
                                    let len = p.worker.len() + p.slot.is_some() as usize;
                                    len.to_string()
                                })
                                .fold(String::new(), |mut s, curr| {
                                    s += " ";
                                    s += &curr;
                                    s
                                });
                            (thread_list, machine_list, processor_list, sched.polling)
                        };
                        let glen = self.injector.len();
                        let polls = self.poll_count.load(Ordering::Relaxed);
                        let msg = format!(
                            "GlobalQueue: {}\nPolls: {} - {}\nThreads:\n{}\nMachines:\n{}\nProcessors:\n{}\n",
                            glen, polls, polling, thread_list, machine_list, processor_list
                        );
                        log_update.render(&msg).unwrap();
                        thread::sleep(Duration::from_millis(10));
                    }
                })
                .expect("failed to start tracing");

            loop {
                // Get a list of new machines to start, if any need to be started.
                let machines = self.make_machines();
                for m in machines {
                    // println!("{} -- looking for thread", k);
                    idle = 0;

                    // println!("getting idle thread");
                    let sched = self.sched.lock().unwrap();
                    'inner: for (i, thread) in sched.threads.iter().enumerate() {
                        // grab the first parked thread
                        if thread
                            .parked
                            .compare_and_swap(true, false, Ordering::Acquire)
                        {
                            // println!("unpark thread {}", i);
                            // transfer the machine
                            thread
                                .machine_sender
                                .send(m.clone())
                                .expect("failed to send machine to thread");
                            // unpark the thread
                            thread.unparker.unpark();
                            // println!("{} found thread to unpark {}", k, i);
                            break 'inner;
                        }
                    }
                    let len = sched.threads.len();
                    drop(sched);

                    // no idle thread available, check if we can spawn one
                    if len < *MAXPROCS {
                        let i = len;
                        // println!("{} spawning thread {}", k, i);
                        // we can spawn one, let's do it
                        let parked = Arc::new(atomic::AtomicBool::new(false));
                        let parked2 = parked.clone();
                        let (machine_sender, machine_recv) = crossbeam_channel::bounded(1);
                        let parker = Parker::new();
                        let unparker = parker.unparker().clone();

                        s.builder()
                            .name("async-std/machine".to_string())
                            .spawn(move |_| {
                                abort_on_panic(|| {
                                    loop {
                                        // println!("checking park loop {}", i);
                                        while parked2.load(Ordering::Acquire) {
                                            parker.park();
                                            // TODO: shutdown if idle for too long
                                        }
                                        // println!("thread unparked {}", i);
                                        // when this thread is unparked, retrieve the machine
                                        let m: Arc<Machine> =
                                            machine_recv.recv().expect("failed to receive machine");

                                        // store it in the thread local
                                        MACHINE.with(|machine| {
                                            *machine.borrow_mut() = Some(m.clone());
                                        });
                                        // run it
                                        m.run(self);

                                        // when run ends
                                        {
                                            // see if there are any available processors
                                            let mut sched = self.sched.lock().unwrap();
                                            if let Some(p) = sched.processors.pop() {
                                                // get a machine
                                                if let Some(m) = sched.next_idle_machine() {
                                                    *m.processor.lock() = Some(p);
                                                    MACHINE.with(|machine| {
                                                        machine.borrow_mut().replace(m);
                                                    });
                                                    continue;
                                                }
                                            }
                                            drop(sched);

                                            // go into parked mode, no work
                                            MACHINE.with(|machine| {
                                                *machine.borrow_mut() = None;
                                            });
                                            parked2.store(true, Ordering::Relaxed);
                                            // println!("thread parked {}", i);
                                        }
                                    }
                                })
                            })
                            .expect("cannot start a machine thread");

                        let mut sched = self.sched.lock().unwrap();

                        // transfer the machine
                        machine_sender
                            .send(m)
                            .expect("failed to send machine to thread");

                        sched.threads.push(ThreadState {
                            unparker,
                            parked,
                            machine_sender,
                        });
                        drop(sched);
                    }
                }

                // Sleep for a bit longer if the scheduler state hasn't changed in a while.
                if idle > 10 {
                    delay = (delay * 2).min(10_000);
                } else {
                    idle += 1;
                    delay = 1000;
                }

                thread::sleep(Duration::from_micros(delay));
            }
        })
        .unwrap();
    }

    /// Returns a list of machines that need to be started.
    fn make_machines(&self) -> Vec<Arc<Machine>> {
        let mut sched = self.sched.lock().unwrap();
        let mut to_start = Vec::new();

        // If no machine has been polling the reactor in a while, that means the runtime is
        // overloaded with work and we need to start another machine.
        //
        // Also ensure that there are at least 2 running machines to avoid starvation.
        if !sched.polling || sched.machines.len() < MIN_MACHINES {
            #[cfg(feature = "tracing")]
            self.poll_count.fetch_add(1, Ordering::Relaxed);
            // if !sched.progress {
            if let Some(p) = sched.processors.pop() {
                if let Some(m) = sched.next_idle_machine() {
                    // find idle m
                    *m.processor.lock() = Some(p);
                    to_start.push(m.clone());
                } else {
                    // no idle m
                    let m = Arc::new(Machine::new(p));
                    to_start.push(m.clone());
                    sched.machines.push(m);
                }
            }
            // }
            sched.progress = false;
        }

        to_start
    }

    /// Unparks a thread polling the reactor.
    fn notify(&self) {
        atomic::fence(Ordering::SeqCst);
        self.reactor.notify().unwrap();
    }

    /// Attempts to poll the reactor without blocking on it.
    ///
    /// Returns `Ok(true)` if at least one new task was woken.
    ///
    /// This function might not poll the reactor at all so do not rely on it doing anything. Only
    /// use for optimization.
    fn quick_poll(&self) -> io::Result<bool> {
        if let Ok(sched) = self.sched.try_lock() {
            if !sched.polling {
                return self.reactor.poll(Some(Duration::from_secs(0)));
            }
        }
        Ok(false)
    }
}

/// A thread running a processor.
struct Machine {
    /// Holds the processor until it gets stolen.
    processor: Spinlock<Option<Processor>>,
}

impl Machine {
    /// Creates a new machine running a processor.
    fn new(p: Processor) -> Machine {
        Machine {
            processor: Spinlock::new(Some(p)),
        }
    }

    fn has_work(&self) -> bool {
        if let Some(p) = &*self.processor.lock() {
            // TODO: is this the right check?
            p.has_work()
        } else {
            false
        }
    }

    /// Schedules a task onto the machine.
    fn schedule(&self, rt: &Runtime, task: Runnable) {
        match self.processor.lock().as_mut() {
            None => {
                rt.injector.push(task);
                rt.notify();
            }
            Some(p) => p.schedule(rt, task),
        }
    }

    /// Finds the next runnable task.
    fn find_task(&self, rt: &Runtime) -> Steal<Runnable> {
        let mut retry = false;

        // First try finding a task in the local queue or in the global queue.
        if let Some(p) = self.processor.lock().as_mut() {
            if let Some(task) = p.pop_task() {
                return Steal::Success(task);
            }

            match p.steal_from_global(rt) {
                Steal::Empty => {}
                Steal::Retry => retry = true,
                Steal::Success(task) => return Steal::Success(task),
            }
        }

        // Try polling the reactor, but don't block on it.
        let progress = rt.quick_poll().unwrap();

        // Try finding a task in the local queue, which might hold tasks woken by the reactor. If
        // the local queue is still empty, try stealing from other processors.
        if let Some(p) = self.processor.lock().as_mut() {
            if progress {
                if let Some(task) = p.pop_task() {
                    return Steal::Success(task);
                }
            }

            match p.steal_from_others(rt) {
                Steal::Empty => {}
                Steal::Retry => retry = true,
                Steal::Success(task) => return Steal::Success(task),
            }
        }

        if retry { Steal::Retry } else { Steal::Empty }
    }

    /// Runs the machine on the current thread.
    fn run(&self, rt: &Runtime) {
        /// Number of yields when no runnable task is found.
        const YIELDS: u32 = 3;
        /// Number of short sleeps when no runnable task is found.
        const SLEEPS: u32 = 10;
        /// Number of runs in a row before the global queue is inspected.
        const RUNS: u32 = 64;

        // The number of times the thread found work in a row.
        let mut runs = 0;
        // The number of times the thread didn't find work in a row.
        let mut fails = 0;

        loop {
            // Check if `task::yield_now()` was invoked and flush the slot if so.
            YIELD_NOW.with(|flag| {
                if flag.replace(false) {
                    if let Some(p) = self.processor.lock().as_mut() {
                        p.flush_slot(rt);
                    }
                }
            });

            // After a number of runs in a row, do some work to ensure no task is left behind
            // indefinitely. Poll the reactor, steal tasks from the global queue, and flush the
            // task slot.
            if runs >= RUNS {
                runs = 0;
                rt.quick_poll().unwrap();

                if let Some(p) = self.processor.lock().as_mut() {
                    if let Steal::Success(task) = p.steal_from_global(rt) {
                        p.schedule(rt, task);
                    }

                    p.flush_slot(rt);
                }
            }

            // Try to find a runnable task.
            if let Steal::Success(task) = self.find_task(rt) {
                task.run();
                runs += 1;
                fails = 0;
                continue;
            }

            fails += 1;

            // Yield the current thread a few times.
            if fails <= YIELDS {
                thread::yield_now();
                continue;
            }

            // Put the current thread to sleep a few times.
            if fails <= YIELDS + SLEEPS {
                let opt_p = self.processor.lock().take();
                thread::sleep(Duration::from_micros(10));
                *self.processor.lock() = opt_p;
                continue;
            }

            let mut sched = rt.sched.lock().unwrap();

            // One final check for available tasks while the scheduler is locked.
            if let Some(task) = iter::repeat_with(|| self.find_task(rt))
                .find(|s| !s.is_retry())
                .and_then(|s| s.success())
            {
                self.schedule(rt, task);
                continue;
            }

            // If another thread is already blocked on the reactor, there is no point in keeping
            // the current thread around since there is too little work to do.
            if sched.polling {
                break;
            }

            // Take out the machine associated with the current thread.
            let m = match sched
                .machines
                .iter()
                .position(|elem| ptr::eq(&**elem, self))
            {
                None => break, // The processor was stolen.
                Some(pos) => sched.machines.swap_remove(pos),
            };

            // Unlock the scheduler and poll the reactor until new I/O events arrive.
            // println!("polling start");
            sched.polling = true;
            drop(sched);
            rt.reactor.poll(None).unwrap();

            // Lock the scheduler again and re-register the machine.
            sched = rt.sched.lock().unwrap();
            sched.polling = false;
            // println!("polling stop");
            sched.machines.push(m);
            sched.progress = true;

            runs = 0;
            fails = 0;
        }
        // println!("thread break");

        // When shutting down the thread, take the processor out if still available.
        let opt_p = self.processor.lock().take();
        // println!("processor {:?}", opt_p.is_some());

        // Return the processor to the scheduler and remove the machine.
        if let Some(p) = opt_p {
            // println!("returning processor to pool");
            let mut sched = rt.sched.lock().unwrap();
            sched.processors.push(p);
            sched.machines.retain(|elem| !ptr::eq(&**elem, self));
        }
        // println!("thread run stopped");
    }
}

struct Processor {
    /// The local task queue.
    worker: Worker<Runnable>,

    /// Contains the next task to run as an optimization that skips the queue.
    slot: Option<Runnable>,
}

impl Processor {
    /// Creates a new processor.
    fn new() -> Processor {
        Processor {
            worker: Worker::new_fifo(),
            slot: None,
        }
    }

    /// Is there any available work for this processor?
    fn has_work(&self) -> bool {
        self.slot.is_some() || !self.worker.is_empty()
    }

    /// Schedules a task to run on this processor.
    fn schedule(&mut self, rt: &Runtime, task: Runnable) {
        match self.slot.replace(task) {
            None => {}
            Some(task) => {
                self.worker.push(task);
                rt.notify();
            }
        }
    }

    /// Flushes a task from the slot into the local queue.
    fn flush_slot(&mut self, rt: &Runtime) {
        if let Some(task) = self.slot.take() {
            self.worker.push(task);
            rt.notify();
        }
    }

    /// Pops a task from this processor.
    fn pop_task(&mut self) -> Option<Runnable> {
        self.slot.take().or_else(|| self.worker.pop())
    }

    /// Steals a task from the global queue.
    fn steal_from_global(&self, rt: &Runtime) -> Steal<Runnable> {
        rt.injector.steal_batch_and_pop(&self.worker)
    }

    /// Steals a task from other processors.
    fn steal_from_others(&self, rt: &Runtime) -> Steal<Runnable> {
        // Pick a random starting point in the list of queues.
        let len = rt.stealers.len();
        let start = random(len as u32) as usize;

        // Create an iterator over stealers that starts from the chosen point.
        let (l, r) = rt.stealers.split_at(start);
        let stealers = r.iter().chain(l.iter());

        // Try stealing a batch of tasks from each queue.
        stealers
            .map(|s| s.steal_batch_and_pop(&self.worker))
            .collect()
    }
}
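The scheduler above looks for work in a fixed order: the processor's slot, its local queue, the global injector, and finally other processors' queues. Here is a standalone sketch of that priority using `crossbeam_deque`; the names (`find_task`, `local`, `global`, `siblings`) are illustrative and this is not code from the diff:

```rust
use crossbeam_deque::{Injector, Steal, Stealer, Worker};

fn find_task<T>(
    slot: &mut Option<T>,
    local: &Worker<T>,
    global: &Injector<T>,
    siblings: &[Stealer<T>],
) -> Option<T> {
    // 1. The slot holds the most recently scheduled task and skips the queue.
    if let Some(task) = slot.take() {
        return Some(task);
    }
    // 2. Then the processor's own FIFO queue.
    if let Some(task) = local.pop() {
        return Some(task);
    }
    // 3. Then a batch from the global injector, keeping one task to run now.
    // 4. Finally, steal a batch from some other processor's queue.
    std::iter::repeat_with(|| {
        global.steal_batch_and_pop(local).or_else(|| {
            siblings
                .iter()
                .map(|s| s.steal_batch_and_pop(local))
                .collect()
        })
    })
    .find(|s| !s.is_retry())
    .and_then(Steal::success)
}

fn main() {
    let global = Injector::new();
    let local = Worker::new_fifo();
    let other = Worker::new_fifo();
    other.push("from sibling");
    global.push("from global");

    let mut slot = Some("from slot");
    let stealers = vec![other.stealer()];

    // Tasks come back in priority order: slot, then global, then a sibling.
    assert_eq!(find_task(&mut slot, &local, &global, &stealers), Some("from slot"));
    assert_eq!(find_task(&mut slot, &local, &global, &stealers), Some("from global"));
    assert_eq!(find_task(&mut slot, &local, &global, &stealers), Some("from sibling"));
}
```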
@@ -0,0 +1,294 @@
use std::cell::UnsafeCell;
use std::fmt;
use std::future::Future;
use std::ops::{Deref, DerefMut};
use std::pin::Pin;
use std::sync::atomic::{AtomicBool, Ordering};

use crate::sync::WakerSet;
use crate::task::{Context, Poll};

/// A mutual exclusion primitive for protecting shared data.
///
/// This type is an async version of [`std::sync::Mutex`].
///
/// [`std::sync::Mutex`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html
///
/// # Examples
///
/// ```
/// # async_std::task::block_on(async {
/// #
/// use async_std::sync::{Arc, Mutex};
/// use async_std::task;
///
/// let m = Arc::new(Mutex::new(0));
/// let mut tasks = vec![];
///
/// for _ in 0..10 {
///     let m = m.clone();
///     tasks.push(task::spawn(async move {
///         *m.lock().await += 1;
///     }));
/// }
///
/// for t in tasks {
///     t.await;
/// }
/// assert_eq!(*m.lock().await, 10);
/// #
/// # })
/// ```
pub struct Mutex<T: ?Sized> {
    locked: AtomicBool,
    wakers: WakerSet,
    value: UnsafeCell<T>,
}

unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}

impl<T> Mutex<T> {
    /// Creates a new mutex.
    ///
    /// # Examples
    ///
    /// ```
    /// use async_std::sync::Mutex;
    ///
    /// let mutex = Mutex::new(0);
    /// ```
    pub fn new(t: T) -> Mutex<T> {
        Mutex {
            locked: AtomicBool::new(false),
            wakers: WakerSet::new(),
            value: UnsafeCell::new(t),
        }
    }
}

impl<T: ?Sized> Mutex<T> {
    /// Acquires the lock.
    ///
    /// Returns a guard that releases the lock when dropped.
    ///
    /// # Examples
    ///
    /// ```
    /// # async_std::task::block_on(async {
    /// #
    /// use async_std::sync::{Arc, Mutex};
    /// use async_std::task;
    ///
    /// let m1 = Arc::new(Mutex::new(10));
    /// let m2 = m1.clone();
    ///
    /// task::spawn(async move {
    ///     *m1.lock().await = 20;
    /// })
    /// .await;
    ///
    /// assert_eq!(*m2.lock().await, 20);
    /// #
    /// # })
    /// ```
    pub async fn lock(&self) -> MutexGuard<'_, T> {
        pub struct LockFuture<'a, T: ?Sized> {
            mutex: &'a Mutex<T>,
            opt_key: Option<usize>,
        }

        impl<'a, T: ?Sized> Future for LockFuture<'a, T> {
            type Output = MutexGuard<'a, T>;

            fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
                loop {
                    // If the current task is in the set, remove it.
                    if let Some(key) = self.opt_key.take() {
                        self.mutex.wakers.remove(key);
                    }

                    // Try acquiring the lock.
                    match self.mutex.try_lock() {
                        Some(guard) => return Poll::Ready(guard),
                        None => {
                            // Insert this lock operation.
                            self.opt_key = Some(self.mutex.wakers.insert(cx));

                            // If the mutex is still locked, return.
                            if self.mutex.locked.load(Ordering::SeqCst) {
                                return Poll::Pending;
                            }
                        }
                    }
                }
            }
        }

        impl<T: ?Sized> Drop for LockFuture<'_, T> {
            fn drop(&mut self) {
                // If the current task is still in the set, that means it is being cancelled now.
                if let Some(key) = self.opt_key {
                    self.mutex.wakers.cancel(key);
                }
            }
        }

        LockFuture {
            mutex: self,
            opt_key: None,
        }
        .await
    }

    /// Attempts to acquire the lock.
    ///
    /// If the lock could not be acquired at this time, then [`None`] is returned. Otherwise, a
    /// guard is returned that releases the lock when dropped.
    ///
    /// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None
    ///
    /// # Examples
    ///
    /// ```
    /// # async_std::task::block_on(async {
    /// #
    /// use async_std::sync::{Arc, Mutex};
    /// use async_std::task;
    ///
    /// let m1 = Arc::new(Mutex::new(10));
    /// let m2 = m1.clone();
    ///
    /// task::spawn(async move {
    ///     if let Some(mut guard) = m1.try_lock() {
    ///         *guard = 20;
    ///     } else {
    ///         println!("try_lock failed");
    ///     }
    /// })
    /// .await;
    ///
    /// assert_eq!(*m2.lock().await, 20);
    /// #
    /// # })
    /// ```
    #[inline]
    pub fn try_lock(&self) -> Option<MutexGuard<'_, T>> {
        if !self.locked.swap(true, Ordering::SeqCst) {
            Some(MutexGuard(self))
        } else {
            None
        }
    }

    /// Consumes the mutex, returning the underlying data.
    ///
    /// # Examples
    ///
    /// ```
    /// use async_std::sync::Mutex;
    ///
    /// let mutex = Mutex::new(10);
    /// assert_eq!(mutex.into_inner(), 10);
    /// ```
    pub fn into_inner(self) -> T
    where
        T: Sized,
    {
        self.value.into_inner()
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the mutex mutably, no actual locking takes place -- the mutable
    /// borrow statically guarantees no locks exist.
    ///
    /// # Examples
    ///
    /// ```
    /// # async_std::task::block_on(async {
    /// #
    /// use async_std::sync::Mutex;
    ///
    /// let mut mutex = Mutex::new(0);
    /// *mutex.get_mut() = 10;
    /// assert_eq!(*mutex.lock().await, 10);
    /// #
    /// # })
    /// ```
    pub fn get_mut(&mut self) -> &mut T {
        unsafe { &mut *self.value.get() }
    }
}

impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        struct Locked;
        impl fmt::Debug for Locked {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.write_str("<locked>")
            }
        }

        match self.try_lock() {
            None => f.debug_struct("Mutex").field("data", &Locked).finish(),
            Some(guard) => f.debug_struct("Mutex").field("data", &&*guard).finish(),
        }
    }
}

impl<T> From<T> for Mutex<T> {
    fn from(val: T) -> Mutex<T> {
        Mutex::new(val)
    }
}

impl<T: ?Sized + Default> Default for Mutex<T> {
    fn default() -> Mutex<T> {
        Mutex::new(Default::default())
    }
}

/// A guard that releases the lock when dropped.
pub struct MutexGuard<'a, T: ?Sized>(&'a Mutex<T>);

unsafe impl<T: ?Sized + Send> Send for MutexGuard<'_, T> {}
unsafe impl<T: ?Sized + Sync> Sync for MutexGuard<'_, T> {}

impl<T: ?Sized> Drop for MutexGuard<'_, T> {
    fn drop(&mut self) {
        // Use `SeqCst` ordering to synchronize with `WakerSet::insert()` and `WakerSet::update()`.
        self.0.locked.store(false, Ordering::SeqCst);

        // Notify a blocked `lock()` operation if none were notified already.
        self.0.wakers.notify_any();
    }
}

impl<T: ?Sized + fmt::Debug> fmt::Debug for MutexGuard<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

impl<T: ?Sized> Deref for MutexGuard<'_, T> {
    type Target = T;

    fn deref(&self) -> &T {
        unsafe { &*self.0.value.get() }
    }
}

impl<T: ?Sized> DerefMut for MutexGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.0.value.get() }
    }
}

#[cfg(feature = "unstable")]
pub fn guard_lock<'a, T>(guard: &MutexGuard<'a, T>) -> &'a Mutex<T> {
    guard.0
}
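The `try_lock` above relies on a single atomic `swap` to both test and set the `locked` flag, and `MutexGuard::drop` clears the flag and notifies one waiter. A minimal standalone sketch of that flag-based fast path (the `RawLock` name is illustrative, not from the diff):

```rust
use std::sync::atomic::{AtomicBool, Ordering};

struct RawLock {
    locked: AtomicBool,
}

impl RawLock {
    fn new() -> RawLock {
        RawLock {
            locked: AtomicBool::new(false),
        }
    }

    /// Returns `true` if the lock was acquired by this call.
    fn try_acquire(&self) -> bool {
        // `swap` returns the previous value: `false` means we took the lock.
        !self.locked.swap(true, Ordering::SeqCst)
    }

    fn release(&self) {
        self.locked.store(false, Ordering::SeqCst);
    }
}

fn main() {
    let lock = RawLock::new();
    assert!(lock.try_acquire());
    assert!(!lock.try_acquire()); // already held
    lock.release();
    assert!(lock.try_acquire());
}
```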
@@ -0,0 +1,89 @@
use std::cell::UnsafeCell;
use std::ops::{Deref, DerefMut};
use std::sync::atomic::{AtomicBool, Ordering};

use crossbeam_utils::Backoff;

/// A simple spinlock.
#[derive(Debug)]
pub struct Spinlock<T> {
    locked: AtomicBool,
    value: UnsafeCell<T>,
}

unsafe impl<T: Send> Send for Spinlock<T> {}
unsafe impl<T: Send> Sync for Spinlock<T> {}

impl<T> Spinlock<T> {
    /// Returns a new spinlock initialized with `value`.
    pub const fn new(value: T) -> Spinlock<T> {
        Spinlock {
            locked: AtomicBool::new(false),
            value: UnsafeCell::new(value),
        }
    }

    /// Locks the spinlock.
    pub fn lock(&self) -> SpinlockGuard<'_, T> {
        let backoff = Backoff::new();
        while self.locked.compare_and_swap(false, true, Ordering::Acquire) {
            backoff.snooze();
        }
        SpinlockGuard { parent: self }
    }
}

/// A guard holding a spinlock locked.
#[derive(Debug)]
pub struct SpinlockGuard<'a, T> {
    parent: &'a Spinlock<T>,
}

unsafe impl<T: Send> Send for SpinlockGuard<'_, T> {}
unsafe impl<T: Sync> Sync for SpinlockGuard<'_, T> {}

impl<'a, T> Drop for SpinlockGuard<'a, T> {
    fn drop(&mut self) {
        self.parent.locked.store(false, Ordering::Release);
    }
}

impl<'a, T> Deref for SpinlockGuard<'a, T> {
    type Target = T;

    fn deref(&self) -> &T {
        unsafe { &*self.parent.value.get() }
    }
}

impl<'a, T> DerefMut for SpinlockGuard<'a, T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.parent.value.get() }
    }
}

#[test]
fn spinlock() {
    use std::sync::Arc;

    use crate::sync::Spinlock;
    use crate::task;

    task::block_on(async {
        let m = Arc::new(Spinlock::new(0));
        let mut tasks = vec![];

        for _ in 0..10 {
            let m = m.clone();
            tasks.push(task::spawn(async move {
                *m.lock() += 1;
            }));
        }

        for t in tasks {
            t.await;
        }
        assert_eq!(*m.lock(), 10);
    })
}
@@ -1,31 +0,0 @@
use std::future::Future;

use crate::task::{Builder, JoinHandle};

/// Spawns a task onto the thread-local executor.
///
/// # Examples
///
/// ```
/// # #[cfg(feature = "unstable")]
/// # async_std::task::block_on(async {
/// #
/// use async_std::task;
///
/// let handle = task::spawn_local(async {
///     1 + 2
/// });
///
/// assert_eq!(handle.await, 3);
/// #
/// # })
/// ```
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
#[inline]
pub fn spawn_local<F, T>(future: F) -> JoinHandle<T>
where
    F: Future<Output = T> + 'static,
    T: 'static,
{
    Builder::new().local(future).expect("cannot spawn task")
}
@@ -1,84 +0,0 @@
use std::cell::Cell;
use std::ptr;

use crate::task::{LocalsMap, Task, TaskId};
use crate::utils::abort_on_panic;

thread_local! {
    /// A pointer to the currently running task.
    static CURRENT: Cell<*const TaskLocalsWrapper> = Cell::new(ptr::null_mut());
}

/// A wrapper to store task local data.
pub(crate) struct TaskLocalsWrapper {
    /// The actual task details.
    task: Task,

    /// The map holding task-local values.
    locals: LocalsMap,
}

impl TaskLocalsWrapper {
    /// Creates a new task handle.
    ///
    /// If the task is unnamed, the inner representation of the task will be lazily allocated on
    /// demand.
    #[inline]
    pub(crate) fn new(task: Task) -> Self {
        Self {
            task,
            locals: LocalsMap::new(),
        }
    }

    /// Gets the task's unique identifier.
    #[inline]
    pub fn id(&self) -> TaskId {
        self.task.id()
    }

    /// Returns a reference to the inner `Task`.
    pub(crate) fn task(&self) -> &Task {
        &self.task
    }

    /// Returns the map holding task-local values.
    pub(crate) fn locals(&self) -> &LocalsMap {
        &self.locals
    }

    /// Set a reference to the current task.
    pub(crate) unsafe fn set_current<F, R>(task: *const TaskLocalsWrapper, f: F) -> R
    where
        F: FnOnce() -> R,
    {
        CURRENT.with(|current| {
            let old_task = current.replace(task);
            defer! {
                current.set(old_task);
            }
            f()
        })
    }

    /// Gets a reference to the current task.
    pub(crate) fn get_current<F, R>(f: F) -> Option<R>
    where
        F: FnOnce(&TaskLocalsWrapper) -> R,
    {
        let res = CURRENT.try_with(|current| unsafe { current.get().as_ref().map(f) });
        match res {
            Ok(Some(val)) => Some(val),
            Ok(None) | Err(_) => None,
        }
    }
}

impl Drop for TaskLocalsWrapper {
    fn drop(&mut self) {
        // Abort the process if dropping task-locals panics.
        abort_on_panic(|| {
            unsafe { self.locals.clear() };
        });
    }
}
@@ -1,20 +0,0 @@
#[cfg(feature = "unstable")]
#[test]
fn test_send() -> async_std::io::Result<()> {
    use async_std::prelude::*;
    use async_std::{stream, task};

    task::block_on(async {
        fn test_send_trait<T: Send>(_: &T) {}

        let stream = stream::repeat(1u8).take(10);
        test_send_trait(&stream);

        let fut = stream.collect::<Vec<_>>();

        // This line triggers a compilation error
        test_send_trait(&fut);

        Ok(())
    })
}
@@ -1,26 +0,0 @@
use std::time::Duration;

use async_std::future::timeout;
use async_std::task;

#[cfg(target_arch = "wasm32")]
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);

#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
fn timeout_future_many() {
    task::block_on(async {
        let futures = (0..10)
            .map(|i| {
                timeout(Duration::from_millis(i * 50), async move {
                    task::sleep(Duration::from_millis(i)).await;
                    Ok::<(), async_std::future::TimeoutError>(())
                })
            })
            .collect::<Vec<_>>();

        for future in futures {
            future.await.unwrap().unwrap();
        }
    });
}
@@ -1,10 +0,0 @@
#!/bin/sh

wasm-pack test --chrome --headless -- --features unstable --test buf_writer
wasm-pack test --chrome --headless -- --features unstable --test channel
wasm-pack test --chrome --headless -- --features unstable --test condvar
wasm-pack test --chrome --headless -- --features unstable --test mutex
wasm-pack test --chrome --headless -- --features unstable --test rwlock
wasm-pack test --chrome --headless -- --features unstable --test stream
wasm-pack test --chrome --headless -- --features unstable --test task_local
wasm-pack test --chrome --headless -- --features unstable --test timeout