New scheduler resilient to blocking
parent
9311fd7fae
commit
36d24cd0e1
@ -0,0 +1,23 @@
|
||||
//! The runtime.
|
||||
|
||||
use std::thread;
|
||||
|
||||
use once_cell::sync::Lazy;
|
||||
|
||||
use crate::utils::abort_on_panic;
|
||||
|
||||
pub use reactor::{Reactor, Watcher};
|
||||
pub use runtime::Runtime;
|
||||
|
||||
mod reactor;
|
||||
mod runtime;
|
||||
|
||||
/// The global runtime.
///
/// First access spawns a dedicated supervisor thread that drives the runtime
/// (`Runtime::run`) forever, then returns the `Runtime` handle itself.
///
/// NOTE(review): the spawned thread dereferences `RUNTIME` while this
/// initializer is still running — this relies on `once_cell::sync::Lazy`
/// blocking concurrent accesses from *other* threads until initialization
/// completes; confirm against the once_cell documentation.
pub static RUNTIME: Lazy<Runtime> = Lazy::new(|| {
    // Run the supervisor loop on its own thread. A panic there would leave
    // the whole runtime dead, so abort instead of unwinding.
    thread::Builder::new()
        .name("async-std/runtime".to_string())
        .spawn(|| abort_on_panic(|| RUNTIME.run()))
        .expect("cannot start a runtime thread");

    Runtime::new()
});
|
@ -0,0 +1,449 @@
|
||||
use std::cell::Cell;
|
||||
use std::io;
|
||||
use std::iter;
|
||||
use std::ptr;
|
||||
use std::sync::atomic::{self, AtomicBool, Ordering};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
use crossbeam_deque::{Injector, Steal, Stealer, Worker};
|
||||
use crossbeam_utils::thread::scope;
|
||||
use once_cell::unsync::OnceCell;
|
||||
|
||||
use crate::rt::Reactor;
|
||||
use crate::task::Runnable;
|
||||
use crate::utils::{abort_on_panic, random, Spinlock};
|
||||
|
||||
thread_local! {
    /// A reference to the current machine, if the current thread runs tasks.
    ///
    /// Set once by each machine thread before it enters `Machine::run`;
    /// `Runtime::schedule` uses it to push tasks onto the local machine
    /// instead of the global injector queue.
    static MACHINE: OnceCell<Arc<Machine>> = OnceCell::new();

    /// This flag is set to true whenever `task::yield_now()` is invoked.
    ///
    /// `Machine::run` checks and clears it on every iteration, flushing the
    /// processor's task slot so other tasks get a fair chance to run.
    static YIELD_NOW: Cell<bool> = Cell::new(false);
}
|
||||
|
||||
/// Scheduler state.
///
/// Shared by all machine threads through the `sched` mutex in `Runtime`;
/// every field is read and written only while that lock is held.
struct Scheduler {
    /// Set to `true` every time before a machine blocks polling the reactor.
    ///
    /// `Runtime::make_machines` clears it and starts a new machine if it
    /// stayed `false` while nobody was polling (i.e. the runtime is busy).
    progress: bool,

    /// Set to `true` while a machine is polling the reactor.
    ///
    /// At most one machine blocks on the reactor at any given time.
    polling: bool,

    /// Idle processors.
    ///
    /// Processors not currently owned by any machine; handed back out when
    /// new machines are started.
    processors: Vec<Processor>,

    /// Running machines.
    machines: Vec<Arc<Machine>>,
}
|
||||
|
||||
/// An async runtime.
///
/// Owns the reactor, the global task queue, and the scheduler state shared
/// by all machine threads.
pub struct Runtime {
    /// The reactor.
    reactor: Reactor,

    /// The global queue of tasks.
    injector: Injector<Runnable>,

    /// Handles to local queues for stealing work.
    ///
    /// One stealer per processor, created alongside the processors in
    /// `Runtime::new`.
    stealers: Vec<Stealer<Runnable>>,

    /// The scheduler state.
    sched: Mutex<Scheduler>,
}
|
||||
|
||||
impl Runtime {
    /// Creates a new runtime.
    ///
    /// One processor is created per CPU core (at least one). No threads are
    /// started here; machines are spun up lazily by `run`.
    pub fn new() -> Runtime {
        let cpus = num_cpus::get().max(1);
        let processors: Vec<_> = (0..cpus).map(|_| Processor::new()).collect();
        // Keep a stealer for every local queue so machines can steal from
        // each other.
        let stealers = processors.iter().map(|p| p.worker.stealer()).collect();

        Runtime {
            reactor: Reactor::new().unwrap(),
            injector: Injector::new(),
            stealers,
            sched: Mutex::new(Scheduler {
                processors,
                machines: Vec::new(),
                progress: false,
                polling: false,
            }),
        }
    }

    /// Returns a reference to the reactor.
    pub fn reactor(&self) -> &Reactor {
        &self.reactor
    }

    /// Flushes the task slot so that tasks get run more fairly.
    ///
    /// Only sets a thread-local flag; the actual flush happens on the next
    /// iteration of `Machine::run`.
    pub fn yield_now(&self) {
        YIELD_NOW.with(|flag| flag.set(true));
    }

    /// Schedules a task.
    pub fn schedule(&self, task: Runnable) {
        MACHINE.with(|machine| {
            // If the current thread is a worker thread, schedule it onto the current machine.
            // Otherwise, push it into the global task queue.
            match machine.get() {
                None => {
                    self.injector.push(task);
                    // Wake a machine that may be blocked on the reactor.
                    self.notify();
                }
                Some(m) => m.schedule(&self, task),
            }
        });
    }

    /// Runs the runtime on the current thread.
    ///
    /// This is the supervisor loop: it periodically starts new machines —
    /// replacing ones that appear stuck on a blocking task — and never
    /// returns.
    pub fn run(&self) {
        scope(|s| {
            let mut idle = 0;
            let mut delay = 0;

            loop {
                // Get a list of new machines to start, if any need to be started.
                for m in self.make_machines() {
                    idle = 0;

                    s.builder()
                        .name("async-std/machine".to_string())
                        .spawn(move |_| {
                            abort_on_panic(|| {
                                // Mark this thread as a worker thread so
                                // `schedule` uses the local queue.
                                let _ = MACHINE.with(|machine| machine.set(m.clone()));
                                m.run(self);
                            })
                        })
                        .expect("cannot start a machine thread");
                }

                // Sleep for a bit longer if the scheduler state hasn't changed in a while.
                // Once idle, back off exponentially up to 10ms between checks.
                if idle > 10 {
                    delay = (delay * 2).min(10_000);
                } else {
                    idle += 1;
                    delay = 1000;
                }

                thread::sleep(Duration::from_micros(delay));
            }
        })
        .unwrap();
    }

    /// Returns a list of machines that need to be started.
    ///
    /// Called only from the supervisor loop in `run`.
    fn make_machines(&self) -> Vec<Arc<Machine>> {
        let mut sched = self.sched.lock().unwrap();
        let mut to_start = Vec::new();

        // If there is a machine that is stuck on a task and not making any progress, steal its
        // processor and set up a new machine to take over.
        for m in &mut sched.machines {
            // `progress` was set to `true` by the machine before each task;
            // if it is still `false` from the last sweep, the machine is stuck.
            if !m.progress.swap(false, Ordering::SeqCst) {
                // try_lock avoids deadlocking with the machine itself; if it
                // holds its own spinlock right now, it is not stuck.
                let opt_p = m.processor.try_lock().and_then(|mut p| p.take());

                if let Some(p) = opt_p {
                    // Replace the stuck machine's entry with a fresh machine
                    // owning the stolen processor; the old machine will notice
                    // its processor is gone and shut down.
                    *m = Arc::new(Machine::new(p));
                    to_start.push(m.clone());
                }
            }
        }

        // If no machine has been polling the reactor in a while, that means the runtime is
        // overloaded with work and we need to start another machine.
        if !sched.polling {
            if !sched.progress {
                if let Some(p) = sched.processors.pop() {
                    let m = Arc::new(Machine::new(p));
                    to_start.push(m.clone());
                    sched.machines.push(m);
                }
            }

            // Reset so the next sweep can detect whether progress happened.
            sched.progress = false;
        }

        to_start
    }

    /// Unparks a thread polling the reactor.
    fn notify(&self) {
        // Full fence so the queue push above is visible before the wakeup.
        atomic::fence(Ordering::SeqCst);
        self.reactor.notify().unwrap();
    }

    /// Attempts to poll the reactor without blocking on it.
    ///
    /// Returns `Ok(true)` if at least one new task was woken.
    ///
    /// This function might not poll the reactor at all so do not rely on it doing anything. Only
    /// use for optimization.
    fn quick_poll(&self) -> io::Result<bool> {
        // Skip entirely if the scheduler lock is contended or another
        // machine is already blocked polling the reactor.
        if let Ok(sched) = self.sched.try_lock() {
            if !sched.polling {
                // Zero timeout: collect ready events without blocking.
                return self.reactor.poll(Some(Duration::from_secs(0)));
            }
        }
        Ok(false)
    }
}
|
||||
|
||||
/// A thread running a processor.
///
/// A machine normally owns exactly one `Processor`. If the machine blocks
/// inside a task for too long, the supervisor steals the processor away
/// (leaving `None`) and hands it to a freshly started machine.
struct Machine {
    /// Holds the processor until it gets stolen.
    processor: Spinlock<Option<Processor>>,

    /// Gets set to `true` before running every task to indicate the machine is not stuck.
    ///
    /// `Runtime::make_machines` swaps this to `false` on every sweep; if it
    /// was already `false`, the machine made no progress since the last sweep.
    progress: AtomicBool,
}
|
||||
|
||||
impl Machine {
    /// Creates a new machine running a processor.
    fn new(p: Processor) -> Machine {
        Machine {
            processor: Spinlock::new(Some(p)),
            // Start as "making progress" so a brand-new machine isn't
            // immediately judged stuck by the supervisor.
            progress: AtomicBool::new(true),
        }
    }

    /// Schedules a task onto the machine.
    fn schedule(&self, rt: &Runtime, task: Runnable) {
        match self.processor.lock().as_mut() {
            // The processor was stolen; fall back to the global queue.
            None => {
                rt.injector.push(task);
                rt.notify();
            }
            Some(p) => p.schedule(rt, task),
        }
    }

    /// Finds the next runnable task.
    ///
    /// Search order: local slot/queue, global queue, reactor quick-poll,
    /// local queue again, then other processors' queues.
    fn find_task(&self, rt: &Runtime) -> Steal<Runnable> {
        let mut retry = false;

        // First try finding a task in the local queue or in the global queue.
        if let Some(p) = self.processor.lock().as_mut() {
            if let Some(task) = p.pop_task() {
                return Steal::Success(task);
            }

            match p.steal_from_global(rt) {
                Steal::Empty => {}
                Steal::Retry => retry = true,
                Steal::Success(task) => return Steal::Success(task),
            }
        }

        // Try polling the reactor, but don't block on it.
        // NOTE: the processor spinlock must not be held here — quick_poll may
        // wake tasks that get scheduled back onto this machine.
        let progress = rt.quick_poll().unwrap();

        // Try finding a task in the local queue, which might hold tasks woken by the reactor. If
        // the local queue is still empty, try stealing from other processors.
        if let Some(p) = self.processor.lock().as_mut() {
            if progress {
                if let Some(task) = p.pop_task() {
                    return Steal::Success(task);
                }
            }

            match p.steal_from_others(rt) {
                Steal::Empty => {}
                Steal::Retry => retry = true,
                Steal::Success(task) => return Steal::Success(task),
            }
        }

        if retry { Steal::Retry } else { Steal::Empty }
    }

    /// Runs the machine on the current thread.
    ///
    /// Returns only when the machine's processor has been stolen by the
    /// supervisor or given back to the scheduler.
    fn run(&self, rt: &Runtime) {
        /// Number of yields when no runnable task is found.
        const YIELDS: u32 = 3;
        /// Number of short sleeps when no runnable task is found.
        const SLEEPS: u32 = 10;
        /// Number of runs in a row before the global queue is inspected.
        const RUNS: u32 = 64;

        // The number of times the thread found work in a row.
        let mut runs = 0;
        // The number of times the thread didn't find work in a row.
        let mut fails = 0;

        loop {
            // Let the scheduler know this machine is making progress.
            self.progress.store(true, Ordering::SeqCst);

            // Check if `task::yield_now()` was invoked and flush the slot if so.
            YIELD_NOW.with(|flag| {
                if flag.replace(false) {
                    if let Some(p) = self.processor.lock().as_mut() {
                        p.flush_slot(rt);
                    }
                }
            });

            // After a number of runs in a row, do some work to ensure no task is left behind
            // indefinitely. Poll the reactor, steal tasks from the global queue, and flush the
            // task slot.
            if runs >= RUNS {
                runs = 0;
                rt.quick_poll().unwrap();

                if let Some(p) = self.processor.lock().as_mut() {
                    if let Steal::Success(task) = p.steal_from_global(rt) {
                        p.schedule(rt, task);
                    }

                    p.flush_slot(rt);
                }
            }

            // Try to find a runnable task.
            if let Steal::Success(task) = self.find_task(rt) {
                task.run();
                runs += 1;
                fails = 0;
                continue;
            }

            fails += 1;

            // Check if the processor was stolen; if so, this machine is done.
            if self.processor.lock().is_none() {
                break;
            }

            // Yield the current thread a few times.
            if fails <= YIELDS {
                thread::yield_now();
                continue;
            }

            // Put the current thread to sleep a few times.
            // The processor is taken out for the duration of the sleep so the
            // supervisor won't mistake this idle nap for a stuck machine.
            if fails <= YIELDS + SLEEPS {
                let opt_p = self.processor.lock().take();
                thread::sleep(Duration::from_micros(10));
                *self.processor.lock() = opt_p;
                continue;
            }

            let mut sched = rt.sched.lock().unwrap();

            // One final check for available tasks while the scheduler is locked.
            if let Some(task) = iter::repeat_with(|| self.find_task(rt))
                .find(|s| !s.is_retry())
                .and_then(|s| s.success())
            {
                self.schedule(rt, task);
                continue;
            }

            // If another thread is already blocked on the reactor, there is no point in keeping
            // the current thread around since there is too little work to do.
            if sched.polling {
                break;
            }

            // Take out the machine associated with the current thread.
            let m = match sched
                .machines
                .iter()
                .position(|elem| ptr::eq(&**elem, self))
            {
                None => break, // The processor was stolen.
                Some(pos) => sched.machines.swap_remove(pos),
            };

            // Unlock the scheduler and poll the reactor until new I/O events arrive.
            sched.polling = true;
            drop(sched);
            rt.reactor.poll(None).unwrap();

            // Lock the scheduler again and re-register the machine.
            sched = rt.sched.lock().unwrap();
            sched.polling = false;
            sched.machines.push(m);
            sched.progress = true;

            runs = 0;
            fails = 0;
        }

        // When shutting down the thread, take the processor out if still available.
        let opt_p = self.processor.lock().take();

        // Return the processor to the scheduler and remove the machine.
        if let Some(p) = opt_p {
            let mut sched = rt.sched.lock().unwrap();
            sched.processors.push(p);
            sched.machines.retain(|elem| !ptr::eq(&**elem, self));
        }
    }
}
|
||||
|
||||
/// A unit of work-stealing state owned by at most one machine at a time.
struct Processor {
    /// The local task queue.
    worker: Worker<Runnable>,

    /// Contains the next task to run as an optimization that skips the queue.
    slot: Option<Runnable>,
}
|
||||
|
||||
impl Processor {
|
||||
/// Creates a new processor.
|
||||
fn new() -> Processor {
|
||||
Processor {
|
||||
worker: Worker::new_fifo(),
|
||||
slot: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Schedules a task to run on this processor.
|
||||
fn schedule(&mut self, rt: &Runtime, task: Runnable) {
|
||||
match self.slot.replace(task) {
|
||||
None => {}
|
||||
Some(task) => {
|
||||
self.worker.push(task);
|
||||
rt.notify();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Flushes a task from the slot into the local queue.
|
||||
fn flush_slot(&mut self, rt: &Runtime) {
|
||||
if let Some(task) = self.slot.take() {
|
||||
self.worker.push(task);
|
||||
rt.notify();
|
||||
}
|
||||
}
|
||||
|
||||
/// Pops a task from this processor.
|
||||
fn pop_task(&mut self) -> Option<Runnable> {
|
||||
self.slot.take().or_else(|| self.worker.pop())
|
||||
}
|
||||
|
||||
/// Steals a task from the global queue.
|
||||
fn steal_from_global(&mut self, rt: &Runtime) -> Steal<Runnable> {
|
||||
rt.injector.steal_batch_and_pop(&self.worker)
|
||||
}
|
||||
|
||||
/// Steals a task from other processors.
|
||||
fn steal_from_others(&mut self, rt: &Runtime) -> Steal<Runnable> {
|
||||
// Pick a random starting point in the list of queues.
|
||||
let len = rt.stealers.len();
|
||||
let start = random(len as u32) as usize;
|
||||
|
||||
// Create an iterator over stealers that starts from the chosen point.
|
||||
let (l, r) = rt.stealers.split_at(start);
|
||||
let stealers = r.iter().chain(l.iter());
|
||||
|
||||
// Try stealing a batch of tasks from each queue.
|
||||
stealers
|
||||
.map(|s| s.steal_batch_and_pop(&self.worker))
|
||||
.collect()
|
||||
}
|
||||
}
|
@ -1,13 +0,0 @@
|
||||
//! Task executor.
|
||||
//!
|
||||
//! API bindings between `crate::task` and this module are very simple:
|
||||
//!
|
||||
//! * The only export is the `schedule` function.
|
||||
//! * The only import is the `crate::task::Runnable` type.
|
||||
|
||||
pub(crate) use pool::schedule;
|
||||
|
||||
use sleepers::Sleepers;
|
||||
|
||||
mod pool;
|
||||
mod sleepers;
|
@ -1,179 +0,0 @@
|
||||
use std::cell::Cell;
|
||||
use std::iter;
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
use crossbeam_deque::{Injector, Stealer, Worker};
|
||||
use once_cell::sync::Lazy;
|
||||
use once_cell::unsync::OnceCell;
|
||||
|
||||
use crate::task::executor::Sleepers;
|
||||
use crate::task::Runnable;
|
||||
use crate::utils::{abort_on_panic, random};
|
||||
|
||||
/// The state of an executor.
///
/// A single instance lives in the `POOL` static and is shared by every
/// worker thread.
struct Pool {
    /// The global queue of tasks.
    injector: Injector<Runnable>,

    /// Handles to local queues for stealing work from worker threads.
    stealers: Vec<Stealer<Runnable>>,

    /// Used for putting idle workers to sleep and notifying them when new tasks come in.
    sleepers: Sleepers,
}
|
||||
|
||||
/// Global executor that runs spawned tasks.
///
/// First access spawns one worker thread per CPU core (at least one), each
/// owning a `Processor`, and then returns the shared `Pool` state.
static POOL: Lazy<Pool> = Lazy::new(|| {
    let num_threads = num_cpus::get().max(1);
    let mut stealers = Vec::new();

    // Spawn worker threads.
    for _ in 0..num_threads {
        let worker = Worker::new_fifo();
        // Keep a stealer handle so other workers can take work from this queue.
        stealers.push(worker.stealer());

        let proc = Processor {
            worker,
            slot: Cell::new(None),
            slot_runs: Cell::new(0),
        };

        thread::Builder::new()
            .name("async-std/executor".to_string())
            .spawn(|| {
                // Register this thread's processor before entering the loop;
                // `schedule` and `find_runnable` rely on it being set.
                let _ = PROCESSOR.with(|p| p.set(proc));
                // Abort on panic: an unwound worker would silently shrink the pool.
                abort_on_panic(main_loop);
            })
            .expect("cannot start a thread driving tasks");
    }

    Pool {
        injector: Injector::new(),
        stealers,
        sleepers: Sleepers::new(),
    }
});
|
||||
|
||||
/// The state of a worker thread.
struct Processor {
    /// The local task queue.
    worker: Worker<Runnable>,

    /// Contains the next task to run as an optimization that skips queues.
    slot: Cell<Option<Runnable>>,

    /// How many times in a row tasks have been taken from the slot rather than the queue.
    ///
    /// Capped by `SLOT_LIMIT` in `find_runnable` so that slot-to-slot
    /// scheduling cannot starve the queues indefinitely.
    slot_runs: Cell<u32>,
}
|
||||
|
||||
thread_local! {
    /// Worker thread state.
    ///
    /// Set exactly once, right after the worker thread is spawned in the
    /// `POOL` initializer; remains unset on non-worker threads.
    static PROCESSOR: OnceCell<Processor> = OnceCell::new();
}
|
||||
|
||||
/// Schedules a new runnable task for execution.
///
/// Worker threads keep the task in their local slot/queue; any other thread
/// pushes it to the global queue and wakes a sleeping worker.
pub(crate) fn schedule(task: Runnable) {
    PROCESSOR.with(|proc| {
        // If the current thread is a worker thread, store it into its task slot or push it into
        // its local task queue. Otherwise, push it into the global task queue.
        match proc.get() {
            Some(proc) => {
                // Replace the task in the slot.
                if let Some(task) = proc.slot.replace(Some(task)) {
                    // If the slot already contained a task, push it into the local task queue.
                    proc.worker.push(task);
                    // The displaced task is now stealable, so wake a sleeper.
                    POOL.sleepers.notify_one();
                }
            }
            None => {
                POOL.injector.push(task);
                POOL.sleepers.notify_one();
            }
        }
    })
}
|
||||
|
||||
/// Main loop running a worker thread.
///
/// Runs tasks until none can be found, then backs off progressively:
/// a few yields, a short sleep, and finally blocking on the sleepers list.
/// Never returns.
fn main_loop() {
    /// Number of yields when no runnable task is found.
    const YIELDS: u32 = 3;
    /// Number of short sleeps when no runnable task is found.
    const SLEEPS: u32 = 1;

    // The number of times the thread didn't find work in a row.
    let mut fails = 0;

    loop {
        // Try to find a runnable task.
        match find_runnable() {
            Some(task) => {
                fails = 0;

                // Run the found task.
                task.run();
            }
            None => {
                fails += 1;

                // Yield the current thread or put it to sleep.
                if fails <= YIELDS {
                    thread::yield_now();
                } else if fails <= YIELDS + SLEEPS {
                    thread::sleep(Duration::from_micros(10));
                } else {
                    // Block until another thread schedules work.
                    POOL.sleepers.wait();
                    fails = 0;
                }
            }
        }
    }
}
|
||||
|
||||
/// Find the next runnable task.
///
/// Search order: the thread-local slot (rate-limited), the local queue,
/// the global queue, then other workers' queues. Returns `None` when no
/// task could be found anywhere.
///
/// Panics if called from a non-worker thread (the `PROCESSOR` cell is unset).
fn find_runnable() -> Option<Runnable> {
    /// Maximum number of times the slot can be used in a row.
    const SLOT_LIMIT: u32 = 16;

    PROCESSOR.with(|proc| {
        let proc = proc.get().unwrap();

        // Try taking a task from the slot, unless it has been used too many
        // times in a row — that would starve the queues.
        let runs = proc.slot_runs.get();
        if runs < SLOT_LIMIT {
            if let Some(task) = proc.slot.take() {
                proc.slot_runs.set(runs + 1);
                return Some(task);
            }
        }
        proc.slot_runs.set(0);

        // Pop a task from the local queue, if not empty.
        proc.worker.pop().or_else(|| {
            // Otherwise, we need to look for a task elsewhere.
            iter::repeat_with(|| {
                // Try stealing a batch of tasks from the global queue.
                POOL.injector
                    .steal_batch_and_pop(&proc.worker)
                    // Or try stealing a batch of tasks from one of the other threads.
                    .or_else(|| {
                        // First, pick a random starting point in the list of local queues.
                        let len = POOL.stealers.len();
                        let start = random(len as u32) as usize;

                        // Try stealing a batch of tasks from each local queue starting from the
                        // chosen point.
                        let (l, r) = POOL.stealers.split_at(start);
                        let stealers = r.iter().chain(l.iter());
                        stealers
                            .map(|s| s.steal_batch_and_pop(&proc.worker))
                            .collect()
                    })
            })
            // Loop while no task was stolen and any steal operation needs to be retried.
            .find(|s| !s.is_retry())
            // Extract the stolen task, if there is one.
            .and_then(|s| s.success())
        })
    })
}
|
@ -1,52 +0,0 @@
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::{Condvar, Mutex};
|
||||
|
||||
/// The place where worker threads go to sleep.
|
||||
///
|
||||
/// Similar to how thread parking works, if a notification comes up while no threads are sleeping,
|
||||
/// the next thread that attempts to go to sleep will pick up the notification immediately.
|
||||
pub struct Sleepers {
|
||||
/// How many threads are currently a sleep.
|
||||
sleep: Mutex<usize>,
|
||||
|
||||
/// A condvar for notifying sleeping threads.
|
||||
wake: Condvar,
|
||||
|
||||
/// Set to `true` if a notification came up while nobody was sleeping.
|
||||
notified: AtomicBool,
|
||||
}
|
||||
|
||||
impl Sleepers {
|
||||
/// Creates a new `Sleepers`.
|
||||
pub fn new() -> Sleepers {
|
||||
Sleepers {
|
||||
sleep: Mutex::new(0),
|
||||
wake: Condvar::new(),
|
||||
notified: AtomicBool::new(false),
|
||||
}
|
||||
}
|
||||
|
||||
/// Puts the current thread to sleep.
|
||||
pub fn wait(&self) {
|
||||
let mut sleep = self.sleep.lock().unwrap();
|
||||
|
||||
if !self.notified.swap(false, Ordering::SeqCst) {
|
||||
*sleep += 1;
|
||||
let _ = self.wake.wait(sleep).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
/// Notifies one thread.
|
||||
pub fn notify_one(&self) {
|
||||
if !self.notified.load(Ordering::SeqCst) {
|
||||
let mut sleep = self.sleep.lock().unwrap();
|
||||
|
||||
if *sleep > 0 {
|
||||
*sleep -= 1;
|
||||
self.wake.notify_one();
|
||||
} else {
|
||||
self.notified.store(true, Ordering::SeqCst);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
Loading…
Reference in New Issue