Compare commits


No commits in common. "master" and "new-scheduler" have entirely different histories.

187 changed files with 2739 additions and 3908 deletions

View file

@ -29,24 +29,6 @@ jobs:
toolchain: ${{ matrix.rust }}
override: true
- name: Cache cargo registry
uses: actions/cache@v2
with:
path: ~/.cargo/registry
key: ${{ matrix.os }}-${{ matrix.rust }}-cargo-registry-${{ hashFiles('**/Cargo.toml') }}
- name: Cache cargo index
uses: actions/cache@v2
with:
path: ~/.cargo/git
key: ${{ matrix.os }}-${{ matrix.rust }}-cargo-index-${{ hashFiles('**/Cargo.toml') }}
- name: Cache cargo build
uses: actions/cache@v2
with:
path: target
key: ${{ matrix.os }}-${{ matrix.rust }}-cargo-build-target-${{ hashFiles('**/Cargo.toml') }}
- name: check
uses: actions-rs/cargo@v1
with:
@ -58,15 +40,6 @@ jobs:
with:
command: check
args: --features unstable --all --bins --examples --tests
- name: check wasm
uses: actions-rs/cargo@v1
with:
command: check
target: wasm32-unknown-unknown
override: true
args: --features unstable --all --bins --tests
- name: check bench
uses: actions-rs/cargo@v1
if: matrix.rust == 'nightly'
@ -90,69 +63,7 @@ jobs:
uses: actions-rs/cargo@v1
with:
command: test
args: --all --features "unstable attributes"
build__with_no_std:
name: Build with no-std
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- name: setup
run: |
rustup default nightly
rustup target add thumbv7m-none-eabi
- name: check no_std
uses: actions-rs/cargo@v1
with:
command: check
args: --no-default-features --features alloc --target thumbv7m-none-eabi -Z avoid-dev-deps
check_tokio_02_feature:
name: Check tokio02 feature
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- name: check tokio02
uses: actions-rs/cargo@v1
with:
command: check
args: --all --features tokio02
cross:
name: Cross compile
runs-on: ubuntu-latest
strategy:
matrix:
target:
- i686-unknown-linux-gnu
- powerpc-unknown-linux-gnu
- powerpc64-unknown-linux-gnu
- mips-unknown-linux-gnu
- arm-linux-androideabi
steps:
- uses: actions/checkout@master
- name: Install nightly
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
override: true
- name: Install cross
run: cargo install cross
- name: check
run: cross check --all --target ${{ matrix.target }}
- name: check unstable
run: cross check --all --features unstable --target ${{ matrix.target }}
- name: test
run: cross test --all --features unstable --target ${{ matrix.target }}
args: --all --features unstable attributes
check_fmt_and_docs:
name: Checking fmt and docs
@ -181,3 +92,15 @@ jobs:
- name: Docs
run: cargo doc --features docs
# clippy_check:
# name: Clippy check
# runs-on: ubuntu-latest
# steps:
# - uses: actions/checkout@v1
# - name: Install rust
# run: rustup update beta && rustup default beta
# - name: Install clippy
# run: rustup component add clippy
# - name: clippy
# run: cargo clippy --all --features unstable

View file

@ -7,199 +7,6 @@ and this project adheres to [Semantic Versioning](https://book.async.rs/overview
## [Unreleased]
# [1.6.2] - 2020-06-19
## Added
- Add `UdpSocket::peer_addr` ([#816](https://github.com/async-rs/async-std/pull/816))
## Changed
## Fixed
- Ensure the reactor is running for sockets and timers ([#819](https://github.com/async-rs/async-std/pull/819)).
- Avoid excessive polling in `flatten` and `flat_map` ([#701](https://github.com/async-rs/async-std/pull/701))
# [1.6.1] - 2020-06-11
## Added
- Added `tokio02` feature flag, to allow compatible usage with tokio@0.2 ([#804](https://github.com/async-rs/async-std/pull/804)).
## Changed
- Removed unstable `stdio` lock methods, due to their unsoundness ([#807](https://github.com/async-rs/async-std/pull/807)).
## Fixed
- Fixed wrong slice index for file reading ([#802](https://github.com/async-rs/async-std/pull/802)).
- Fixed recursive calls to `block_on` ([#799](https://github.com/async-rs/async-std/pull/799)) and ([#809](https://github.com/async-rs/async-std/pull/809)).
- Remove `default` feature requirement for the `unstable` feature ([#806](https://github.com/async-rs/async-std/pull/806)).
# [1.6.0] - 2020-05-22
See `1.6.0-beta.1` and `1.6.0-beta.2`.
# [1.6.0-beta.2] - 2020-05-19
## Added
- Added an environment variable to configure the thread pool size of the runtime. ([#774](https://github.com/async-rs/async-std/pull/774))
- Implement `Clone` for `UnixStream` ([#772](https://github.com/async-rs/async-std/pull/772))
## Changed
- For `wasm`, switched underlying `Timer` implementation to [`futures-timer`](https://github.com/async-rs/futures-timer). ([#776](https://github.com/async-rs/async-std/pull/776))
## Fixed
- Use `smol::block_on` to handle drop of `File`, avoiding nested executor panic. ([#768](https://github.com/async-rs/async-std/pull/768))
# [1.6.0-beta.1] - 2020-05-07
## Added
- Added `task::spawn_local`. ([#757](https://github.com/async-rs/async-std/pull/757))
- Added out of the box support for `wasm`. ([#757](https://github.com/async-rs/async-std/pull/757))
- Added `JoinHandle::cancel` ([#757](https://github.com/async-rs/async-std/pull/757))
- Added `sync::Condvar` ([#369](https://github.com/async-rs/async-std/pull/369))
- Added `sync::Sender::try_send` and `sync::Receiver::try_recv` ([#585](https://github.com/async-rs/async-std/pull/585))
- Added `no_std` support for `task`, `future` and `stream` ([#680](https://github.com/async-rs/async-std/pull/680))
## Changed
- Switched underlying runtime to [`smol`](https://github.com/stjepang/smol/). ([#757](https://github.com/async-rs/async-std/pull/757))
- Switched implementation of `sync::Barrier` to use `sync::Condvar` like `std` does. ([#581](https://github.com/async-rs/async-std/pull/581))
## Fixed
- Allow compilation on 32 bit targets, by using `AtomicUsize` for `TaskId`. ([#756](https://github.com/async-rs/async-std/pull/756))
# [1.5.0] - 2020-02-03
[API Documentation](https://docs.rs/async-std/1.5.0/async-std)
This patch includes various quality-of-life improvements to async-std,
including improved performance, stability, and the addition of various
`Clone` impls that replace the use of `Arc` in many cases.
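As a rough illustration of what these `Clone` impls enable, here is a minimal sketch; the address, message, and buffer size are placeholders, and it assumes a server is listening at that address:
```rust
use async_std::io::prelude::*;
use async_std::net::TcpStream;
use async_std::task;

fn main() -> std::io::Result<()> {
    task::block_on(async {
        // Placeholder address; assumes a server is listening there.
        let stream = TcpStream::connect("127.0.0.1:8080").await?;

        // With `impl Clone for TcpStream`, two tasks can share the connection
        // by cloning the handle instead of wrapping it in `Arc<TcpStream>`.
        let mut writer = stream.clone();
        let reader = task::spawn(async move {
            let mut stream = stream;
            let mut buf = vec![0u8; 1024];
            stream.read(&mut buf).await
        });

        writer.write_all(b"ping").await?;
        let _bytes_read = reader.await?;
        Ok(())
    })
}
```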
## Added
- Added links to various ecosystem projects from the README ([#660](https://github.com/async-rs/async-std/pull/660))
- Added an example on `FromStream` for `Result<T, E>` ([#643](https://github.com/async-rs/async-std/pull/643))
- Added `stream::pending` as "unstable" ([#615](https://github.com/async-rs/async-std/pull/615))
- Added an example of `stream::timeout` to document the error flow ([#675](https://github.com/async-rs/async-std/pull/675))
- Implement `Clone` for `DirEntry` ([#682](https://github.com/async-rs/async-std/pull/682))
- Implement `Clone` for `TcpStream` ([#689](https://github.com/async-rs/async-std/pull/689))
## Changed
- Removed internal comment on `stream::Interval` ([#645](https://github.com/async-rs/async-std/pull/645))
- The "unstable" feature can now be used without requiring the "default" feature ([#647](https://github.com/async-rs/async-std/pull/647))
- Removed unnecessary trait bound on `stream::FlatMap` ([#651](https://github.com/async-rs/async-std/pull/651))
- Updated the "broadcaster" dependency used by "unstable" to `1.0.0` ([#681](https://github.com/async-rs/async-std/pull/681))
- Updated `async-task` to 1.2.1 ([#676](https://github.com/async-rs/async-std/pull/676))
- `task::block_on` now parks after a single poll, improving performance in many cases ([#684](https://github.com/async-rs/async-std/pull/684))
- Improved reading flow of the "client" part of the async-std tutorial ([#550](https://github.com/async-rs/async-std/pull/550))
- Use `take_while` instead of `scan` in `impl` of `Product`, `Sum` and `FromStream` ([#667](https://github.com/async-rs/async-std/pull/667))
- `TcpStream::connect` no longer uses a thread from the threadpool, improving performance ([#687](https://github.com/async-rs/async-std/pull/687))
## Fixed
- Fixed crate documentation typo ([#655](https://github.com/async-rs/async-std/pull/655))
- Fixed documentation for `UdpSocket::recv` ([#648](https://github.com/async-rs/async-std/pull/648))
- Fixed documentation for `UdpSocket::send` ([#671](https://github.com/async-rs/async-std/pull/671))
- Fixed typo in stream documentation ([#650](https://github.com/async-rs/async-std/pull/650))
- Fixed typo on `sync::JoinHandle` documentation ([#659](https://github.com/async-rs/async-std/pull/659))
- Removed use of `std::error::Error::description` which failed CI ([#661](https://github.com/async-rs/async-std/pull/662))
- Removed the use of rustfmt's unstable `format_code_in_doc_comments` option which failed CI ([#685](https://github.com/async-rs/async-std/pull/685))
- Fixed a code typo in the `task::sleep` example ([#688](https://github.com/async-rs/async-std/pull/688))
# [1.4.0] - 2019-12-20
[API Documentation](https://docs.rs/async-std/1.4.0/async-std)
This patch adds `Future::timeout`, providing a method counterpart to the
`future::timeout` free function. It also includes several bug fixes around missing
APIs. Notably, we're not shipping our new executor yet, first announced [on our
blog](https://async.rs/blog/stop-worrying-about-blocking-the-new-async-std-runtime/).
## Examples
```rust
use async_std::prelude::*;
use async_std::future;
use std::time::Duration;
let fut = future::pending::<()>(); // This future will never resolve.
let res = fut.timeout(Duration::from_millis(100)).await;
assert!(res.is_err()); // The future timed out, returning an err.
```
## Added
- Added `Future::timeout` as "unstable" [(#600)](https://github.com/async-rs/async-std/pull/600)
## Fixes
- Fixed a doc test and enabled it on CI [(#597)](https://github.com/async-rs/async-std/pull/597)
- Fixed a rendering issue with the `stream` submodule documentation [(#621)](https://github.com/async-rs/async-std/pull/621)
- `Write::write_fmt`'s future is now correctly marked as `#[must_use]` [(#628)](https://github.com/async-rs/async-std/pull/628)
- Fixed the missing `io::Bytes` export [(#633)](https://github.com/async-rs/async-std/pull/633)
- Fixed the missing `io::Chain` export [(#633)](https://github.com/async-rs/async-std/pull/633)
- Fixed the missing `io::Take` export [(#633)](https://github.com/async-rs/async-std/pull/633)
# [1.3.0] - 2019-12-12
[API Documentation](https://docs.rs/async-std/1.3.0/async-std)
This patch introduces `Stream::delay`, more methods on `DoubleEndedStream`,
and improves compile times. `Stream::delay` is a new API that's similar to
[`task::sleep`](https://docs.rs/async-std/1.2.0/async_std/task/fn.sleep.html),
but can be passed as part of a stream, rather than as a separate block. This is
useful for examples, or when manually debugging race conditions.
## Examples
```rust
use std::time::{Duration, Instant};
use async_std::prelude::*;
use async_std::stream;

let start = Instant::now();
let mut s = stream::from_iter(vec![0u8, 1]).delay(Duration::from_millis(200));
// The first time will take more than 200ms due to delay.
s.next().await;
assert!(start.elapsed().as_millis() >= 200);
// There will be no delay after the first time.
s.next().await;
assert!(start.elapsed().as_millis() <= 210);
```
## Added
- Added `Stream::delay` as "unstable" [(#309)](https://github.com/async-rs/async-std/pull/309)
- Added `DoubleEndedStream::next_back` as "unstable" [(#562)](https://github.com/async-rs/async-std/pull/562)
- Added `DoubleEndedStream::nth_back` as "unstable" [(#562)](https://github.com/async-rs/async-std/pull/562)
- Added `DoubleEndedStream::rfind` as "unstable" [(#562)](https://github.com/async-rs/async-std/pull/562)
- Added `DoubleEndedStream::rfold` as "unstable" [(#562)](https://github.com/async-rs/async-std/pull/562)
- Added `DoubleEndedStream::try_rfold` as "unstable" [(#562)](https://github.com/async-rs/async-std/pull/562)
- `stream::Once` now implements `DoubleEndedStream` [(#562)](https://github.com/async-rs/async-std/pull/562)
- `stream::FromIter` now implements `DoubleEndedStream` [(#562)](https://github.com/async-rs/async-std/pull/562)
## Changed
- Removed our dependency on `async-macros`, speeding up compilation [(#610)](https://github.com/async-rs/async-std/pull/610)
## Fixes
- Fixed a link in the task docs [(#598)](https://github.com/async-rs/async-std/pull/598)
- Fixed the `UdpSocket::recv` example [(#603)](https://github.com/async-rs/async-std/pull/603)
- Fixed a link to `task::block_on` [(#608)](https://github.com/async-rs/async-std/pull/608)
- Fixed an incorrect API mention in `task::Builder` [(#612)](https://github.com/async-rs/async-std/pull/612)
- Fixed leftover mentions of `futures-preview` [(#595)](https://github.com/async-rs/async-std/pull/595)
- Fixed a typo in the tutorial [(#614)](https://github.com/async-rs/async-std/pull/614)
- `<TcpStream as Write>::poll_close` now closes the write half of the stream [(#618)](https://github.com/async-rs/async-std/pull/618)
# [1.2.0] - 2019-11-27
[API Documentation](https://docs.rs/async-std/1.2.0/async-std)
@ -746,15 +553,7 @@ task::blocking(async {
- Initial beta release
[Unreleased]: https://github.com/async-rs/async-std/compare/v1.6.2...HEAD
[1.6.2]: https://github.com/async-rs/async-std/compare/v1.6.1...v1.6.2
[1.6.1]: https://github.com/async-rs/async-std/compare/v1.6.0...v1.6.1
[1.6.0]: https://github.com/async-rs/async-std/compare/v1.5.0...v1.6.0
[1.6.0-beta.2]: https://github.com/async-rs/async-std/compare/v1.6.0-beta.1...v1.6.0-beta.2
[1.6.0-beta.1]: https://github.com/async-rs/async-std/compare/v1.5.0...v1.6.0-beta.1
[1.5.0]: https://github.com/async-rs/async-std/compare/v1.4.0...v1.5.0
[1.4.0]: https://github.com/async-rs/async-std/compare/v1.3.0...v1.4.0
[1.3.0]: https://github.com/async-rs/async-std/compare/v1.2.0...v1.3.0
[Unreleased]: https://github.com/async-rs/async-std/compare/v1.2.0...HEAD
[1.2.0]: https://github.com/async-rs/async-std/compare/v1.1.0...v1.2.0
[1.1.0]: https://github.com/async-rs/async-std/compare/v1.0.1...v1.1.0
[1.0.1]: https://github.com/async-rs/async-std/compare/v1.0.0...v1.0.1

View file

@ -1,10 +1,9 @@
[package]
name = "async-std"
version = "1.6.2"
version = "1.2.0"
authors = [
"Stjepan Glavina <stjepang@gmail.com>",
"Yoshua Wuyts <yoshuawuyts@gmail.com>",
"Friedel Ziegelmayer <me@dignifiedquire.com>",
"Contributors to async-std",
]
edition = "2018"
@ -25,74 +24,61 @@ rustdoc-args = ["--cfg", "feature=\"docs\""]
default = [
"std",
"async-task",
"crossbeam-channel",
"crossbeam-deque",
"crossbeam-queue",
"futures-timer",
"kv-log-macro",
"log",
"mio",
"mio-uds",
"num_cpus",
"pin-project-lite",
"smol",
]
docs = ["attributes", "unstable", "default"]
unstable = [
"std",
"futures-timer",
]
docs = ["attributes", "unstable"]
unstable = ["default", "broadcaster"]
attributes = ["async-attributes"]
std = [
"alloc",
"async-macros",
"crossbeam-utils",
"futures-core/std",
"futures-core",
"futures-io",
"memchr",
"once_cell",
"pin-project-lite",
"pin-utils",
"slab",
"wasm-bindgen-futures",
"futures-channel",
"async-mutex",
]
alloc = [
"futures-core/alloc",
"pin-project-lite",
]
tokio02 = ["smol/tokio02"]
[dependencies]
async-attributes = { version = "1.1.1", optional = true }
async-task = { version = "3.0.0", optional = true }
async-mutex = { version = "1.1.3", optional = true }
crossbeam-utils = { version = "0.7.2", optional = true }
futures-core = { version = "0.3.4", optional = true, default-features = false }
futures-io = { version = "0.3.4", optional = true }
kv-log-macro = { version = "1.0.6", optional = true }
async-macros = { version = "2.0.0", optional = true }
async-task = { version = "1.0.0", optional = true }
broadcaster = { version = "0.2.6", optional = true, default-features = false, features = ["default-channels"] }
crossbeam-channel = { version = "0.4.0", optional = true }
crossbeam-deque = { version = "0.7.2", optional = true }
crossbeam-queue = { version = "0.2.0", optional = true }
crossbeam-utils = { version = "0.7.0", optional = true }
futures-core = { version = "0.3.1", optional = true }
futures-io = { version = "0.3.1", optional = true }
futures-timer = { version = "2.0.2", optional = true }
kv-log-macro = { version = "1.0.4", optional = true }
log = { version = "0.4.8", features = ["kv_unstable"], optional = true }
memchr = { version = "2.3.3", optional = true }
num_cpus = { version = "1.12.0", optional = true }
once_cell = { version = "1.3.1", optional = true }
pin-project-lite = { version = "0.1.4", optional = true }
memchr = { version = "2.2.1", optional = true }
mio = { version = "0.6.19", optional = true }
mio-uds = { version = "0.6.7", optional = true }
num_cpus = { version = "1.11.1", optional = true }
once_cell = { version = "1.2.0", optional = true }
pin-project-lite = { version = "0.1.1", optional = true }
pin-utils = { version = "0.1.0-alpha.4", optional = true }
slab = { version = "0.4.2", optional = true }
futures-timer = { version = "3.0.2", optional = true }
# Dev-dependency, but they are not allowed to be optional :/
surf = { version = "1.0.3", optional = true }
[target.'cfg(not(target_os = "unknown"))'.dependencies]
smol = { version = "0.1.17", optional = true }
[target.'cfg(target_arch = "wasm32")'.dependencies]
futures-timer = { version = "3.0.2", optional = true, features = ["wasm-bindgen"] }
wasm-bindgen-futures = { version = "0.4.10", optional = true }
futures-channel = { version = "0.3.4", optional = true }
[target.'cfg(target_arch = "wasm32")'.dev-dependencies]
wasm-bindgen-test = "0.3.10"
[dev-dependencies]
femme = "1.3.0"
rand = "0.7.3"
rand = "0.7.2"
surf = "1.0.3"
tempdir = "0.3.7"
futures = "0.3.4"
rand_xorshift = "0.2.0"
futures = "0.3.1"
[[test]]
name = "stream"
@ -101,7 +87,3 @@ required-features = ["unstable"]
[[example]]
name = "tcp-ipv4-and-6-echo"
required-features = ["unstable"]
[[example]]
name = "surf-web"
required-features = ["surf"]

View file

@ -8,11 +8,6 @@
<br />
<div align="center">
<!-- CI status -->
<a href="https://github.com/async-rs/async-std/actions">
<img src="https://github.com/async-rs/async-std/workflows/CI/badge.svg"
alt="CI Status" />
</a>
<!-- Crates version -->
<a href="https://crates.io/crates/async-std">
<img src="https://img.shields.io/crates/v/async-std.svg?style=flat-square"
@ -79,15 +74,18 @@ syntax.
## Examples
```rust
use async_std::task;
All examples require the [`"attributes"` feature] to be enabled. This feature
is not enabled by default because it significantly impacts compile times. See
[`task::block_on`] for an alternative way to start executing tasks.
```rust
async fn say_hello() {
println!("Hello, world!");
}
fn main() {
task::block_on(say_hello())
#[async_std::main]
async fn main() {
say_hello().await;
}
```
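For comparison, a minimal sketch of the `task::block_on` alternative mentioned above, which works without the `attributes` feature:
```rust
use async_std::task;

async fn say_hello() {
    println!("Hello, world!");
}

// Without the `attributes` feature there is no `#[async_std::main]`,
// so the executor is started explicitly.
fn main() {
    task::block_on(say_hello())
}
```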
@ -125,24 +123,6 @@ documentation] on how to enable them.
[cargo-add]: https://github.com/killercup/cargo-edit
[features documentation]: https://docs.rs/async-std/#features
## Ecosystem
* [async-tls](https://crates.io/crates/async-tls) — Async TLS/SSL streams using **Rustls**.
* [async-native-tls](https://crates.io/crates/async-native-tls) — **Native TLS** for Async. Native TLS for futures and async-std.
* [async-tungstenite](https://crates.io/crates/async-tungstenite) — Asynchronous **WebSockets** for async-std, tokio, gio and any std Futures runtime.
* [Tide](https://crates.io/crates/tide) — Serve the web. A modular **web framework** built around async/await.
* [SQLx](https://crates.io/crates/sqlx) — The Rust **SQL** Toolkit. SQLx is a 100% safe Rust library for Postgres and MySQL with compile-time checked queries.
* [Surf](https://crates.io/crates/surf) — Surf the web. Surf is a friendly **HTTP client** built for casual Rustaceans and veterans alike.
* [Xactor](https://crates.io/crates/xactor) — Xactor is a rust actors framework based on async-std.
* [async-graphql](https://crates.io/crates/async-graphql) — A GraphQL server library implemented in rust, with full support for async/await.
## License
<sup>

View file

@ -19,9 +19,8 @@
- [Clean Shutdown](./tutorial/clean_shutdown.md)
- [Handling Disconnection](./tutorial/handling_disconnection.md)
- [Implementing a Client](./tutorial/implementing_a_client.md)
- [Async Patterns](./patterns.md)
- [TODO: Async Patterns](./patterns.md)
- [TODO: Collected Small Patterns](./patterns/small-patterns.md)
- [Production-Ready Accept Loop](./patterns/accept-loop.md)
- [Security practices](./security/index.md)
- [Security Disclosures and Policy](./security/policy.md)
- [Glossary](./glossary.md)

View file

@ -1,266 +0,0 @@
# Production-Ready Accept Loop
A production-ready accept loop needs the following things:
1. Handling errors
2. Limiting the number of simultaneous connections to avoid denial-of-service
(DoS) attacks
## Handling errors
There are two kinds of errors in an accept loop:
1. Per-connection errors. The system uses them to signal that a connection in the
queue was dropped by the peer. Subsequent connections may already be queued, so
the next connection must be accepted immediately.
2. Resource shortages. When these are encountered, it doesn't make sense to
accept the next socket immediately. But the listener stays active, so your server
should try to accept a socket again later.
Here is an example of a per-connection error (printed in normal and debug mode):
```
Error: Connection reset by peer (os error 104)
Error: Os { code: 104, kind: ConnectionReset, message: "Connection reset by peer" }
```
And the following is the most common example of a resource shortage error:
```
Error: Too many open files (os error 24)
Error: Os { code: 24, kind: Other, message: "Too many open files" }
```
### Testing Application
To test your application for these errors, try the following (this works
on Unix-like systems only).
Lower the file-descriptor limit and start the application:
```
$ ulimit -n 100
$ cargo run --example your_app
Compiling your_app v0.1.0 (/work)
Finished dev [unoptimized + debuginfo] target(s) in 5.47s
Running `target/debug/examples/your_app`
Server is listening on: http://127.0.0.1:1234
```
Then in another console run the [`wrk`] benchmark tool:
```
$ wrk -c 1000 http://127.0.0.1:1234
Running 10s test @ http://localhost:8080/
2 threads and 1000 connections
$ telnet localhost 1234
Trying ::1...
Connected to localhost.
```
It is important to check the following things:
1. The application doesn't crash on error (but may log errors, see below)
2. It's possible to connect to the application again once the load is stopped
(a few seconds after `wrk` finishes). This is what `telnet` does in the example above;
make sure it prints `Connected to <hostname>`.
3. The `Too many open files` error is logged in the appropriate log. This
requires setting the "maximum number of simultaneous connections" parameter (see
below) of your application to a value greater than `100` for this example.
4. Check CPU usage of the app while running the test. It should not occupy 100%
of a single CPU core (it's unlikely that 1000 connections can exhaust the CPU in
Rust, so if it does, error handling is probably not right).
#### Testing non-HTTP applications
If possible, use the appropriate benchmark tool and set the appropriate
number of connections. For example, `redis-benchmark` has a `-c` parameter for
that, if you implement the Redis protocol.
Alternatively, you can still use `wrk`; just make sure that the connection is not
immediately closed. If it is, put a temporary timeout before handing
the connection to the protocol handler, like this:
```rust,edition2018
# extern crate async_std;
# use std::time::Duration;
# use async_std::{
# net::{TcpListener, ToSocketAddrs},
# prelude::*,
# task,
# };
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
#
# async fn accept_loop(addr: impl ToSocketAddrs) -> Result<()> {
# let listener = TcpListener::bind(addr).await?;
# let mut incoming = listener.incoming();
while let Some(stream) = incoming.next().await {
task::spawn(async move {
task::sleep(Duration::from_secs(10)).await; // 1
connection_loop(stream).await;
});
}
# Ok(())
# }
```
1. Make sure the sleep coroutine is inside the spawned task, not in the loop.
[`wrk`]: https://github.com/wg/wrk
### Handling Errors Manually
Here is how a basic accept loop could look:
```rust,edition2018
# extern crate async_std;
# use std::time::Duration;
# use async_std::{
# net::{TcpListener, ToSocketAddrs},
# prelude::*,
# task,
# };
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
#
async fn accept_loop(addr: impl ToSocketAddrs) -> Result<()> {
let listener = TcpListener::bind(addr).await?;
let mut incoming = listener.incoming();
while let Some(result) = incoming.next().await {
let stream = match result {
Err(ref e) if is_connection_error(e) => continue, // 1
Err(e) => {
eprintln!("Error: {}. Pausing for 500ms.", e); // 3
task::sleep(Duration::from_millis(500)).await; // 2
continue;
}
Ok(s) => s,
};
// body
}
Ok(())
}
```
1. Ignore per-connection errors.
2. Sleep and continue on resource shortage.
3. It's important to log the message, because these errors commonly indicate a
misconfiguration of the system and are helpful for the operations people running
the application.
Be sure to [test your application](#testing-application).
### External Crates
The crate [`async-listen`] has a helper to achieve this task:
```rust,edition2018
# extern crate async_std;
# extern crate async_listen;
# use std::time::Duration;
# use async_std::{
# io,
# net::{TcpListener, ToSocketAddrs},
# prelude::*,
# };
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
#
use async_listen::{ListenExt, error_hint};
async fn accept_loop(addr: impl ToSocketAddrs) -> Result<()> {
let listener = TcpListener::bind(addr).await?;
let mut incoming = listener
.incoming()
.log_warnings(log_accept_error) // 1
.handle_errors(Duration::from_millis(500));
while let Some(socket) = incoming.next().await { // 2
// body
}
Ok(())
}
fn log_accept_error(e: &io::Error) {
eprintln!("Error: {}. Listener paused for 0.5s. {}", e, error_hint(e)) // 3
}
```
1. Logs resource shortages (`async-listen` calls them warnings). If you use the
`log` crate or any other logging facade in your app, this should go to the log.
2. The stream yields sockets without the `Result` wrapper after `handle_errors`,
because all errors have already been handled.
3. Together with the error, we print a hint that explains some errors for end
users. For example, it recommends increasing the open-file limit and gives
a link.
[`async-listen`]: https://crates.io/crates/async-listen/
Be sure to [test your application](#testing-application).
## Connections Limit
Even if you've applied everything described in the
[Handling Errors](#handling-errors) section, there is still a problem.
Let's imagine you have a server that needs to open a file to process a
client request. At some point, you might encounter the following situation:
1. There are as many client connections as the maximum number of file descriptors
allowed for the application.
2. The listener gets a `Too many open files` error, so it sleeps.
3. Some client sends a request via a previously opened connection.
4. Opening a file to serve the request fails because of the same
`Too many open files` error, until some other client drops a connection.
There are many more possible situations; this is just a small illustration that
limiting the number of connections is very useful. Generally, it's one of the ways
to control the resources used by a server and to avoid some kinds of denial-of-service
(DoS) attacks.
### `async-listen` crate
Limiting the maximum number of simultaneous connections with [`async-listen`]
looks like the following:
```rust,edition2018
# extern crate async_std;
# extern crate async_listen;
# use std::time::Duration;
# use async_std::{
# io,
# net::{TcpListener, TcpStream, ToSocketAddrs},
# prelude::*,
# task,
# };
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
#
use async_listen::{ListenExt, Token, error_hint};
async fn accept_loop(addr: impl ToSocketAddrs) -> Result<()> {
let listener = TcpListener::bind(addr).await?;
let mut incoming = listener
.incoming()
.log_warnings(log_accept_error)
.handle_errors(Duration::from_millis(500)) // 1
.backpressure(100);
while let Some((token, socket)) = incoming.next().await { // 2
task::spawn(async move {
connection_loop(&token, socket).await; // 3
});
}
Ok(())
}
async fn connection_loop(_token: &Token, stream: TcpStream) { // 4
// ...
}
# fn log_accept_error(e: &io::Error) {
# eprintln!("Error: {}. Listener paused for 0.5s. {}", e, error_hint(e));
# }
```
1. We need to handle errors first, because the [`backpressure`] helper expects a
stream of `TcpStream` rather than of `Result`.
2. The token yielded along with each new socket is what the backpressure helper counts,
i.e. if you drop a token, a new connection can be established.
3. We give the connection loop a reference to the token to bind the token's lifetime to
the lifetime of the connection.
4. The token itself can be ignored inside the function, hence `_token`.
[`backpressure`]: https://docs.rs/async-listen/0.1.2/async_listen/trait.ListenExt.html#method.backpressure
Be sure to [test this behavior](#testing-application).

View file

@ -157,7 +157,7 @@ async fn accept_loop(addr: impl ToSocketAddrs) -> Result<()> {
spawn_and_log_error(connection_loop(broker_sender.clone(), stream));
}
drop(broker_sender);
broker_handle.await;
broker_handle.await?;
Ok(())
}

View file

@ -1,16 +1,18 @@
## Implementing a client
Since the protocol is line-based, implementing a client for the chat is straightforward:
Let's now implement the client for the chat.
Because the protocol is line-based, the implementation is pretty straightforward:
* Lines read from stdin should be sent over the socket.
* Lines read from the socket should be echoed to stdout.
Although async does not significantly affect client performance (as unlike the server, the client interacts solely with one user and only needs limited concurrency), async is still useful for managing concurrency!
The client has to read from stdin and the socket *simultaneously*.
Programming this with threads is cumbersome, especially when implementing a clean shutdown.
With async, the `select!` macro is all that is needed.
Unlike the server, the client needs only limited concurrency, as it interacts with only a single user.
For this reason, async doesn't bring a lot of performance benefits in this case.
However, async is still useful for managing concurrency!
Specifically, the client should *simultaneously* read from stdin and from the socket.
Programming this with threads is cumbersome, especially when implementing clean shutdown.
With async, we can just use the `select!` macro.
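A minimal sketch of such a `select!`-based loop, assuming the `futures` crate for the `select!` macro; the function name and overall shape here are illustrative rather than the chapter's final code:
```rust,edition2018
# extern crate async_std;
# extern crate futures;
use async_std::io::{self, BufReader};
use async_std::net::TcpStream;
use async_std::prelude::*;
use futures::{select, FutureExt};

async fn client_loop(stream: TcpStream) -> io::Result<()> {
    let (reader, mut writer) = (&stream, &stream);
    let mut lines_from_server = BufReader::new(reader).lines().fuse();
    let mut lines_from_stdin = BufReader::new(io::stdin()).lines().fuse();

    loop {
        select! {
            // A line arrived from the server: echo it to stdout.
            line = lines_from_server.next().fuse() => match line {
                Some(line) => println!("{}", line?),
                None => break,
            },
            // The user typed a line: send it over the socket.
            line = lines_from_stdin.next().fuse() => match line {
                Some(line) => {
                    writer.write_all(line?.as_bytes()).await?;
                    writer.write_all(b"\n").await?;
                }
                None => break,
            },
        }
    }
    Ok(())
}
```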
```rust,edition2018
# extern crate async_std;

View file

@ -111,7 +111,7 @@ We can "fix" it by waiting for the task to be joined, like this:
#
# async move |stream| {
let handle = task::spawn(connection_loop(stream));
handle.await?
handle.await
# };
```

View file

@ -14,9 +14,8 @@ use async_std::task;
async fn process(stream: TcpStream) -> io::Result<()> {
println!("Accepted from: {}", stream.peer_addr()?);
let mut reader = stream.clone();
let mut writer = stream;
io::copy(&mut reader, &mut writer).await?;
let (reader, writer) = &mut (&stream, &stream);
io::copy(reader, writer).await?;
Ok(())
}

View file

@ -15,9 +15,8 @@ use async_std::task;
async fn process(stream: TcpStream) -> io::Result<()> {
println!("Accepted from: {}", stream.peer_addr()?);
let mut reader = stream.clone();
let mut writer = stream;
io::copy(&mut reader, &mut writer).await?;
let (reader, writer) = &mut (&stream, &stream);
io::copy(reader, writer).await?;
Ok(())
}

View file

@ -1 +1,2 @@
version = "Two"
format_code_in_doc_comments = true

View file

@ -4,14 +4,11 @@ use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, IntoStream};
impl<T: Ord + Send> stream::Extend<T> for BinaryHeap<T> {
impl<T: Ord> stream::Extend<T> for BinaryHeap<T> {
fn extend<'a, S: IntoStream<Item = T> + 'a>(
&'a mut self,
stream: S,
) -> Pin<Box<dyn Future<Output = ()> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = ()> + 'a>> {
let stream = stream.into_stream();
self.reserve(stream.size_hint().0);

View file

@ -4,14 +4,11 @@ use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, FromStream, IntoStream};
impl<T: Ord + Send> FromStream<T> for BinaryHeap<T> {
impl<T: Ord> FromStream<T> for BinaryHeap<T> {
#[inline]
fn from_stream<'a, S: IntoStream<Item = T> + 'a>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = Self> + 'a>> {
let stream = stream.into_stream();
Box::pin(async move {

View file

@ -4,14 +4,11 @@ use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, IntoStream};
impl<K: Ord + Send, V: Send> stream::Extend<(K, V)> for BTreeMap<K, V> {
impl<K: Ord, V> stream::Extend<(K, V)> for BTreeMap<K, V> {
fn extend<'a, S: IntoStream<Item = (K, V)> + 'a>(
&'a mut self,
stream: S,
) -> Pin<Box<dyn Future<Output = ()> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = ()> + 'a>> {
Box::pin(stream.into_stream().for_each(move |(k, v)| {
self.insert(k, v);
}))

View file

@ -4,14 +4,11 @@ use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, FromStream, IntoStream};
impl<K: Ord + Send, V: Send> FromStream<(K, V)> for BTreeMap<K, V> {
impl<K: Ord, V> FromStream<(K, V)> for BTreeMap<K, V> {
#[inline]
fn from_stream<'a, S: IntoStream<Item = (K, V)> + 'a>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = Self> + 'a>> {
let stream = stream.into_stream();
Box::pin(async move {

View file

@ -4,14 +4,11 @@ use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, IntoStream};
impl<T: Ord + Send> stream::Extend<T> for BTreeSet<T> {
impl<T: Ord> stream::Extend<T> for BTreeSet<T> {
fn extend<'a, S: IntoStream<Item = T> + 'a>(
&'a mut self,
stream: S,
) -> Pin<Box<dyn Future<Output = ()> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = ()> + 'a>> {
Box::pin(stream.into_stream().for_each(move |item| {
self.insert(item);
}))

View file

@ -4,14 +4,11 @@ use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, FromStream, IntoStream};
impl<T: Ord + Send> FromStream<T> for BTreeSet<T> {
impl<T: Ord> FromStream<T> for BTreeSet<T> {
#[inline]
fn from_stream<'a, S: IntoStream<Item = T> + 'a>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = Self> + 'a>> {
let stream = stream.into_stream();
Box::pin(async move {

View file

@ -7,17 +7,13 @@ use crate::stream::{self, IntoStream};
impl<K, V, H> stream::Extend<(K, V)> for HashMap<K, V, H>
where
K: Eq + Hash + Send,
V: Send,
H: BuildHasher + Default + Send,
K: Eq + Hash,
H: BuildHasher + Default,
{
fn extend<'a, S: IntoStream<Item = (K, V)> + 'a>(
&'a mut self,
stream: S,
) -> Pin<Box<dyn Future<Output = ()> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = ()> + 'a>> {
let stream = stream.into_stream();
// The following is adapted from the hashbrown source code:

View file

@ -7,17 +7,13 @@ use crate::stream::{self, FromStream, IntoStream};
impl<K, V, H> FromStream<(K, V)> for HashMap<K, V, H>
where
K: Eq + Hash + Send,
H: BuildHasher + Default + Send,
V: Send,
K: Eq + Hash,
H: BuildHasher + Default,
{
#[inline]
fn from_stream<'a, S: IntoStream<Item = (K, V)> + 'a>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = Self> + 'a>> {
let stream = stream.into_stream();
Box::pin(async move {

View file

@ -7,16 +7,13 @@ use crate::stream::{self, IntoStream};
impl<T, H> stream::Extend<T> for HashSet<T, H>
where
T: Eq + Hash + Send,
H: BuildHasher + Default + Send,
T: Eq + Hash,
H: BuildHasher + Default,
{
fn extend<'a, S: IntoStream<Item = T> + 'a>(
&'a mut self,
stream: S,
) -> Pin<Box<dyn Future<Output = ()> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = ()> + 'a>> {
// The Extend impl for HashSet in the standard library delegates to the internal HashMap.
// Thus, this impl is just a copy of the async Extend impl for HashMap in this crate.

View file

@ -7,16 +7,13 @@ use crate::stream::{self, FromStream, IntoStream};
impl<T, H> FromStream<T> for HashSet<T, H>
where
T: Eq + Hash + Send,
H: BuildHasher + Default + Send,
T: Eq + Hash,
H: BuildHasher + Default,
{
#[inline]
fn from_stream<'a, S: IntoStream<Item = T> + 'a>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = Self> + 'a>> {
let stream = stream.into_stream();
Box::pin(async move {

View file

@ -4,14 +4,11 @@ use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, IntoStream};
impl<T: Send> stream::Extend<T> for LinkedList<T> {
impl<T> stream::Extend<T> for LinkedList<T> {
fn extend<'a, S: IntoStream<Item = T> + 'a>(
&'a mut self,
stream: S,
) -> Pin<Box<dyn Future<Output = ()> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = ()> + 'a>> {
let stream = stream.into_stream();
Box::pin(stream.for_each(move |item| self.push_back(item)))
}

View file

@ -4,14 +4,11 @@ use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, FromStream, IntoStream};
impl<T: Send> FromStream<T> for LinkedList<T> {
impl<T> FromStream<T> for LinkedList<T> {
#[inline]
fn from_stream<'a, S: IntoStream<Item = T> + 'a>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = Self> + 'a>> {
let stream = stream.into_stream();
Box::pin(async move {

View file

@ -4,14 +4,11 @@ use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, IntoStream};
impl<T: Send> stream::Extend<T> for VecDeque<T> {
impl<T> stream::Extend<T> for VecDeque<T> {
fn extend<'a, S: IntoStream<Item = T> + 'a>(
&'a mut self,
stream: S,
) -> Pin<Box<dyn Future<Output = ()> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = ()> + 'a>> {
let stream = stream.into_stream();
self.reserve(stream.size_hint().0);

View file

@ -4,14 +4,11 @@ use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, FromStream, IntoStream};
impl<T: Send> FromStream<T> for VecDeque<T> {
impl<T> FromStream<T> for VecDeque<T> {
#[inline]
fn from_stream<'a, S: IntoStream<Item = T> + 'a>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = Self> + 'a>> {
let stream = stream.into_stream();
Box::pin(async move {

View file

@ -158,12 +158,6 @@ impl fmt::Debug for DirEntry {
}
}
impl Clone for DirEntry {
fn clone(&self) -> Self {
DirEntry(self.0.clone())
}
}
cfg_unix! {
use crate::os::unix::fs::DirEntryExt;

View file

@ -12,7 +12,7 @@ use crate::future;
use crate::io::{self, Read, Seek, SeekFrom, Write};
use crate::path::Path;
use crate::prelude::*;
use crate::task::{spawn_blocking, Context, Poll, Waker};
use crate::task::{self, spawn_blocking, Context, Poll, Waker};
use crate::utils::Context as _;
/// An open file on the filesystem.
@ -315,7 +315,7 @@ impl Drop for File {
// non-blocking fashion, but our only other option here is losing data remaining in the
// write cache. Good task schedulers should be resilient to occasional blocking hiccups in
// file destructors so we don't expect this to be a common problem in practice.
let _ = smol::block_on(self.flush());
let _ = task::block_on(self.flush());
}
}
@ -673,7 +673,7 @@ impl LockGuard<State> {
if available > 0 || self.cache.is_empty() {
// Copy data from the cache into the buffer.
let n = cmp::min(available, buf.len());
buf[..n].copy_from_slice(&self.cache[start..(start + n)]);
buf[..n].copy_from_slice(&self.cache[start..n]);
// Move the read cursor forward.
self.mode = Mode::Reading(start + n);
@ -867,15 +867,3 @@ impl LockGuard<State> {
Poll::Ready(Ok(()))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn async_file_drop() {
crate::task::block_on(async move {
File::open(file!()).await.unwrap();
});
}
}

View file

@ -2,10 +2,10 @@ use std::future::Future;
use std::pin::Pin;
use std::time::Duration;
use futures_timer::Delay;
use pin_project_lite::pin_project;
use crate::task::{Context, Poll};
use crate::utils::{timer_after, Timer};
pin_project! {
#[doc(hidden)]
@ -14,13 +14,13 @@ pin_project! {
#[pin]
future: F,
#[pin]
delay: Timer,
delay: Delay,
}
}
impl<F> DelayFuture<F> {
pub fn new(future: F, dur: Duration) -> DelayFuture<F> {
let delay = timer_after(dur);
let delay = Delay::new(dur);
DelayFuture { future, delay }
}

View file

@ -1,6 +1,6 @@
use std::pin::Pin;
use crate::future::MaybeDone;
use async_macros::MaybeDone;
use pin_project_lite::pin_project;
use crate::task::{Context, Poll};

View file

@ -7,6 +7,7 @@ cfg_unstable! {
mod try_join;
use std::time::Duration;
use delay::DelayFuture;
use flatten::FlattenFuture;
use crate::future::IntoFuture;
@ -16,13 +17,9 @@ cfg_unstable! {
use try_join::TryJoin;
}
cfg_unstable_default! {
use crate::future::timeout::TimeoutFuture;
}
extension_trait! {
use core::pin::Pin;
use core::ops::{Deref, DerefMut};
use std::pin::Pin;
use std::ops::{Deref, DerefMut};
use crate::task::{Context, Poll};
@ -136,7 +133,7 @@ extension_trait! {
[`Future`]: ../future/trait.Future.html
"#]
pub trait FutureExt: core::future::Future {
pub trait FutureExt: std::future::Future {
/// Returns a Future that delays execution for a specified time.
///
/// # Examples
@ -151,7 +148,7 @@ extension_trait! {
/// dbg!(a.await);
/// # })
/// ```
#[cfg(feature = "unstable")]
#[cfg(all(feature = "default", feature = "unstable"))]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
fn delay(self, dur: Duration) -> impl Future<Output = Self::Output> [DelayFuture<Self>]
where
@ -358,40 +355,6 @@ extension_trait! {
{
TryJoin::new(self, other)
}
#[doc = r#"
Waits for both the future and a timeout, if the timeout completes before
the future, it returns an TimeoutError.
# Example
```
# async_std::task::block_on(async {
#
use std::time::Duration;
use async_std::prelude::*;
use async_std::future;
let fut = future::ready(0);
let dur = Duration::from_millis(100);
let res = fut.timeout(dur).await;
assert!(res.is_ok());
let fut = future::pending::<()>();
let dur = Duration::from_millis(100);
let res = fut.timeout(dur).await;
assert!(res.is_err())
#
# });
```
"#]
#[cfg(any(all(feature = "default", feature = "unstable"), feature = "docs"))]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
fn timeout(self, dur: Duration) -> impl Future<Output = Self::Output> [TimeoutFuture<Self>]
where Self: Sized
{
TimeoutFuture::new(self, dur)
}
}
impl<F: Future + Unpin + ?Sized> Future for Box<F> {

View file

@ -1,7 +1,7 @@
use std::future::Future;
use std::pin::Pin;
use crate::future::MaybeDone;
use async_macros::MaybeDone;
use pin_project_lite::pin_project;
use crate::task::{Context, Poll};

View file

@ -1,6 +1,6 @@
use std::pin::Pin;
use crate::future::MaybeDone;
use async_macros::MaybeDone;
use pin_project_lite::pin_project;
use crate::task::{Context, Poll};

View file

@ -1,6 +1,6 @@
use std::pin::Pin;
use crate::future::MaybeDone;
use async_macros::MaybeDone;
use pin_project_lite::pin_project;
use crate::task::{Context, Poll};

View file

@ -1,79 +0,0 @@
//! A type that wraps a future to keep track of its completion status.
//!
//! This implementation was taken from the original `macro_rules` `join/try_join`
//! macros in the `futures-preview` crate.
use std::future::Future;
use std::mem;
use std::pin::Pin;
use std::task::{Context, Poll};
use futures_core::ready;
/// A future that may have completed.
#[derive(Debug)]
pub(crate) enum MaybeDone<Fut: Future> {
/// A not-yet-completed future
Future(Fut),
/// The output of the completed future
Done(Fut::Output),
/// The empty variant after the result of a [`MaybeDone`] has been
/// taken using the [`take`](MaybeDone::take) method.
Gone,
}
impl<Fut: Future> MaybeDone<Fut> {
/// Create a new instance of `MaybeDone`.
pub(crate) fn new(future: Fut) -> MaybeDone<Fut> {
Self::Future(future)
}
/// Returns an [`Option`] containing a reference to the output of the future.
/// The output of this method will be [`Some`] if and only if the inner
/// future has been completed and [`take`](MaybeDone::take)
/// has not yet been called.
#[inline]
pub(crate) fn output(self: Pin<&Self>) -> Option<&Fut::Output> {
let this = self.get_ref();
match this {
MaybeDone::Done(res) => Some(res),
_ => None,
}
}
/// Attempt to take the output of a `MaybeDone` without driving it
/// towards completion.
#[inline]
pub(crate) fn take(self: Pin<&mut Self>) -> Option<Fut::Output> {
unsafe {
let this = self.get_unchecked_mut();
match this {
MaybeDone::Done(_) => {}
MaybeDone::Future(_) | MaybeDone::Gone => return None,
};
if let MaybeDone::Done(output) = mem::replace(this, MaybeDone::Gone) {
Some(output)
} else {
unreachable!()
}
}
}
}
impl<Fut: Future> Future for MaybeDone<Fut> {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let res = unsafe {
match Pin::as_mut(&mut self).get_unchecked_mut() {
MaybeDone::Future(a) => ready!(Pin::new_unchecked(a).poll(cx)),
MaybeDone::Done(_) => return Poll::Ready(()),
MaybeDone::Gone => panic!("MaybeDone polled after value taken"),
}
};
self.set(MaybeDone::Done(res));
Poll::Ready(())
}
}

View file

@ -46,29 +46,22 @@
//! [`Future::race`]: trait.Future.html#method.race
//! [`Future::try_race`]: trait.Future.html#method.try_race
cfg_alloc! {
pub use future::Future;
pub(crate) mod future;
pub use future::Future;
pub use pending::pending;
pub use poll_fn::poll_fn;
pub use ready::ready;
pub(crate) mod future;
mod pending;
mod poll_fn;
mod ready;
cfg_default! {
pub use timeout::{timeout, TimeoutError};
mod timeout;
}
cfg_std! {
pub use pending::pending;
pub use poll_fn::poll_fn;
pub use ready::ready;
mod pending;
mod poll_fn;
mod ready;
}
#[cfg(any(feature = "unstable", feature = "default"))]
pub use timeout::{timeout, TimeoutError};
#[cfg(any(feature = "unstable", feature = "default"))]
mod timeout;
cfg_unstable! {
pub use into_future::IntoFuture;
pub(crate) use maybe_done::MaybeDone;
mod into_future;
mod maybe_done;
}

View file

@ -1,13 +1,13 @@
use std::error::Error;
use std::fmt;
use std::future::Future;
use std::pin::Pin;
use std::time::Duration;
use std::future::Future;
use futures_timer::Delay;
use pin_project_lite::pin_project;
use crate::task::{Context, Poll};
use crate::utils::{timer_after, Timer};
/// Awaits a future or times out after a duration of time.
///
@ -33,26 +33,20 @@ pub async fn timeout<F, T>(dur: Duration, f: F) -> Result<T, TimeoutError>
where
F: Future<Output = T>,
{
TimeoutFuture::new(f, dur).await
let f = TimeoutFuture {
future: f,
delay: Delay::new(dur),
};
f.await
}
pin_project! {
/// A future that times out after a duration of time.
pub struct TimeoutFuture<F> {
struct TimeoutFuture<F> {
#[pin]
future: F,
#[pin]
delay: Timer,
}
}
impl<F> TimeoutFuture<F> {
#[allow(dead_code)]
pub(super) fn new(future: F, dur: Duration) -> TimeoutFuture<F> {
TimeoutFuture {
future,
delay: timer_after(dur),
}
delay: Delay,
}
}

View file

@ -29,7 +29,7 @@ extension_trait! {
```
# #[allow(unused_imports)]
use async_std::io::prelude::*;
use async_std::prelude::*;
```
[`std::io::BufRead`]: https://doc.rust-lang.org/std/io/trait.BufRead.html

View file

@ -105,8 +105,8 @@
//!
//! ```no_run
//! use async_std::fs::File;
//! use async_std::io::prelude::*;
//! use async_std::io::BufWriter;
//! use async_std::io::prelude::*;
//!
//! # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
//! #
@ -116,8 +116,8 @@
//!
//! // write a byte to the buffer
//! writer.write(&[42]).await?;
//!
//! } // the buffer is flushed once writer goes out of scope
//! //
//! #
//! # Ok(()) }) }
//! ```
@ -275,13 +275,13 @@ cfg_std! {
#[doc(inline)]
pub use std::io::{Error, ErrorKind, IoSlice, IoSliceMut, Result, SeekFrom};
pub use buf_read::{BufRead, Lines, Split};
pub use buf_read::{BufRead, Lines};
pub use buf_reader::BufReader;
pub use buf_writer::{BufWriter, IntoInnerError};
pub use copy::copy;
pub use cursor::Cursor;
pub use empty::{empty, Empty};
pub use read::*;
pub use read::Read;
pub use repeat::{repeat, Repeat};
pub use seek::Seek;
pub use sink::{sink, Sink};
@ -307,24 +307,22 @@ cfg_std! {
cfg_default! {
// For use in the print macros.
#[doc(hidden)]
#[cfg(not(target_os = "unknown"))]
pub use stdio::{_eprint, _print};
#[cfg(not(target_os = "unknown"))]
pub use stderr::{stderr, Stderr};
#[cfg(not(target_os = "unknown"))]
pub use stdin::{stdin, Stdin};
#[cfg(not(target_os = "unknown"))]
pub use stdout::{stdout, Stdout};
pub use timeout::timeout;
mod timeout;
#[cfg(not(target_os = "unknown"))]
mod stderr;
#[cfg(not(target_os = "unknown"))]
mod stdin;
#[cfg(not(target_os = "unknown"))]
mod stdio;
#[cfg(not(target_os = "unknown"))]
mod stdout;
}
cfg_unstable! {
pub use stderr::StderrLock;
pub use stdin::StdinLock;
pub use stdout::StdoutLock;
}

View file

@ -32,7 +32,7 @@ impl<T: Read + Unpin> Stream for Bytes<T> {
}
}
#[cfg(all(test, default))]
#[cfg(test)]
mod tests {
use crate::io;
use crate::prelude::*;

View file

@ -165,7 +165,7 @@ impl<T: BufRead, U: BufRead> BufRead for Chain<T, U> {
}
}
#[cfg(all(test, default))]
#[cfg(test)]
mod tests {
use crate::io;
use crate::prelude::*;

View file

@ -17,10 +17,6 @@ use std::mem;
use crate::io::IoSliceMut;
pub use bytes::Bytes;
pub use chain::Chain;
pub use take::Take;
extension_trait! {
use std::pin::Pin;
use std::ops::{Deref, DerefMut};
@ -305,11 +301,11 @@ extension_trait! {
# Ok(()) }) }
```
"#]
fn take(self, limit: u64) -> Take<Self>
fn take(self, limit: u64) -> take::Take<Self>
where
Self: Sized,
{
Take { inner: self, limit }
take::Take { inner: self, limit }
}
#[doc = r#"
@ -381,8 +377,8 @@ extension_trait! {
# Ok(()) }) }
```
"#]
fn bytes(self) -> Bytes<Self> where Self: Sized {
Bytes { inner: self }
fn bytes(self) -> bytes::Bytes<Self> where Self: Sized {
bytes::Bytes { inner: self }
}
#[doc = r#"
@ -417,8 +413,8 @@ extension_trait! {
# Ok(()) }) }
```
"#]
fn chain<R: Read>(self, next: R) -> Chain<Self, R> where Self: Sized {
Chain { first: self, second: next, done_first: false }
fn chain<R: Read>(self, next: R) -> chain::Chain<Self, R> where Self: Sized {
chain::Chain { first: self, second: next, done_first: false }
}
}
@ -477,13 +473,13 @@ unsafe fn initialize<R: futures_io::AsyncRead>(_reader: &R, buf: &mut [u8]) {
std::ptr::write_bytes(buf.as_mut_ptr(), 0, buf.len())
}
#[cfg(all(test, not(target_os = "unknown")))]
#[cfg(test)]
mod tests {
use crate::io;
use crate::prelude::*;
#[test]
fn test_read_by_ref() {
fn test_read_by_ref() -> io::Result<()> {
crate::task::block_on(async {
let mut f = io::Cursor::new(vec![0u8, 1, 2, 3, 4, 5, 6, 7, 8]);
let mut buffer = Vec::new();
@ -493,13 +489,14 @@ mod tests {
let reference = f.by_ref();
// read at most 5 bytes
assert_eq!(reference.take(5).read_to_end(&mut buffer).await.unwrap(), 5);
assert_eq!(reference.take(5).read_to_end(&mut buffer).await?, 5);
assert_eq!(&buffer, &[0, 1, 2, 3, 4])
} // drop our &mut reference so we can use f again
// original file still usable, read the rest
assert_eq!(f.read_to_end(&mut other_buffer).await.unwrap(), 4);
assert_eq!(f.read_to_end(&mut other_buffer).await?, 4);
assert_eq!(&other_buffer, &[5, 6, 7, 8]);
});
Ok(())
})
}
}

View file

@ -218,7 +218,7 @@ impl<T: BufRead> BufRead for Take<T> {
}
}
#[cfg(all(test, not(target_os = "unknown")))]
#[cfg(test)]
mod tests {
use crate::io;
use crate::prelude::*;

View file

@ -5,6 +5,11 @@ use std::future::Future;
use crate::io::{self, Write};
use crate::task::{spawn_blocking, Context, JoinHandle, Poll};
cfg_unstable! {
use once_cell::sync::Lazy;
use std::io::Write as _;
}
/// Constructs a new handle to the standard error of the current process.
///
/// This function is an async version of [`std::io::stderr`].
@ -53,6 +58,22 @@ pub fn stderr() -> Stderr {
#[derive(Debug)]
pub struct Stderr(Mutex<State>);
/// A locked reference to the Stderr handle.
///
/// This handle implements the [`Write`] traits, and is constructed via the [`Stderr::lock`]
/// method.
///
/// [`Write`]: trait.Write.html
/// [`Stderr::lock`]: struct.Stderr.html#method.lock
#[cfg(feature = "unstable")]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
#[derive(Debug)]
pub struct StderrLock<'a>(std::io::StderrLock<'a>);
#[cfg(feature = "unstable")]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
unsafe impl Send for StderrLock<'_> {}
/// The state of the asynchronous stderr.
///
/// The stderr can be either idle or busy performing an asynchronous operation.
@ -87,14 +108,42 @@ enum Operation {
Flush(io::Result<()>),
}
impl Stderr {
/// Locks this handle to the standard error stream, returning a writable guard.
///
/// The lock is released when the returned lock goes out of scope. The returned guard also implements the Write trait for writing data.
///
/// # Examples
///
/// ```no_run
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::io;
/// use async_std::prelude::*;
///
/// let stderr = io::stderr();
/// let mut handle = stderr.lock().await;
///
/// handle.write_all(b"hello world").await?;
/// #
/// # Ok(()) }) }
/// ```
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
#[cfg(any(feature = "unstable", feature = "docs"))]
pub async fn lock(&self) -> StderrLock<'static> {
static STDERR: Lazy<std::io::Stderr> = Lazy::new(std::io::stderr);
spawn_blocking(move || StderrLock(STDERR.lock())).await
}
}
impl Write for Stderr {
fn poll_write(
self: Pin<&mut Self>,
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
let mut state_guard = self.0.lock().unwrap();
let state = &mut *state_guard;
let state = &mut *self.0.lock().unwrap();
loop {
match state {
@ -138,9 +187,8 @@ impl Write for Stderr {
}
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
let mut state_guard = self.0.lock().unwrap();
let state = &mut *state_guard;
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
let state = &mut *self.0.lock().unwrap();
loop {
match state {
@ -191,3 +239,23 @@ cfg_windows! {
}
}
}
#[cfg(feature = "unstable")]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
impl io::Write for StderrLock<'_> {
fn poll_write(
mut self: Pin<&mut Self>,
_cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Poll::Ready(self.0.write(buf))
}
fn poll_flush(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Poll::Ready(self.0.flush())
}
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
self.poll_flush(cx)
}
}

View file

@ -7,6 +7,11 @@ use crate::io::{self, Read};
use crate::task::{spawn_blocking, Context, JoinHandle, Poll};
use crate::utils::Context as _;
cfg_unstable! {
use once_cell::sync::Lazy;
use std::io::Read as _;
}
/// Constructs a new handle to the standard input of the current process.
///
/// This function is an async version of [`std::io::stdin`].
@ -56,6 +61,21 @@ pub fn stdin() -> Stdin {
#[derive(Debug)]
pub struct Stdin(Mutex<State>);
/// A locked reference to the Stdin handle.
///
/// This handle implements the [`Read`] traits, and is constructed via the [`Stdin::lock`] method.
///
/// [`Read`]: trait.Read.html
/// [`Stdin::lock`]: struct.Stdin.html#method.lock
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
#[cfg(feature = "unstable")]
#[derive(Debug)]
pub struct StdinLock<'a>(std::io::StdinLock<'a>);
#[cfg(feature = "unstable")]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
unsafe impl Send for StdinLock<'_> {}
/// The state of the asynchronous stdin.
///
/// The stdin can be either idle or busy performing an asynchronous operation.
@ -145,16 +165,44 @@ impl Stdin {
.await
.context(|| String::from("could not read line on stdin"))
}
/// Locks this handle to the standard input stream, returning a readable guard.
///
/// The lock is released when the returned lock goes out of scope. The returned guard also implements the Read trait for accessing the underlying data.
///
/// # Examples
///
/// ```no_run
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::io;
/// use async_std::prelude::*;
///
/// let mut buffer = String::new();
///
/// let stdin = io::stdin();
/// let mut handle = stdin.lock().await;
///
/// handle.read_to_string(&mut buffer).await?;
/// #
/// # Ok(()) }) }
/// ```
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
#[cfg(any(feature = "unstable", feature = "docs"))]
pub async fn lock(&self) -> StdinLock<'static> {
static STDIN: Lazy<std::io::Stdin> = Lazy::new(std::io::stdin);
spawn_blocking(move || StdinLock(STDIN.lock())).await
}
}
impl Read for Stdin {
fn poll_read(
self: Pin<&mut Self>,
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
let mut state_guard = self.0.lock().unwrap();
let state = &mut *state_guard;
let state = &mut *self.0.lock().unwrap();
loop {
match state {
@ -217,3 +265,15 @@ cfg_windows! {
}
}
}
#[cfg(feature = "unstable")]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
impl Read for StdinLock<'_> {
fn poll_read(
mut self: Pin<&mut Self>,
_cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
Poll::Ready(self.0.read(buf))
}
}

View file

@ -5,6 +5,11 @@ use std::future::Future;
use crate::io::{self, Write};
use crate::task::{spawn_blocking, Context, JoinHandle, Poll};
cfg_unstable! {
use once_cell::sync::Lazy;
use std::io::Write as _;
}
/// Constructs a new handle to the standard output of the current process.
///
/// This function is an async version of [`std::io::stdout`].
@ -53,6 +58,22 @@ pub fn stdout() -> Stdout {
#[derive(Debug)]
pub struct Stdout(Mutex<State>);
/// A locked reference to the Stdout handle.
///
/// This handle implements the [`Write`] trait, and is constructed via the [`Stdout::lock`]
/// method.
///
/// [`Write`]: trait.Write.html
/// [`Stdout::lock`]: struct.Stdout.html#method.lock
#[cfg(feature = "unstable")]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
#[derive(Debug)]
pub struct StdoutLock<'a>(std::io::StdoutLock<'a>);
#[cfg(feature = "unstable")]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
unsafe impl Send for StdoutLock<'_> {}
/// The state of the asynchronous stdout.
///
/// The stdout can be either idle or busy performing an asynchronous operation.
@ -87,14 +108,42 @@ enum Operation {
Flush(io::Result<()>),
}
impl Stdout {
/// Locks this handle to the standard output stream, returning a writable guard.
///
/// The lock is released when the returned guard goes out of scope. The returned guard also implements the `Write` trait for writing data.
///
/// # Examples
///
/// ```no_run
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::io;
/// use async_std::prelude::*;
///
/// let stdout = io::stdout();
/// let mut handle = stdout.lock().await;
///
/// handle.write_all(b"hello world").await?;
/// #
/// # Ok(()) }) }
/// ```
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
#[cfg(any(feature = "unstable", feature = "docs"))]
pub async fn lock(&self) -> StdoutLock<'static> {
static STDOUT: Lazy<std::io::Stdout> = Lazy::new(std::io::stdout);
spawn_blocking(move || StdoutLock(STDOUT.lock())).await
}
}
impl Write for Stdout {
fn poll_write(
self: Pin<&mut Self>,
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
let mut state_guard = self.0.lock().unwrap();
let state = &mut *state_guard;
let state = &mut *self.0.lock().unwrap();
loop {
match state {
@ -138,9 +187,8 @@ impl Write for Stdout {
}
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
let mut state_guard = self.0.lock().unwrap();
let state = &mut *state_guard;
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
let state = &mut *self.0.lock().unwrap();
loop {
match state {
@ -191,3 +239,23 @@ cfg_windows! {
}
}
}
#[cfg(feature = "unstable")]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
impl Write for StdoutLock<'_> {
fn poll_write(
mut self: Pin<&mut Self>,
_cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Poll::Ready(self.0.write(buf))
}
fn poll_flush(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Poll::Ready(self.0.flush())
}
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
self.poll_flush(cx)
}
}

View file

@ -1,12 +1,12 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;
use std::future::Future;
use futures_timer::Delay;
use pin_project_lite::pin_project;
use crate::io;
use crate::utils::{timer_after, Timer};
/// Awaits an I/O future or times out after a duration of time.
///
@ -37,7 +37,7 @@ where
F: Future<Output = io::Result<T>>,
{
Timeout {
timeout: timer_after(dur),
timeout: Delay::new(dur),
future: f,
}
.await
@ -53,7 +53,7 @@ pin_project! {
#[pin]
future: F,
#[pin]
timeout: Timer,
timeout: Delay,
}
}
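For context, this is how the future above is usually reached through the public `io::timeout` function (standard async-std API; the assertion only checks that the stalled future gets cut off):

```rust
use std::time::Duration;

use async_std::{future, io, task};

fn main() {
    task::block_on(async {
        // A future that never completes, standing in for a stalled I/O operation.
        let never = future::pending::<io::Result<()>>();
        let res = io::timeout(Duration::from_millis(10), never).await;
        assert!(res.is_err()); // the error kind is `TimedOut`
    });
}
```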

View file

@ -1,7 +1,5 @@
use crate::utils::Context;
use std::{error::Error as StdError, fmt, io};
/// Wraps `std::io::Error` with an additional message.
///
/// Keeps the original error kind and stores the original I/O error as `source`.
@ -11,6 +9,8 @@ impl<T> Context for Result<T, std::io::Error> {
}
}
use std::{error::Error as StdError, fmt, io};
#[derive(Debug)]
pub(crate) struct VerboseError {
source: io::Error,
@ -36,6 +36,10 @@ impl fmt::Display for VerboseError {
}
impl StdError for VerboseError {
fn description(&self) -> &str {
self.source.description()
}
fn source(&self) -> Option<&(dyn StdError + 'static)> {
Some(&self.source)
}
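A condensed stand-in for the wrapper above, showing why the original error is kept as `source`: the added message is what `Display` prints, while callers can still walk `Error::source` to recover the underlying `io::Error` and its kind. The type and message below are illustrative, not the crate's:

```rust
use std::error::Error as StdError;
use std::fmt;
use std::io;

#[derive(Debug)]
struct Verbose {
    source: io::Error,
    message: String,
}

impl fmt::Display for Verbose {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.message)
    }
}

impl StdError for Verbose {
    fn source(&self) -> Option<&(dyn StdError + 'static)> {
        Some(&self.source)
    }
}

fn main() {
    let err = Verbose {
        source: io::Error::new(io::ErrorKind::NotFound, "no such file"),
        message: "could not read config file".into(),
    };
    println!("{}", err); // the added context
    let inner = err.source().unwrap().downcast_ref::<io::Error>().unwrap();
    assert_eq!(inner.kind(), io::ErrorKind::NotFound); // original kind preserved
}
```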

View file

@ -6,7 +6,6 @@ use crate::task::{Context, Poll};
#[doc(hidden)]
#[allow(missing_debug_implementations)]
#[must_use]
pub struct WriteFmtFuture<'a, T: Unpin + ?Sized> {
pub(crate) writer: &'a mut T,
pub(crate) res: Option<io::Result<Vec<u8>>>,

View file

@ -47,7 +47,7 @@
//! encouraged to read it. The `async-std` source is generally high
//! quality and a peek behind the curtains is often enlightening.
//!
//! Modules in this crate are organized in the same way as in `std`, except blocking
//! Modules in this crate are organized in the same way as in `async-std`, except blocking
//! functions have been replaced with async functions and threads have been replaced with
//! lightweight tasks.
//!
@ -138,8 +138,7 @@
//!
//! Call an async function from the main function:
//!
#![cfg_attr(feature = "attributes", doc = "```")]
#![cfg_attr(not(feature = "attributes"), doc = "```ignore")]
//! ```
//! async fn say_hello() {
//! println!("Hello, world!");
//! }
@ -152,10 +151,7 @@
//!
//! Await two futures concurrently, and return a tuple of their output:
//!
#![cfg_attr(feature = "attributes", doc = "```")]
#![cfg_attr(not(feature = "attributes"), doc = "```ignore")]
//! use async_std::prelude::*;
//!
//! ```
//! #[async_std::main]
//! async fn main() {
//! let a = async { 1u8 };
@ -166,13 +162,12 @@
//!
//! Create a UDP server that echoes back each received message to the sender:
//!
#![cfg_attr(feature = "attributes", doc = "```no_run")]
#![cfg_attr(not(feature = "attributes"), doc = "```ignore")]
//! ```no_run
//! use async_std::net::UdpSocket;
//!
//! #[async_std::main]
//! async fn main() -> std::io::Result<()> {
//! let socket = UdpSocket::bind("127.0.0.1:8080").await?;
//! let mut socket = UdpSocket::bind("127.0.0.1:8080")?;
//! println!("Listening on {}", socket.local_addr()?);
//!
//! let mut buf = vec![0u8; 1024];
@ -197,7 +192,7 @@
//!
//! ```toml
//! [dependencies.async-std]
//! version = "1.6.2"
//! version = "1.0.0"
//! features = ["unstable"]
//! ```
//!
@ -210,55 +205,20 @@
//!
//! ```toml
//! [dependencies.async-std]
//! version = "1.6.2"
//! version = "1.0.0"
//! features = ["attributes"]
//! ```
//!
//! Compatibility with the `tokio` runtime is possible using the `tokio02`
//! Cargo feature:
//!
//! ```toml
//! [dependencies.async-std]
//! version = "1.6.2"
//! features = ["tokio02"]
//! ```
//!
//! Additionally it's possible to only use the core traits and combinators by
//! only enabling the `std` Cargo feature:
//!
//! ```toml
//! [dependencies.async-std]
//! version = "1.6.2"
//! version = "1.0.0"
//! default-features = false
//! features = ["std"]
//! ```
//!
//! And to use async-std on `no_std` targets that only support `alloc` only
//! enable the `alloc` Cargo feature:
//!
//! ```toml
//! [dependencies.async-std]
//! version = "1.6.2"
//! default-features = false
//! features = ["alloc"]
//! ```
//!
//! # Runtime configuration
//!
//! Several environment variables are available to tune the async-std
//! runtime:
//!
//! * `ASYNC_STD_THREAD_COUNT`: The number of threads that the
//! async-std runtime will start. By default, this is one per logical
//! cpu as reported by the [num_cpus](num_cpus) crate, which may be
//! different than the number of physical cpus. Async-std _will panic_
//! if this is set to any value other than a positive integer.
//! * `ASYNC_STD_THREAD_NAME`: The name that async-std's runtime
//! threads report to the operating system. The default value is
//! `"async-std/runtime"`.
//!
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(feature = "docs", feature(doc_cfg))]
#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)]
#![allow(clippy::mutex_atomic, clippy::module_inception)]
@ -267,8 +227,6 @@
#![doc(html_logo_url = "https://async.rs/images/logo--hero.svg")]
#![recursion_limit = "2048"]
extern crate alloc;
#[macro_use]
mod utils;
@ -280,31 +238,25 @@ pub use async_attributes::{main, test};
#[cfg(feature = "std")]
mod macros;
cfg_alloc! {
pub mod task;
pub mod future;
pub mod stream;
}
cfg_std! {
pub mod future;
pub mod io;
pub mod os;
pub mod prelude;
pub mod stream;
pub mod sync;
pub mod task;
}
cfg_default! {
#[cfg(not(target_os = "unknown"))]
pub mod fs;
pub mod path;
pub mod net;
#[cfg(not(target_os = "unknown"))]
pub(crate) mod rt;
}
cfg_unstable! {
pub mod pin;
#[cfg(not(target_os = "unknown"))]
pub mod process;
mod unit;
@ -313,9 +265,7 @@ cfg_unstable! {
mod option;
mod string;
mod collections;
}
cfg_unstable_default! {
#[doc(inline)]
pub use std::{write, writeln};
}

View file

@ -61,16 +61,10 @@ pub use std::net::Shutdown;
pub use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
pub use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6};
#[cfg(not(target_os = "unknown"))]
pub use addr::ToSocketAddrs;
#[cfg(not(target_os = "unknown"))]
pub use tcp::{Incoming, TcpListener, TcpStream};
#[cfg(not(target_os = "unknown"))]
pub use udp::UdpSocket;
#[cfg(not(target_os = "unknown"))]
mod addr;
#[cfg(not(target_os = "unknown"))]
mod tcp;
#[cfg(not(target_os = "unknown"))]
mod udp;

View file

@ -2,12 +2,11 @@ use std::future::Future;
use std::net::SocketAddr;
use std::pin::Pin;
use smol::Async;
use crate::future;
use crate::io;
use crate::rt::Watcher;
use crate::net::{TcpStream, ToSocketAddrs};
use crate::stream::Stream;
use crate::sync::Arc;
use crate::task::{Context, Poll};
/// A TCP socket server, listening for connections.
@ -49,7 +48,7 @@ use crate::task::{Context, Poll};
/// ```
#[derive(Debug)]
pub struct TcpListener {
watcher: Async<std::net::TcpListener>,
watcher: Watcher<mio::net::TcpListener>,
}
impl TcpListener {
@ -75,15 +74,17 @@ impl TcpListener {
///
/// [`local_addr`]: #method.local_addr
pub async fn bind<A: ToSocketAddrs>(addrs: A) -> io::Result<TcpListener> {
once_cell::sync::Lazy::force(&crate::rt::RUNTIME);
let mut last_err = None;
let addrs = addrs.to_socket_addrs().await?;
let addrs = addrs
.to_socket_addrs()
.await?;
for addr in addrs {
match Async::<std::net::TcpListener>::bind(&addr) {
Ok(listener) => {
return Ok(TcpListener { watcher: listener });
match mio::net::TcpListener::bind(&addr) {
Ok(mio_listener) => {
return Ok(TcpListener {
watcher: Watcher::new(mio_listener),
});
}
Err(err) => last_err = Some(err),
}
@ -114,9 +115,13 @@ impl TcpListener {
/// # Ok(()) }) }
/// ```
pub async fn accept(&self) -> io::Result<(TcpStream, SocketAddr)> {
let (stream, addr) = self.watcher.accept().await?;
let (io, addr) =
future::poll_fn(|cx| self.watcher.poll_read_with(cx, |inner| inner.accept_std()))
.await?;
let mio_stream = mio::net::TcpStream::from_stream(io)?;
let stream = TcpStream {
watcher: Arc::new(stream),
watcher: Watcher::new(mio_stream),
};
Ok((stream, addr))
}
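For reference, the accept path above is driven through the usual public API; a minimal (and deliberately endless) accept loop looks like this, with the address and greeting chosen arbitrarily:

```rust
use async_std::net::TcpListener;
use async_std::prelude::*;
use async_std::task;

fn main() -> std::io::Result<()> {
    task::block_on(async {
        let listener = TcpListener::bind("127.0.0.1:0").await?;
        println!("listening on {}", listener.local_addr()?);

        loop {
            let (mut stream, peer) = listener.accept().await?;
            task::spawn(async move {
                println!("connection from {}", peer);
                // Send a greeting and drop the connection.
                let _ = stream.write_all(b"hello\n").await;
            });
        }
    })
}
```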
@ -202,10 +207,9 @@ impl<'a> Stream for Incoming<'a> {
impl From<std::net::TcpListener> for TcpListener {
/// Converts a `std::net::TcpListener` into its asynchronous equivalent.
fn from(listener: std::net::TcpListener) -> TcpListener {
once_cell::sync::Lazy::force(&crate::rt::RUNTIME);
let mio_listener = mio::net::TcpListener::from_std(listener).unwrap();
TcpListener {
watcher: Async::new(listener).expect("TcpListener is known to be good"),
watcher: Watcher::new(mio_listener),
}
}
}
@ -227,31 +231,29 @@ cfg_unix! {
impl IntoRawFd for TcpListener {
fn into_raw_fd(self) -> RawFd {
self.watcher.into_raw_fd()
self.watcher.into_inner().into_raw_fd()
}
}
}
cfg_windows! {
use crate::os::windows::io::{
AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket,
};
impl AsRawSocket for TcpListener {
fn as_raw_socket(&self) -> RawSocket {
self.watcher.as_raw_socket()
}
}
impl FromRawSocket for TcpListener {
unsafe fn from_raw_socket(handle: RawSocket) -> TcpListener {
std::net::TcpListener::from_raw_socket(handle).into()
}
}
impl IntoRawSocket for TcpListener {
fn into_raw_socket(self) -> RawSocket {
self.watcher.into_raw_socket()
}
}
// use crate::os::windows::io::{AsRawHandle, FromRawHandle, IntoRawHandle, RawHandle};
//
// impl AsRawSocket for TcpListener {
// fn as_raw_socket(&self) -> RawSocket {
// self.raw_socket
// }
// }
//
// impl FromRawSocket for TcpListener {
// unsafe fn from_raw_socket(handle: RawSocket) -> TcpListener {
// net::TcpListener::from_raw_socket(handle).try_into().unwrap()
// }
// }
//
// impl IntoRawSocket for TcpListener {
// fn into_raw_socket(self) -> RawSocket {
// self.raw_socket
// }
// }
}

View file

@ -1,13 +1,13 @@
use std::io::{IoSlice, IoSliceMut};
use std::io::{IoSlice, IoSliceMut, Read as _, Write as _};
use std::net::SocketAddr;
use std::pin::Pin;
use smol::Async;
use crate::future;
use crate::io::{self, Read, Write};
use crate::rt::Watcher;
use crate::net::ToSocketAddrs;
use crate::sync::Arc;
use crate::task::{Context, Poll};
use crate::task::{spawn_blocking, Context, Poll};
use crate::utils::Context as _;
/// A TCP stream between a local and a remote socket.
///
@ -45,9 +45,9 @@ use crate::task::{Context, Poll};
/// #
/// # Ok(()) }) }
/// ```
#[derive(Debug, Clone)]
#[derive(Debug)]
pub struct TcpStream {
pub(super) watcher: Arc<Async<std::net::TcpStream>>,
pub(super) watcher: Watcher<mio::net::TcpStream>,
}
impl TcpStream {
@ -71,22 +71,26 @@ impl TcpStream {
/// # Ok(()) }) }
/// ```
pub async fn connect<A: ToSocketAddrs>(addrs: A) -> io::Result<TcpStream> {
once_cell::sync::Lazy::force(&crate::rt::RUNTIME);
let mut last_err = None;
let addrs = addrs.to_socket_addrs().await?;
let addrs = addrs
.to_socket_addrs()
.await?;
for addr in addrs {
match Async::<std::net::TcpStream>::connect(&addr).await {
Ok(stream) => {
return Ok(TcpStream {
watcher: Arc::new(stream),
});
}
Err(e) => {
last_err = Some(e);
continue;
}
let res = spawn_blocking(move || {
let std_stream = std::net::TcpStream::connect(addr)
.context(|| format!("could not connect to {}", addr))?;
let mio_stream = mio::net::TcpStream::from_stream(std_stream)
.context(|| format!("could not open async connection to {}", addr))?;
Ok(TcpStream {
watcher: Watcher::new(mio_stream),
})
})
.await;
match res {
Ok(stream) => return Ok(stream),
Err(err) => last_err = Some(err),
}
}
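And the connect path above from the caller's side (standard async-std usage; the host and request are arbitrary):

```rust
use async_std::net::TcpStream;
use async_std::prelude::*;
use async_std::task;

fn main() -> std::io::Result<()> {
    task::block_on(async {
        let mut stream = TcpStream::connect("example.com:80").await?;
        stream
            .write_all(b"GET / HTTP/1.0\r\nHost: example.com\r\n\r\n")
            .await?;

        let mut response = Vec::new();
        stream.read_to_end(&mut response).await?;
        println!("read {} bytes", response.len());
        Ok(())
    })
}
```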
@ -204,7 +208,7 @@ impl TcpStream {
/// # Ok(()) }) }
/// ```
pub async fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
self.watcher.peek(buf).await
future::poll_fn(|cx| self.watcher.poll_read_with(cx, |inner| inner.peek(buf))).await
}
/// Gets the value of the `TCP_NODELAY` option on this socket.
@ -307,7 +311,7 @@ impl Read for &TcpStream {
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut &*self.watcher).poll_read(cx, buf)
self.watcher.poll_read_with(cx, |mut inner| inner.read(buf))
}
}
@ -343,25 +347,25 @@ impl Write for &TcpStream {
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut &*self.watcher).poll_write(cx, buf)
self.watcher
.poll_write_with(cx, |mut inner| inner.write(buf))
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut &*self.watcher).poll_flush(cx)
self.watcher.poll_write_with(cx, |mut inner| inner.flush())
}
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut &*self.watcher).poll_close(cx)
fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
Poll::Ready(Ok(()))
}
}
impl From<std::net::TcpStream> for TcpStream {
/// Converts a `std::net::TcpStream` into its asynchronous equivalent.
fn from(stream: std::net::TcpStream) -> TcpStream {
once_cell::sync::Lazy::force(&crate::rt::RUNTIME);
let mio_stream = mio::net::TcpStream::from_stream(stream).unwrap();
TcpStream {
watcher: Arc::new(Async::new(stream).expect("TcpStream is known to be good")),
watcher: Watcher::new(mio_stream),
}
}
}
@ -383,37 +387,29 @@ cfg_unix! {
impl IntoRawFd for TcpStream {
fn into_raw_fd(self) -> RawFd {
// TODO(stjepang): This does not mean `RawFd` is now the sole owner of the file
// descriptor because it's possible that there are other clones of this `TcpStream`
// using it at the same time. We should probably document that behavior.
self.as_raw_fd()
self.watcher.into_inner().into_raw_fd()
}
}
}
cfg_windows! {
use crate::os::windows::io::{
RawSocket, AsRawSocket, FromRawSocket, IntoRawSocket
};
impl AsRawSocket for TcpStream {
fn as_raw_socket(&self) -> RawSocket {
self.watcher.get_ref().as_raw_socket()
}
}
impl FromRawSocket for TcpStream {
unsafe fn from_raw_socket(handle: RawSocket) -> TcpStream {
std::net::TcpStream::from_raw_socket(handle).into()
}
}
impl IntoRawSocket for TcpStream {
fn into_raw_socket(self) -> RawSocket {
// TODO(stjepang): This does not mean `RawFd` is now the sole owner of the file
// descriptor because it's possible that there are other clones of this `TcpStream`
// using it at the same time. We should probably document that behavior.
self.as_raw_socket()
}
}
// use crate::os::windows::io::{AsRawHandle, FromRawHandle, IntoRawHandle, RawHandle};
//
// impl AsRawSocket for TcpStream {
// fn as_raw_socket(&self) -> RawSocket {
// self.raw_socket
// }
// }
//
// impl FromRawSocket for TcpStream {
// unsafe fn from_raw_socket(handle: RawSocket) -> TcpStream {
// net::TcpStream::from_raw_socket(handle).try_into().unwrap()
// }
// }
//
// impl IntoRawSocket for TcpListener {
// fn into_raw_socket(self) -> RawSocket {
// self.raw_socket
// }
// }
}

View file

@ -2,9 +2,9 @@ use std::io;
use std::net::SocketAddr;
use std::net::{Ipv4Addr, Ipv6Addr};
use smol::Async;
use crate::future;
use crate::net::ToSocketAddrs;
use crate::rt::Watcher;
use crate::utils::Context as _;
/// A UDP socket.
@ -45,7 +45,7 @@ use crate::utils::Context as _;
/// ```
#[derive(Debug)]
pub struct UdpSocket {
watcher: Async<std::net::UdpSocket>,
watcher: Watcher<mio::net::UdpSocket>,
}
impl UdpSocket {
@ -68,15 +68,17 @@ impl UdpSocket {
/// # Ok(()) }) }
/// ```
pub async fn bind<A: ToSocketAddrs>(addrs: A) -> io::Result<UdpSocket> {
once_cell::sync::Lazy::force(&crate::rt::RUNTIME);
let mut last_err = None;
let addrs = addrs.to_socket_addrs().await?;
let addrs = addrs
.to_socket_addrs()
.await?;
for addr in addrs {
match Async::<std::net::UdpSocket>::bind(&addr) {
Ok(socket) => {
return Ok(UdpSocket { watcher: socket });
match mio::net::UdpSocket::bind(&addr) {
Ok(mio_socket) => {
return Ok(UdpSocket {
watcher: Watcher::new(mio_socket),
});
}
Err(err) => last_err = Some(err),
}
@ -90,32 +92,6 @@ impl UdpSocket {
}))
}
/// Returns the peer address that this listener is connected to.
///
/// This can be useful, for example, when connecting to port 0 to figure out which port was
/// actually connected.
///
/// # Examples
///
/// ```no_run
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::net::UdpSocket;
///
/// let socket1 = UdpSocket::bind("127.0.0.1:0").await?;
/// let socket2 = UdpSocket::bind("127.0.0.1:0").await?;
/// socket1.connect(socket2.local_addr()?).await?;
/// let addr = socket1.peer_addr()?;
/// #
/// # Ok(()) }) }
/// ```
pub fn peer_addr(&self) -> io::Result<SocketAddr> {
self.watcher
.get_ref()
.peer_addr()
.context(|| String::from("could not get peer address"))
}
/// Returns the local address that this listener is bound to.
///
/// This can be useful, for example, when binding to port 0 to figure out which port was
@ -177,10 +153,12 @@ impl UdpSocket {
}
};
self.watcher
.send_to(buf, addr)
.await
.context(|| format!("could not send packet to {}", addr))
future::poll_fn(|cx| {
self.watcher
.poll_write_with(cx, |inner| inner.send_to(buf, &addr))
})
.await
.context(|| format!("could not send packet to {}", addr))
}
/// Receives data from the socket.
@ -203,7 +181,22 @@ impl UdpSocket {
/// # Ok(()) }) }
/// ```
pub async fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
self.watcher.recv_from(buf).await
future::poll_fn(|cx| {
self.watcher
.poll_read_with(cx, |inner| inner.recv_from(buf))
})
.await
.context(|| {
use std::fmt::Write;
let mut error = String::from("could not receive data on ");
if let Ok(addr) = self.local_addr() {
let _ = write!(&mut error, "{}", addr);
} else {
error.push_str("socket");
}
error
})
}
/// Connects the UDP socket to a remote address.
@ -251,12 +244,9 @@ impl UdpSocket {
}))
}
/// Sends data on the socket to the remote address to which it is connected.
/// Sends data on the socket to the given address.
///
/// The [`connect`] method will connect this socket to a remote address.
/// This method will fail if the socket is not connected.
///
/// [`connect`]: #method.connect
/// On success, returns the number of bytes written.
///
/// # Examples
///
@ -265,21 +255,40 @@ impl UdpSocket {
/// #
/// use async_std::net::UdpSocket;
///
/// let socket = UdpSocket::bind("127.0.0.1:34254").await?;
/// socket.connect("127.0.0.1:8080").await?;
/// let bytes = socket.send(b"Hi there!").await?;
/// const THE_MERCHANT_OF_VENICE: &[u8] = b"
/// If you prick us, do we not bleed?
/// If you tickle us, do we not laugh?
/// If you poison us, do we not die?
/// And if you wrong us, shall we not revenge?
/// ";
///
/// println!("Sent {} bytes", bytes);
/// let socket = UdpSocket::bind("127.0.0.1:0").await?;
///
/// let addr = "127.0.0.1:7878";
/// let sent = socket.send_to(THE_MERCHANT_OF_VENICE, &addr).await?;
/// println!("Sent {} bytes to {}", sent, addr);
/// #
/// # Ok(()) }) }
/// ```
pub async fn send(&self, buf: &[u8]) -> io::Result<usize> {
self.watcher.send(buf).await
future::poll_fn(|cx| self.watcher.poll_write_with(cx, |inner| inner.send(buf)))
.await
.context(|| {
use std::fmt::Write;
let mut error = String::from("could not send data on ");
if let Ok(addr) = self.local_addr() {
let _ = write!(&mut error, "{}", addr);
} else {
error.push_str("socket");
}
error
})
}
/// Receives data from the socket.
///
/// On success, returns the number of bytes read.
/// On success, returns the number of bytes read and the origin.
///
/// # Examples
///
@ -298,7 +307,19 @@ impl UdpSocket {
/// # Ok(()) }) }
/// ```
pub async fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
self.watcher.recv(buf).await
future::poll_fn(|cx| self.watcher.poll_read_with(cx, |inner| inner.recv(buf)))
.await
.context(|| {
use std::fmt::Write;
let mut error = String::from("could not receive data on ");
if let Ok(addr) = self.local_addr() {
let _ = write!(&mut error, "{}", addr);
} else {
error.push_str("socket");
}
error
})
}
/// Gets the value of the `SO_BROADCAST` option for this socket.
@ -481,10 +502,9 @@ impl UdpSocket {
impl From<std::net::UdpSocket> for UdpSocket {
/// Converts a `std::net::UdpSocket` into its asynchronous equivalent.
fn from(socket: std::net::UdpSocket) -> UdpSocket {
once_cell::sync::Lazy::force(&crate::rt::RUNTIME);
let mio_socket = mio::net::UdpSocket::from_socket(socket).unwrap();
UdpSocket {
watcher: Async::new(socket).expect("UdpSocket is known to be good"),
watcher: Watcher::new(mio_socket),
}
}
}
@ -506,31 +526,29 @@ cfg_unix! {
impl IntoRawFd for UdpSocket {
fn into_raw_fd(self) -> RawFd {
self.watcher.into_raw_fd()
self.watcher.into_inner().into_raw_fd()
}
}
}
cfg_windows! {
use crate::os::windows::io::{
RawSocket, AsRawSocket, IntoRawSocket, FromRawSocket
};
impl AsRawSocket for UdpSocket {
fn as_raw_socket(&self) -> RawSocket {
self.watcher.get_ref().as_raw_socket()
}
}
impl FromRawSocket for UdpSocket {
unsafe fn from_raw_socket(handle: RawSocket) -> UdpSocket {
std::net::UdpSocket::from_raw_socket(handle).into()
}
}
impl IntoRawSocket for UdpSocket {
fn into_raw_socket(self) -> RawSocket {
self.watcher.into_raw_socket()
}
}
// use crate::os::windows::io::{AsRawHandle, FromRawHandle, IntoRawHandle, RawHandle};
//
// impl AsRawSocket for UdpSocket {
// fn as_raw_socket(&self) -> RawSocket {
// self.raw_socket
// }
// }
//
// impl FromRawSocket for UdpSocket {
// unsafe fn from_raw_socket(handle: RawSocket) -> UdpSocket {
// net::UdpSocket::from_raw_socket(handle).into()
// }
// }
//
// impl IntoRawSocket for UdpSocket {
// fn into_raw_socket(self) -> RawSocket {
// self.raw_socket
// }
// }
}

View file

@ -2,9 +2,8 @@ use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{FromStream, IntoStream};
use std::convert::identity;
impl<T: Send, V> FromStream<Option<T>> for Option<V>
impl<T, V> FromStream<Option<T>> for Option<V>
where
V: FromStream<T>,
{
@ -14,29 +13,28 @@ where
#[inline]
fn from_stream<'a, S: IntoStream<Item = Option<T>> + 'a>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = Self> + 'a>> {
let stream = stream.into_stream();
Box::pin(async move {
// Using `take_while` here because it is able to stop the stream early
// Using `scan` here because it is able to stop the stream early
// if a failure occurs
let mut found_none = false;
let mut found_error = false;
let out: V = stream
.take_while(|elem| {
elem.is_some() || {
found_none = true;
// Stop processing the stream on `None`
false
.scan((), |_, elem| {
match elem {
Some(elem) => Some(elem),
None => {
found_error = true;
// Stop processing the stream on error
None
}
}
})
.filter_map(identity)
.collect()
.await;
if found_none { None } else { Some(out) }
if found_error { None } else { Some(out) }
})
}
}
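A short usage sketch for the impl above, assuming the crate's `unstable` feature (which gates `FromStream`/`collect`) is enabled: collecting a stream of `Option`s short-circuits to `None` as soon as a `None` is seen.

```rust
use async_std::prelude::*;
use async_std::stream;
use async_std::task;

fn main() {
    task::block_on(async {
        let all_some: Option<Vec<u32>> = stream::from_iter(vec![Some(1), Some(2), Some(3)])
            .collect()
            .await;
        assert_eq!(all_some, Some(vec![1, 2, 3]));

        let has_none: Option<Vec<u32>> = stream::from_iter(vec![Some(1), None, Some(3)])
            .collect()
            .await;
        assert_eq!(has_none, None);
    });
}
```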

View file

@ -1,8 +1,7 @@
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{Product, Stream};
use std::convert::identity;
use crate::stream::{Stream, Product};
impl<T, U> Product<Option<U>> for Option<T>
where
@ -37,27 +36,29 @@ where
```
"#]
fn product<'a, S>(stream: S) -> Pin<Box<dyn Future<Output = Option<T>> + 'a>>
where
S: Stream<Item = Option<U>> + 'a,
where S: Stream<Item = Option<U>> + 'a
{
Box::pin(async move {
// Using `take_while` here because it is able to stop the stream early
// Using `scan` here because it is able to stop the stream early
// if a failure occurs
let mut found_none = false;
let out = <T as Product<U>>::product(
stream
.take_while(|elem| {
elem.is_some() || {
let out = <T as Product<U>>::product(stream
.scan((), |_, elem| {
match elem {
Some(elem) => Some(elem),
None => {
found_none = true;
// Stop processing the stream on `None`
false
// Stop processing the stream on error
None
}
})
.filter_map(identity),
)
.await;
}
})).await;
if found_none { None } else { Some(out) }
if found_none {
None
} else {
Some(out)
}
})
}
}

View file

@ -2,7 +2,6 @@ use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{Stream, Sum};
use std::convert::identity;
impl<T, U> Sum<Option<U>> for Option<T>
where
@ -32,27 +31,29 @@ where
```
"#]
fn sum<'a, S>(stream: S) -> Pin<Box<dyn Future<Output = Option<T>> + 'a>>
where
S: Stream<Item = Option<U>> + 'a,
where S: Stream<Item = Option<U>> + 'a
{
Box::pin(async move {
// Using `take_while` here because it is able to stop the stream early
// Using `scan` here because it is able to stop the stream early
// if a failure occurs
let mut found_none = false;
let out = <T as Sum<U>>::sum(
stream
.take_while(|elem| {
elem.is_some() || {
let out = <T as Sum<U>>::sum(stream
.scan((), |_, elem| {
match elem {
Some(elem) => Some(elem),
None => {
found_none = true;
// Stop processing the stream on `None`
false
// Stop processing the stream on error
None
}
})
.filter_map(identity),
)
.await;
}
})).await;
if found_none { None } else { Some(out) }
if found_none {
None
} else {
Some(out)
}
})
}
}

View file

@ -2,14 +2,16 @@
use std::fmt;
use std::net::Shutdown;
use std::os::unix::net::UnixDatagram as StdUnixDatagram;
use smol::Async;
use mio_uds;
use super::SocketAddr;
use crate::future;
use crate::io;
use crate::rt::Watcher;
use crate::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use crate::path::Path;
use crate::task::spawn_blocking;
/// A Unix datagram socket.
///
@ -40,15 +42,13 @@ use crate::path::Path;
/// # Ok(()) }) }
/// ```
pub struct UnixDatagram {
watcher: Async<StdUnixDatagram>,
watcher: Watcher<mio_uds::UnixDatagram>,
}
impl UnixDatagram {
fn new(socket: StdUnixDatagram) -> UnixDatagram {
once_cell::sync::Lazy::force(&crate::rt::RUNTIME);
fn new(socket: mio_uds::UnixDatagram) -> UnixDatagram {
UnixDatagram {
watcher: Async::new(socket).expect("UnixDatagram is known to be good"),
watcher: Watcher::new(socket),
}
}
@ -66,11 +66,9 @@ impl UnixDatagram {
/// # Ok(()) }) }
/// ```
pub async fn bind<P: AsRef<Path>>(path: P) -> io::Result<UnixDatagram> {
once_cell::sync::Lazy::force(&crate::rt::RUNTIME);
let path = path.as_ref().to_owned();
let socket = Async::<StdUnixDatagram>::bind(path)?;
Ok(UnixDatagram { watcher: socket })
let socket = spawn_blocking(move || mio_uds::UnixDatagram::bind(path)).await?;
Ok(UnixDatagram::new(socket))
}
/// Creates a Unix datagram which is not bound to any address.
@ -87,7 +85,7 @@ impl UnixDatagram {
/// # Ok(()) }) }
/// ```
pub fn unbound() -> io::Result<UnixDatagram> {
let socket = StdUnixDatagram::unbound()?;
let socket = mio_uds::UnixDatagram::unbound()?;
Ok(UnixDatagram::new(socket))
}
@ -107,7 +105,7 @@ impl UnixDatagram {
/// # Ok(()) }) }
/// ```
pub fn pair() -> io::Result<(UnixDatagram, UnixDatagram)> {
let (a, b) = StdUnixDatagram::pair()?;
let (a, b) = mio_uds::UnixDatagram::pair()?;
let a = UnixDatagram::new(a);
let b = UnixDatagram::new(b);
Ok((a, b))
@ -199,7 +197,11 @@ impl UnixDatagram {
/// # Ok(()) }) }
/// ```
pub async fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
self.watcher.recv_from(buf).await
future::poll_fn(|cx| {
self.watcher
.poll_read_with(cx, |inner| inner.recv_from(buf))
})
.await
}
/// Receives data from the socket.
@ -220,7 +222,7 @@ impl UnixDatagram {
/// # Ok(()) }) }
/// ```
pub async fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
self.watcher.recv(buf).await
future::poll_fn(|cx| self.watcher.poll_read_with(cx, |inner| inner.recv(buf))).await
}
/// Sends data on the socket to the specified address.
@ -240,7 +242,11 @@ impl UnixDatagram {
/// # Ok(()) }) }
/// ```
pub async fn send_to<P: AsRef<Path>>(&self, buf: &[u8], path: P) -> io::Result<usize> {
self.watcher.send_to(buf, path.as_ref()).await
future::poll_fn(|cx| {
self.watcher
.poll_write_with(cx, |inner| inner.send_to(buf, path.as_ref()))
})
.await
}
/// Sends data on the socket to the socket's peer.
@ -261,7 +267,7 @@ impl UnixDatagram {
/// # Ok(()) }) }
/// ```
pub async fn send(&self, buf: &[u8]) -> io::Result<usize> {
self.watcher.send(buf).await
future::poll_fn(|cx| self.watcher.poll_write_with(cx, |inner| inner.send(buf))).await
}
/// Shut down the read, write, or both halves of this connection.
@ -306,35 +312,31 @@ impl fmt::Debug for UnixDatagram {
}
}
impl From<StdUnixDatagram> for UnixDatagram {
impl From<std::os::unix::net::UnixDatagram> for UnixDatagram {
/// Converts a `std::os::unix::net::UnixDatagram` into its asynchronous equivalent.
fn from(datagram: StdUnixDatagram) -> UnixDatagram {
once_cell::sync::Lazy::force(&crate::rt::RUNTIME);
fn from(datagram: std::os::unix::net::UnixDatagram) -> UnixDatagram {
let mio_datagram = mio_uds::UnixDatagram::from_datagram(datagram).unwrap();
UnixDatagram {
watcher: Async::new(datagram).expect("UnixDatagram is known to be good"),
watcher: Watcher::new(mio_datagram),
}
}
}
impl AsRawFd for UnixDatagram {
fn as_raw_fd(&self) -> RawFd {
self.watcher.as_raw_fd()
self.watcher.get_ref().as_raw_fd()
}
}
impl FromRawFd for UnixDatagram {
unsafe fn from_raw_fd(fd: RawFd) -> UnixDatagram {
once_cell::sync::Lazy::force(&crate::rt::RUNTIME);
let raw = StdUnixDatagram::from_raw_fd(fd);
let datagram = Async::<StdUnixDatagram>::new(raw).expect("invalid file descriptor");
UnixDatagram { watcher: datagram }
let datagram = std::os::unix::net::UnixDatagram::from_raw_fd(fd);
datagram.into()
}
}
impl IntoRawFd for UnixDatagram {
fn into_raw_fd(self) -> RawFd {
self.watcher.into_raw_fd()
self.watcher.into_inner().into_raw_fd()
}
}

View file

@ -1,20 +1,20 @@
//! Unix-specific networking extensions.
use std::fmt;
use std::future::Future;
use std::os::unix::net::UnixListener as StdUnixListener;
use std::pin::Pin;
use std::future::Future;
use smol::Async;
use mio_uds;
use super::SocketAddr;
use super::UnixStream;
use crate::future;
use crate::io;
use crate::rt::Watcher;
use crate::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use crate::path::Path;
use crate::stream::Stream;
use crate::sync::Arc;
use crate::task::{Context, Poll};
use crate::task::{spawn_blocking, Context, Poll};
/// A Unix domain socket server, listening for connections.
///
@ -50,7 +50,7 @@ use crate::task::{Context, Poll};
/// # Ok(()) }) }
/// ```
pub struct UnixListener {
watcher: Async<StdUnixListener>,
watcher: Watcher<mio_uds::UnixListener>,
}
impl UnixListener {
@ -68,12 +68,12 @@ impl UnixListener {
/// # Ok(()) }) }
/// ```
pub async fn bind<P: AsRef<Path>>(path: P) -> io::Result<UnixListener> {
once_cell::sync::Lazy::force(&crate::rt::RUNTIME);
let path = path.as_ref().to_owned();
let listener = Async::<StdUnixListener>::bind(path)?;
let listener = spawn_blocking(move || mio_uds::UnixListener::bind(path)).await?;
Ok(UnixListener { watcher: listener })
Ok(UnixListener {
watcher: Watcher::new(listener),
})
}
/// Accepts a new incoming connection to this listener.
@ -93,14 +93,29 @@ impl UnixListener {
/// # Ok(()) }) }
/// ```
pub async fn accept(&self) -> io::Result<(UnixStream, SocketAddr)> {
let (stream, addr) = self.watcher.accept().await?;
future::poll_fn(|cx| {
let res = futures_core::ready!(self.watcher.poll_read_with(cx, |inner| {
match inner.accept_std() {
// Converting to `WouldBlock` so that the watcher will
// add the waker of this task to a list of readers.
Ok(None) => Err(io::ErrorKind::WouldBlock.into()),
res => res,
}
}));
Ok((
UnixStream {
watcher: Arc::new(stream),
},
addr,
))
match res? {
Some((io, addr)) => {
let mio_stream = mio_uds::UnixStream::from_stream(io)?;
let stream = UnixStream {
watcher: Watcher::new(mio_stream),
};
Poll::Ready(Ok((stream, addr)))
}
// This should never happen since `None` is converted to `WouldBlock`
None => unreachable!(),
}
})
.await
}
/// Returns a stream of incoming connections.
@ -191,20 +206,19 @@ impl Stream for Incoming<'_> {
}
}
impl From<StdUnixListener> for UnixListener {
impl From<std::os::unix::net::UnixListener> for UnixListener {
/// Converts a `std::os::unix::net::UnixListener` into its asynchronous equivalent.
fn from(listener: StdUnixListener) -> UnixListener {
once_cell::sync::Lazy::force(&crate::rt::RUNTIME);
fn from(listener: std::os::unix::net::UnixListener) -> UnixListener {
let mio_listener = mio_uds::UnixListener::from_listener(listener).unwrap();
UnixListener {
watcher: Async::new(listener).expect("UnixListener is known to be good"),
watcher: Watcher::new(mio_listener),
}
}
}
impl AsRawFd for UnixListener {
fn as_raw_fd(&self) -> RawFd {
self.watcher.as_raw_fd()
self.watcher.get_ref().as_raw_fd()
}
}
@ -217,6 +231,6 @@ impl FromRawFd for UnixListener {
impl IntoRawFd for UnixListener {
fn into_raw_fd(self) -> RawFd {
self.watcher.into_raw_fd()
self.watcher.into_inner().into_raw_fd()
}
}
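The `Ok(None)`-to-`WouldBlock` conversion in `accept` above is the load-bearing trick: the poll helper only parks the task (by registering its waker) when it sees `WouldBlock`, so "no connection ready yet" has to surface as that error kind rather than as `Ok(None)`. A self-contained sketch of the mapping, with a made-up helper name:

```rust
use std::io;

// Maps "not ready yet" (`Ok(None)`) to `WouldBlock`, which readiness-based poll
// helpers interpret as "register the waker and return `Poll::Pending`".
fn map_not_ready<T>(res: io::Result<Option<T>>) -> io::Result<T> {
    match res {
        Ok(Some(value)) => Ok(value),
        Ok(None) => Err(io::ErrorKind::WouldBlock.into()),
        Err(err) => Err(err),
    }
}

fn main() {
    let pending: io::Result<Option<u8>> = Ok(None);
    assert_eq!(
        map_not_ready(pending).unwrap_err().kind(),
        io::ErrorKind::WouldBlock
    );
}
```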

View file

@ -1,18 +1,18 @@
//! Unix-specific networking extensions.
use std::fmt;
use std::io::{Read as _, Write as _};
use std::net::Shutdown;
use std::os::unix::net::UnixStream as StdUnixStream;
use std::pin::Pin;
use smol::Async;
use mio_uds;
use super::SocketAddr;
use crate::io::{self, Read, Write};
use crate::rt::Watcher;
use crate::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use crate::path::Path;
use crate::sync::Arc;
use crate::task::{Context, Poll};
use crate::task::{spawn_blocking, Context, Poll};
/// A Unix stream socket.
///
@ -37,9 +37,8 @@ use crate::task::{Context, Poll};
/// #
/// # Ok(()) }) }
/// ```
#[derive(Clone)]
pub struct UnixStream {
pub(super) watcher: Arc<Async<StdUnixStream>>,
pub(super) watcher: Watcher<mio_uds::UnixStream>,
}
impl UnixStream {
@ -57,12 +56,16 @@ impl UnixStream {
/// # Ok(()) }) }
/// ```
pub async fn connect<P: AsRef<Path>>(path: P) -> io::Result<UnixStream> {
once_cell::sync::Lazy::force(&crate::rt::RUNTIME);
let path = path.as_ref().to_owned();
let stream = Arc::new(Async::<StdUnixStream>::connect(path).await?);
Ok(UnixStream { watcher: stream })
spawn_blocking(move || {
let std_stream = std::os::unix::net::UnixStream::connect(path)?;
let mio_stream = mio_uds::UnixStream::from_stream(std_stream)?;
Ok(UnixStream {
watcher: Watcher::new(mio_stream),
})
})
.await
}
/// Creates an unnamed pair of connected sockets.
@ -81,14 +84,12 @@ impl UnixStream {
/// # Ok(()) }) }
/// ```
pub fn pair() -> io::Result<(UnixStream, UnixStream)> {
once_cell::sync::Lazy::force(&crate::rt::RUNTIME);
let (a, b) = Async::<StdUnixStream>::pair()?;
let (a, b) = mio_uds::UnixStream::pair()?;
let a = UnixStream {
watcher: Arc::new(a),
watcher: Watcher::new(a),
};
let b = UnixStream {
watcher: Arc::new(b),
watcher: Watcher::new(b),
};
Ok((a, b))
}
@ -168,7 +169,7 @@ impl Read for &UnixStream {
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut &*self.watcher).poll_read(cx, buf)
self.watcher.poll_read_with(cx, |mut inner| inner.read(buf))
}
}
@ -196,15 +197,16 @@ impl Write for &UnixStream {
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut &*self.watcher).poll_write(cx, buf)
self.watcher
.poll_write_with(cx, |mut inner| inner.write(buf))
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut &*self.watcher).poll_flush(cx)
self.watcher.poll_write_with(cx, |mut inner| inner.flush())
}
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut &*self.watcher).poll_close(cx)
fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
Poll::Ready(Ok(()))
}
}
@ -225,21 +227,19 @@ impl fmt::Debug for UnixStream {
}
}
impl From<StdUnixStream> for UnixStream {
impl From<std::os::unix::net::UnixStream> for UnixStream {
/// Converts a `std::os::unix::net::UnixStream` into its asynchronous equivalent.
fn from(stream: StdUnixStream) -> UnixStream {
once_cell::sync::Lazy::force(&crate::rt::RUNTIME);
let stream = Async::new(stream).expect("UnixStream is known to be good");
fn from(stream: std::os::unix::net::UnixStream) -> UnixStream {
let mio_stream = mio_uds::UnixStream::from_stream(stream).unwrap();
UnixStream {
watcher: Arc::new(stream),
watcher: Watcher::new(mio_stream),
}
}
}
impl AsRawFd for UnixStream {
fn as_raw_fd(&self) -> RawFd {
self.watcher.as_raw_fd()
self.watcher.get_ref().as_raw_fd()
}
}
@ -252,6 +252,6 @@ impl FromRawFd for UnixStream {
impl IntoRawFd for UnixStream {
fn into_raw_fd(self) -> RawFd {
self.as_raw_fd()
self.watcher.into_inner().into_raw_fd()
}
}

View file

@ -2,8 +2,7 @@
cfg_not_docs! {
pub use std::os::windows::io::{
AsRawHandle, FromRawHandle, IntoRawHandle, RawHandle,
AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket,
AsRawHandle, FromRawHandle, IntoRawHandle, RawHandle, RawSocket,
};
}
@ -46,33 +45,4 @@ cfg_docs! {
/// it once it's no longer needed.
fn into_raw_handle(self) -> RawHandle;
}
/// Creates I/O objects from raw sockets.
pub trait FromRawSocket {
/// Creates a new I/O object from the given raw socket.
///
/// This function will consume ownership of the socket provided and it will be closed when the returned object goes out of scope.
///
/// This function is also unsafe as the primitives currently returned have the contract that they are the sole owner of the
/// file descriptor they are wrapping. Usage of this function could accidentally allow violating this contract which can cause
/// memory unsafety in code that relies on it being true.
unsafe fn from_raw_socket(sock: RawSocket) -> Self;
}
/// Extracts raw sockets.
pub trait AsRawSocket {
/// Extracts the underlying raw socket from this object.
fn as_raw_socket(&self) -> RawSocket;
}
/// A trait to express the ability to consume an object and acquire ownership of
/// its raw `SOCKET`.
pub trait IntoRawSocket {
/// Consumes this object, returning the raw underlying socket.
///
/// This function **transfers ownership** of the underlying socket to the
/// caller. Callers are then the unique owners of the socket and must close
/// it once it's no longer needed.
fn into_raw_socket(self) -> RawSocket;
}
}

View file

@ -4,9 +4,9 @@ use std::ffi::{OsStr, OsString};
use std::rc::Rc;
use std::sync::Arc;
use crate::fs;
use crate::io;
use crate::path::{Ancestors, Components, Display, Iter, PathBuf, StripPrefixError};
#[cfg(not(target_os = "unknown"))]
use crate::{fs, io};
/// A slice of a path.
///
@ -584,7 +584,6 @@ impl Path {
/// #
/// # Ok(()) }) }
/// ```
#[cfg(not(target_os = "unknown"))]
pub async fn metadata(&self) -> io::Result<fs::Metadata> {
fs::metadata(self).await
}
@ -608,7 +607,6 @@ impl Path {
/// #
/// # Ok(()) }) }
/// ```
#[cfg(not(target_os = "unknown"))]
pub async fn symlink_metadata(&self) -> io::Result<fs::Metadata> {
fs::symlink_metadata(self).await
}
@ -634,7 +632,6 @@ impl Path {
/// #
/// # Ok(()) }) }
/// ```
#[cfg(not(target_os = "unknown"))]
pub async fn canonicalize(&self) -> io::Result<PathBuf> {
fs::canonicalize(self).await
}
@ -657,7 +654,6 @@ impl Path {
/// #
/// # Ok(()) }) }
/// ```
#[cfg(not(target_os = "unknown"))]
pub async fn read_link(&self) -> io::Result<PathBuf> {
fs::read_link(self).await
}
@ -692,7 +688,6 @@ impl Path {
/// #
/// # Ok(()) }) }
/// ```
#[cfg(not(target_os = "unknown"))]
pub async fn read_dir(&self) -> io::Result<fs::ReadDir> {
fs::read_dir(self).await
}
@ -722,7 +717,6 @@ impl Path {
/// check errors, call [fs::metadata].
///
/// [fs::metadata]: ../fs/fn.metadata.html
#[cfg(not(target_os = "unknown"))]
pub async fn exists(&self) -> bool {
fs::metadata(self).await.is_ok()
}
@ -755,7 +749,6 @@ impl Path {
///
/// [fs::metadata]: ../fs/fn.metadata.html
/// [fs::Metadata::is_file]: ../fs/struct.Metadata.html#method.is_file
#[cfg(not(target_os = "unknown"))]
pub async fn is_file(&self) -> bool {
fs::metadata(self)
.await
@ -792,7 +785,6 @@ impl Path {
///
/// [fs::metadata]: ../fs/fn.metadata.html
/// [fs::Metadata::is_dir]: ../fs/struct.Metadata.html#method.is_dir
#[cfg(not(target_os = "unknown"))]
pub async fn is_dir(&self) -> bool {
fs::metadata(self)
.await

View file

@ -323,10 +323,7 @@ impl<P: AsRef<Path>> stream::Extend<P> for PathBuf {
fn extend<'a, S: IntoStream<Item = P> + 'a>(
&'a mut self,
stream: S,
) -> Pin<Box<dyn Future<Output = ()> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = ()> + 'a>> {
let stream = stream.into_stream();
Box::pin(async move {
@ -340,14 +337,11 @@ impl<P: AsRef<Path>> stream::Extend<P> for PathBuf {
}
#[cfg(feature = "unstable")]
impl<'b, P: AsRef<Path> + 'b + Send> FromStream<P> for PathBuf {
impl<'b, P: AsRef<Path> + 'b> FromStream<P> for PathBuf {
#[inline]
fn from_stream<'a, S: IntoStream<Item = P> + 'a>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = Self> + 'a>> {
let stream = stream.into_stream();
Box::pin(async move {

View file

@ -5,68 +5,38 @@ use crate::stream::{FromStream, IntoStream};
impl<T, E, V> FromStream<Result<T, E>> for Result<V, E>
where
T: Send,
E: Send,
V: FromStream<T>,
{
/// Takes each element in the stream: if it is an `Err`, no further
/// elements are taken, and the `Err` is returned. Should no `Err`
/// occur, a container with the values of each `Result` is returned.
///
/// # Examples
///
/// ```
/// # fn main() { async_std::task::block_on(async {
/// #
/// use async_std::prelude::*;
/// use async_std::stream;
///
/// let v = stream::from_iter(vec![1, 2]);
/// let res: Result<Vec<u32>, &'static str> = v.map(|x: u32|
/// x.checked_add(1).ok_or("Overflow!")
/// ).collect().await;
/// assert_eq!(res, Ok(vec![2, 3]));
/// #
/// # }) }
/// ```
#[inline]
fn from_stream<'a, S: IntoStream<Item = Result<T, E>> + 'a>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = Self> + 'a>> {
let stream = stream.into_stream();
Box::pin(async move {
// Using `take_while` here because it is able to stop the stream early
// Using `scan` here because it is able to stop the stream early
// if a failure occurs
let mut is_error = false;
let mut found_error = None;
let out: V = stream
.take_while(|elem| {
// Stop processing the stream on `Err`
!is_error
&& (elem.is_ok() || {
is_error = true;
// Capture first `Err`
true
})
})
.filter_map(|elem| match elem {
Ok(value) => Some(value),
Err(err) => {
found_error = Some(err);
None
.scan((), |_, elem| {
match elem {
Ok(elem) => Some(elem),
Err(err) => {
found_error = Some(err);
// Stop processing the stream on error
None
}
}
})
.collect()
.await;
if is_error {
Err(found_error.unwrap())
} else {
Ok(out)
match found_error {
Some(err) => Err(err),
None => Ok(out),
}
})
}

View file

@ -1,7 +1,7 @@
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{Product, Stream};
use crate::stream::{Stream, Product};
impl<T, U, E> Product<Result<U, E>> for Result<T, E>
where
@ -36,39 +36,26 @@ where
```
"#]
fn product<'a, S>(stream: S) -> Pin<Box<dyn Future<Output = Result<T, E>> + 'a>>
where
S: Stream<Item = Result<U, E>> + 'a,
where S: Stream<Item = Result<U, E>> + 'a
{
Box::pin(async move {
// Using `take_while` here because it is able to stop the stream early
// Using `scan` here because it is able to stop the stream early
// if a failure occurs
let mut is_error = false;
let mut found_error = None;
let out = <T as Product<U>>::product(
stream
.take_while(|elem| {
// Stop processing the stream on `Err`
!is_error
&& (elem.is_ok() || {
is_error = true;
// Capture first `Err`
true
})
})
.filter_map(|elem| match elem {
Ok(value) => Some(value),
let out = <T as Product<U>>::product(stream
.scan((), |_, elem| {
match elem {
Ok(elem) => Some(elem),
Err(err) => {
found_error = Some(err);
// Stop processing the stream on error
None
}
}),
)
.await;
if is_error {
Err(found_error.unwrap())
} else {
Ok(out)
}
})).await;
match found_error {
Some(err) => Err(err),
None => Ok(out)
}
})
}

View file

@ -36,39 +36,26 @@ where
```
"#]
fn sum<'a, S>(stream: S) -> Pin<Box<dyn Future<Output = Result<T, E>> + 'a>>
where
S: Stream<Item = Result<U, E>> + 'a,
where S: Stream<Item = Result<U, E>> + 'a
{
Box::pin(async move {
// Using `take_while` here because it is able to stop the stream early
// Using `scan` here because it is able to stop the stream early
// if a failure occurs
let mut is_error = false;
let mut found_error = None;
let out = <T as Sum<U>>::sum(
stream
.take_while(|elem| {
// Stop processing the stream on `Err`
!is_error
&& (elem.is_ok() || {
is_error = true;
// Capture first `Err`
true
})
})
.filter_map(|elem| match elem {
Ok(value) => Some(value),
let out = <T as Sum<U>>::sum(stream
.scan((), |_, elem| {
match elem {
Ok(elem) => Some(elem),
Err(err) => {
found_error = Some(err);
// Stop processing the stream on error
None
}
}),
)
.await;
if is_error {
Err(found_error.unwrap())
} else {
Ok(out)
}
})).await;
match found_error {
Some(err) => Err(err),
None => Ok(out)
}
})
}

View file

@ -1,34 +1,23 @@
//! The runtime.
use std::env;
use std::thread;
use once_cell::sync::Lazy;
use crate::future;
use crate::utils::abort_on_panic;
/// Dummy runtime struct.
pub struct Runtime {}
pub use reactor::{Reactor, Watcher};
pub use runtime::Runtime;
mod reactor;
mod runtime;
/// The global runtime.
pub static RUNTIME: Lazy<Runtime> = Lazy::new(|| {
// Create an executor thread pool.
thread::Builder::new()
.name("async-std/runtime".to_string())
.spawn(|| abort_on_panic(|| RUNTIME.run()))
.expect("cannot start a runtime thread");
let thread_count = env::var("ASYNC_STD_THREAD_COUNT")
.map(|env| {
env.parse()
.expect("ASYNC_STD_THREAD_COUNT must be a number")
})
.unwrap_or_else(|_| num_cpus::get())
.max(1);
let thread_name = env::var("ASYNC_STD_THREAD_NAME").unwrap_or("async-std/runtime".to_string());
for _ in 0..thread_count {
thread::Builder::new()
.name(thread_name.clone())
.spawn(|| crate::task::block_on(future::pending::<()>()))
.expect("cannot start a runtime thread");
}
Runtime {}
Runtime::new()
});

286
src/rt/reactor.rs Normal file
View file

@ -0,0 +1,286 @@
use std::fmt;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use mio::{self, Evented};
use slab::Slab;
use crate::io;
use crate::rt::RUNTIME;
use crate::task::{Context, Poll, Waker};
/// Data associated with a registered I/O handle.
#[derive(Debug)]
struct Entry {
/// A unique identifier.
token: mio::Token,
/// Tasks that are blocked on reading from this I/O handle.
readers: Mutex<Vec<Waker>>,
/// Tasks that are blocked on writing to this I/O handle.
writers: Mutex<Vec<Waker>>,
}
/// The state of a networking driver.
pub struct Reactor {
/// A mio instance that polls for new events.
poller: mio::Poll,
/// A list into which mio stores events.
events: Mutex<mio::Events>,
/// A collection of registered I/O handles.
entries: Mutex<Slab<Arc<Entry>>>,
/// Dummy I/O handle that is only used to wake up the polling thread.
notify_reg: (mio::Registration, mio::SetReadiness),
/// An identifier for the notification handle.
notify_token: mio::Token,
}
impl Reactor {
/// Creates a new reactor for polling I/O events.
pub fn new() -> io::Result<Reactor> {
let poller = mio::Poll::new()?;
let notify_reg = mio::Registration::new2();
let mut reactor = Reactor {
poller,
events: Mutex::new(mio::Events::with_capacity(1000)),
entries: Mutex::new(Slab::new()),
notify_reg,
notify_token: mio::Token(0),
};
// Register a dummy I/O handle for waking up the polling thread.
let entry = reactor.register(&reactor.notify_reg.0)?;
reactor.notify_token = entry.token;
Ok(reactor)
}
/// Registers an I/O event source and returns its associated entry.
fn register(&self, source: &dyn Evented) -> io::Result<Arc<Entry>> {
let mut entries = self.entries.lock().unwrap();
// Reserve a vacant spot in the slab and use its key as the token value.
let vacant = entries.vacant_entry();
let token = mio::Token(vacant.key());
// Allocate an entry and insert it into the slab.
let entry = Arc::new(Entry {
token,
readers: Mutex::new(Vec::new()),
writers: Mutex::new(Vec::new()),
});
vacant.insert(entry.clone());
// Register the I/O event source in the poller.
let interest = mio::Ready::all();
let opts = mio::PollOpt::edge();
self.poller.register(source, token, interest, opts)?;
Ok(entry)
}
/// Deregisters an I/O event source associated with an entry.
fn deregister(&self, source: &dyn Evented, entry: &Entry) -> io::Result<()> {
// Deregister the I/O object from the mio instance.
self.poller.deregister(source)?;
// Remove the entry associated with the I/O object.
self.entries.lock().unwrap().remove(entry.token.0);
Ok(())
}
/// Notifies the reactor so that polling stops blocking.
pub fn notify(&self) -> io::Result<()> {
self.notify_reg.1.set_readiness(mio::Ready::readable())
}
/// Waits on the poller for new events and wakes up tasks blocked on I/O handles.
///
/// Returns `Ok(true)` if at least one new task was woken.
pub fn poll(&self, timeout: Option<Duration>) -> io::Result<bool> {
let mut events = self.events.lock().unwrap();
// Block on the poller until at least one new event comes in.
self.poller.poll(&mut events, timeout)?;
// Lock the entire entry table while we're processing new events.
let entries = self.entries.lock().unwrap();
// The number of woken tasks.
let mut progress = false;
for event in events.iter() {
let token = event.token();
if token == self.notify_token {
// If this is the notification token, we just need the notification state.
self.notify_reg.1.set_readiness(mio::Ready::empty())?;
} else {
// Otherwise, look for the entry associated with this token.
if let Some(entry) = entries.get(token.0) {
// Set the readiness flags from this I/O event.
let readiness = event.readiness();
// Wake up reader tasks blocked on this I/O handle.
let reader_interests = mio::Ready::all() - mio::Ready::writable();
if !(readiness & reader_interests).is_empty() {
for w in entry.readers.lock().unwrap().drain(..) {
w.wake();
progress = true;
}
}
// Wake up writer tasks blocked on this I/O handle.
let writer_interests = mio::Ready::all() - mio::Ready::readable();
if !(readiness & writer_interests).is_empty() {
for w in entry.writers.lock().unwrap().drain(..) {
w.wake();
progress = true;
}
}
}
}
}
Ok(progress)
}
}
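The `notify_reg` pair above is what lets other threads interrupt a blocked `poller.poll(..)` call. A minimal sketch of that wake-up mechanism in isolation, assuming mio 0.6 (the version whose `Registration::new2` API this file uses):

```rust
use std::time::Duration;

fn main() -> std::io::Result<()> {
    let poll = mio::Poll::new()?;
    // A dummy event source whose readiness can be flipped from any thread.
    let (registration, set_readiness) = mio::Registration::new2();
    poll.register(
        &registration,
        mio::Token(0),
        mio::Ready::readable(),
        mio::PollOpt::edge(),
    )?;

    // Another thread would call this to unblock the poller; here it is done inline.
    set_readiness.set_readiness(mio::Ready::readable())?;

    let mut events = mio::Events::with_capacity(8);
    poll.poll(&mut events, Some(Duration::from_secs(1)))?;
    assert!(events.iter().any(|event| event.token() == mio::Token(0)));
    Ok(())
}
```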
/// An I/O handle powered by the networking driver.
///
/// This handle wraps an I/O event source and exposes a "futurized" interface on top of it,
/// implementing traits `AsyncRead` and `AsyncWrite`.
pub struct Watcher<T: Evented> {
/// Data associated with the I/O handle.
entry: Arc<Entry>,
/// The I/O event source.
source: Option<T>,
}
impl<T: Evented> Watcher<T> {
/// Creates a new I/O handle.
///
/// The provided I/O event source will be kept registered inside the reactor's poller for the
/// lifetime of the returned I/O handle.
pub fn new(source: T) -> Watcher<T> {
Watcher {
entry: RUNTIME
.reactor()
.register(&source)
.expect("cannot register an I/O event source"),
source: Some(source),
}
}
/// Returns a reference to the inner I/O event source.
pub fn get_ref(&self) -> &T {
self.source.as_ref().unwrap()
}
/// Polls the inner I/O source for a non-blocking read operation.
///
/// If the operation returns an error of the `io::ErrorKind::WouldBlock` kind, the current task
/// will be registered for wakeup when the I/O source becomes readable.
pub fn poll_read_with<'a, F, R>(&'a self, cx: &mut Context<'_>, mut f: F) -> Poll<io::Result<R>>
where
F: FnMut(&'a T) -> io::Result<R>,
{
// If the operation isn't blocked, return its result.
match f(self.source.as_ref().unwrap()) {
Err(err) if err.kind() == io::ErrorKind::WouldBlock => {}
res => return Poll::Ready(res),
}
// Lock the waker list.
let mut list = self.entry.readers.lock().unwrap();
// Try running the operation again.
match f(self.source.as_ref().unwrap()) {
Err(err) if err.kind() == io::ErrorKind::WouldBlock => {}
res => return Poll::Ready(res),
}
// Register the task if it isn't registered already.
if list.iter().all(|w| !w.will_wake(cx.waker())) {
list.push(cx.waker().clone());
}
Poll::Pending
}
/// Polls the inner I/O source for a non-blocking write operation.
///
/// If the operation returns an error of the `io::ErrorKind::WouldBlock` kind, the current task
/// will be registered for wakeup when the I/O source becomes writable.
pub fn poll_write_with<'a, F, R>(
&'a self,
cx: &mut Context<'_>,
mut f: F,
) -> Poll<io::Result<R>>
where
F: FnMut(&'a T) -> io::Result<R>,
{
// If the operation isn't blocked, return its result.
match f(self.source.as_ref().unwrap()) {
Err(err) if err.kind() == io::ErrorKind::WouldBlock => {}
res => return Poll::Ready(res),
}
// Lock the waker list.
let mut list = self.entry.writers.lock().unwrap();
// Try running the operation again.
match f(self.source.as_ref().unwrap()) {
Err(err) if err.kind() == io::ErrorKind::WouldBlock => {}
res => return Poll::Ready(res),
}
// Register the task if it isn't registered already.
if list.iter().all(|w| !w.will_wake(cx.waker())) {
list.push(cx.waker().clone());
}
Poll::Pending
}
/// Deregisters and returns the inner I/O source.
///
/// This method is typically used to convert `Watcher`s to raw file descriptors/handles.
#[allow(dead_code)]
pub fn into_inner(mut self) -> T {
let source = self.source.take().unwrap();
RUNTIME
.reactor()
.deregister(&source, &self.entry)
.expect("cannot deregister I/O event source");
source
}
}
impl<T: Evented> Drop for Watcher<T> {
fn drop(&mut self) {
if let Some(ref source) = self.source {
RUNTIME
.reactor()
.deregister(source, &self.entry)
.expect("cannot deregister I/O event source");
}
}
}
impl<T: Evented + fmt::Debug> fmt::Debug for Watcher<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Watcher")
.field("entry", &self.entry)
.field("source", &self.source)
.finish()
}
}

449
src/rt/runtime.rs Normal file
View file

@ -0,0 +1,449 @@
use std::cell::Cell;
use std::io;
use std::iter;
use std::ptr;
use std::sync::atomic::{self, AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;
use crossbeam_deque::{Injector, Steal, Stealer, Worker};
use crossbeam_utils::thread::scope;
use once_cell::unsync::OnceCell;
use crate::rt::Reactor;
use crate::task::Runnable;
use crate::utils::{abort_on_panic, random, Spinlock};
thread_local! {
/// A reference to the current machine, if the current thread runs tasks.
static MACHINE: OnceCell<Arc<Machine>> = OnceCell::new();
/// This flag is set to true whenever `task::yield_now()` is invoked.
static YIELD_NOW: Cell<bool> = Cell::new(false);
}
/// Scheduler state.
struct Scheduler {
/// Set to `true` every time before a machine blocks polling the reactor.
progress: bool,
/// Set to `true` while a machine is polling the reactor.
polling: bool,
/// Idle processors.
processors: Vec<Processor>,
/// Running machines.
machines: Vec<Arc<Machine>>,
}
/// An async runtime.
pub struct Runtime {
/// The reactor.
reactor: Reactor,
/// The global queue of tasks.
injector: Injector<Runnable>,
/// Handles to local queues for stealing work.
stealers: Vec<Stealer<Runnable>>,
/// The scheduler state.
sched: Mutex<Scheduler>,
}
impl Runtime {
/// Creates a new runtime.
pub fn new() -> Runtime {
let cpus = num_cpus::get().max(1);
let processors: Vec<_> = (0..cpus).map(|_| Processor::new()).collect();
let stealers = processors.iter().map(|p| p.worker.stealer()).collect();
Runtime {
reactor: Reactor::new().unwrap(),
injector: Injector::new(),
stealers,
sched: Mutex::new(Scheduler {
processors,
machines: Vec::new(),
progress: false,
polling: false,
}),
}
}
/// Returns a reference to the reactor.
pub fn reactor(&self) -> &Reactor {
&self.reactor
}
/// Flushes the task slot so that tasks get run more fairly.
pub fn yield_now(&self) {
YIELD_NOW.with(|flag| flag.set(true));
}
/// Schedules a task.
pub fn schedule(&self, task: Runnable) {
MACHINE.with(|machine| {
// If the current thread is a worker thread, schedule it onto the current machine.
// Otherwise, push it into the global task queue.
match machine.get() {
None => {
self.injector.push(task);
self.notify();
}
Some(m) => m.schedule(&self, task),
}
});
}
/// Runs the runtime on the current thread.
pub fn run(&self) {
scope(|s| {
let mut idle = 0;
let mut delay = 0;
loop {
// Get a list of new machines to start, if any need to be started.
for m in self.make_machines() {
idle = 0;
s.builder()
.name("async-std/machine".to_string())
.spawn(move |_| {
abort_on_panic(|| {
let _ = MACHINE.with(|machine| machine.set(m.clone()));
m.run(self);
})
})
.expect("cannot start a machine thread");
}
// Sleep for a bit longer if the scheduler state hasn't changed in a while.
if idle > 10 {
delay = (delay * 2).min(10_000);
} else {
idle += 1;
delay = 1000;
}
thread::sleep(Duration::from_micros(delay));
}
})
.unwrap();
}
/// Returns a list of machines that need to be started.
fn make_machines(&self) -> Vec<Arc<Machine>> {
let mut sched = self.sched.lock().unwrap();
let mut to_start = Vec::new();
// If there is a machine that is stuck on a task and not making any progress, steal its
// processor and set up a new machine to take over.
for m in &mut sched.machines {
if !m.progress.swap(false, Ordering::SeqCst) {
let opt_p = m.processor.try_lock().and_then(|mut p| p.take());
if let Some(p) = opt_p {
*m = Arc::new(Machine::new(p));
to_start.push(m.clone());
}
}
}
// If no machine has been polling the reactor in a while, that means the runtime is
// overloaded with work and we need to start another machine.
if !sched.polling {
if !sched.progress {
if let Some(p) = sched.processors.pop() {
let m = Arc::new(Machine::new(p));
to_start.push(m.clone());
sched.machines.push(m);
}
}
sched.progress = false;
}
to_start
}
/// Unparks a thread polling the reactor.
fn notify(&self) {
atomic::fence(Ordering::SeqCst);
self.reactor.notify().unwrap();
}
/// Attempts to poll the reactor without blocking on it.
///
/// Returns `Ok(true)` if at least one new task was woken.
///
/// This function might not poll the reactor at all, so do not rely on it doing anything. Use
/// it only as an optimization.
fn quick_poll(&self) -> io::Result<bool> {
if let Ok(sched) = self.sched.try_lock() {
if !sched.polling {
return self.reactor.poll(Some(Duration::from_secs(0)));
}
}
Ok(false)
}
}
/// A thread running a processor.
struct Machine {
/// Holds the processor until it gets stolen.
processor: Spinlock<Option<Processor>>,
/// Gets set to `true` before running every task to indicate the machine is not stuck.
progress: AtomicBool,
}
impl Machine {
/// Creates a new machine running a processor.
fn new(p: Processor) -> Machine {
Machine {
processor: Spinlock::new(Some(p)),
progress: AtomicBool::new(true),
}
}
/// Schedules a task onto the machine.
fn schedule(&self, rt: &Runtime, task: Runnable) {
match self.processor.lock().as_mut() {
None => {
rt.injector.push(task);
rt.notify();
}
Some(p) => p.schedule(rt, task),
}
}
/// Finds the next runnable task.
fn find_task(&self, rt: &Runtime) -> Steal<Runnable> {
let mut retry = false;
// First try finding a task in the local queue or in the global queue.
if let Some(p) = self.processor.lock().as_mut() {
if let Some(task) = p.pop_task() {
return Steal::Success(task);
}
match p.steal_from_global(rt) {
Steal::Empty => {}
Steal::Retry => retry = true,
Steal::Success(task) => return Steal::Success(task),
}
}
// Try polling the reactor, but don't block on it.
let progress = rt.quick_poll().unwrap();
// Try finding a task in the local queue, which might hold tasks woken by the reactor. If
// the local queue is still empty, try stealing from other processors.
if let Some(p) = self.processor.lock().as_mut() {
if progress {
if let Some(task) = p.pop_task() {
return Steal::Success(task);
}
}
match p.steal_from_others(rt) {
Steal::Empty => {}
Steal::Retry => retry = true,
Steal::Success(task) => return Steal::Success(task),
}
}
if retry { Steal::Retry } else { Steal::Empty }
}
/// Runs the machine on the current thread.
fn run(&self, rt: &Runtime) {
/// Number of yields when no runnable task is found.
const YIELDS: u32 = 3;
/// Number of short sleeps when no runnable task is found.
const SLEEPS: u32 = 10;
/// Number of runs in a row before the global queue is inspected.
const RUNS: u32 = 64;
// The number of times the thread found work in a row.
let mut runs = 0;
// The number of times the thread didn't find work in a row.
let mut fails = 0;
loop {
// Let the scheduler know this machine is making progress.
self.progress.store(true, Ordering::SeqCst);
// Check if `task::yield_now()` was invoked and flush the slot if so.
YIELD_NOW.with(|flag| {
if flag.replace(false) {
if let Some(p) = self.processor.lock().as_mut() {
p.flush_slot(rt);
}
}
});
// After a number of runs in a row, do some work to ensure no task is left behind
// indefinitely. Poll the reactor, steal tasks from the global queue, and flush the
// task slot.
if runs >= RUNS {
runs = 0;
rt.quick_poll().unwrap();
if let Some(p) = self.processor.lock().as_mut() {
if let Steal::Success(task) = p.steal_from_global(rt) {
p.schedule(rt, task);
}
p.flush_slot(rt);
}
}
// Try to find a runnable task.
if let Steal::Success(task) = self.find_task(rt) {
task.run();
runs += 1;
fails = 0;
continue;
}
fails += 1;
// Check if the processor was stolen.
if self.processor.lock().is_none() {
break;
}
// Yield the current thread a few times.
if fails <= YIELDS {
thread::yield_now();
continue;
}
// Put the current thread to sleep a few times.
if fails <= YIELDS + SLEEPS {
let opt_p = self.processor.lock().take();
thread::sleep(Duration::from_micros(10));
*self.processor.lock() = opt_p;
continue;
}
let mut sched = rt.sched.lock().unwrap();
// One final check for available tasks while the scheduler is locked.
if let Some(task) = iter::repeat_with(|| self.find_task(rt))
.find(|s| !s.is_retry())
.and_then(|s| s.success())
{
self.schedule(rt, task);
continue;
}
// If another thread is already blocked on the reactor, there is no point in keeping
// the current thread around since there is too little work to do.
if sched.polling {
break;
}
// Take out the machine associated with the current thread.
let m = match sched
.machines
.iter()
.position(|elem| ptr::eq(&**elem, self))
{
None => break, // The processor was stolen.
Some(pos) => sched.machines.swap_remove(pos),
};
// Unlock the scheduler and poll the reactor until new I/O events arrive.
sched.polling = true;
drop(sched);
rt.reactor.poll(None).unwrap();
// Lock the scheduler again and re-register the machine.
sched = rt.sched.lock().unwrap();
sched.polling = false;
sched.machines.push(m);
sched.progress = true;
runs = 0;
fails = 0;
}
// When shutting down the thread, take the processor out if still available.
let opt_p = self.processor.lock().take();
// Return the processor to the scheduler and remove the machine.
if let Some(p) = opt_p {
let mut sched = rt.sched.lock().unwrap();
sched.processors.push(p);
sched.machines.retain(|elem| !ptr::eq(&**elem, self));
}
}
}
struct Processor {
/// The local task queue.
worker: Worker<Runnable>,
/// Contains the next task to run as an optimization that skips the queue.
slot: Option<Runnable>,
}
impl Processor {
/// Creates a new processor.
fn new() -> Processor {
Processor {
worker: Worker::new_fifo(),
slot: None,
}
}
/// Schedules a task to run on this processor.
fn schedule(&mut self, rt: &Runtime, task: Runnable) {
match self.slot.replace(task) {
None => {}
Some(task) => {
self.worker.push(task);
rt.notify();
}
}
}
/// Flushes a task from the slot into the local queue.
fn flush_slot(&mut self, rt: &Runtime) {
if let Some(task) = self.slot.take() {
self.worker.push(task);
rt.notify();
}
}
/// Pops a task from this processor.
fn pop_task(&mut self) -> Option<Runnable> {
self.slot.take().or_else(|| self.worker.pop())
}
/// Steals a task from the global queue.
fn steal_from_global(&mut self, rt: &Runtime) -> Steal<Runnable> {
rt.injector.steal_batch_and_pop(&self.worker)
}
/// Steals a task from other processors.
fn steal_from_others(&mut self, rt: &Runtime) -> Steal<Runnable> {
// Pick a random starting point in the list of queues.
let len = rt.stealers.len();
let start = random(len as u32) as usize;
// Create an iterator over stealers that starts from the chosen point.
let (l, r) = rt.stealers.split_at(start);
let stealers = r.iter().chain(l.iter());
// Try stealing a batch of tasks from each queue.
stealers
.map(|s| s.steal_batch_and_pop(&self.worker))
.collect()
}
}
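For orientation, the lookup order implemented by `Machine::find_task` and `Processor` is: the slot, the local worker queue, the global injector, and finally the other processors' queues (with a quick reactor poll in between, omitted here). A standalone sketch of that order using `crossbeam-deque` directly; tasks are plain integers rather than the crate's `Runnable`, and the slot fast path and random starting index are left out.

```
use crossbeam_deque::{Injector, Steal, Stealer, Worker};

// Standalone demo of the lookup order: local queue -> global injector -> other workers.
fn find_task<T>(local: &Worker<T>, global: &Injector<T>, stealers: &[Stealer<T>]) -> Option<T> {
    // 1. The processor's own queue.
    if let Some(task) = local.pop() {
        return Some(task);
    }
    // 2. A batch from the global queue, keeping one task for ourselves.
    loop {
        match global.steal_batch_and_pop(local) {
            Steal::Success(task) => return Some(task),
            Steal::Retry => continue,
            Steal::Empty => break,
        }
    }
    // 3. Other processors' queues, retrying on contention.
    for s in stealers {
        loop {
            match s.steal_batch_and_pop(local) {
                Steal::Success(task) => return Some(task),
                Steal::Retry => continue,
                Steal::Empty => break,
            }
        }
    }
    None
}

fn main() {
    let global = Injector::new();
    let w1 = Worker::new_fifo();
    let w2 = Worker::new_fifo();
    let stealers = vec![w2.stealer()];

    global.push(1);
    w2.push(2);

    assert_eq!(find_task(&w1, &global, &stealers), Some(1)); // taken from the injector
    assert_eq!(find_task(&w1, &global, &stealers), Some(2)); // stolen from w2
    assert_eq!(find_task(&w1, &global, &stealers), None);
}
```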

View file

@ -0,0 +1,24 @@
use crate::stream::Stream;
use std::pin::Pin;
use std::task::{Context, Poll};
/// A stream able to yield elements from both ends.
///
/// Something that implements `DoubleEndedStream` has one extra capability
/// over something that implements [`Stream`]: the ability to also take
/// `Item`s from the back, as well as the front.
///
/// [`Stream`]: trait.Stream.html
#[cfg(feature = "unstable")]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
pub trait DoubleEndedStream: Stream {
/// Removes and returns an element from the end of the stream.
///
/// Returns `None` when there are no more elements.
///
/// The [trait-level] docs contain more details.
///
/// [trait-level]: trait.DoubleEndedStream.html
fn poll_next_back(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>>;
}
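A hedged sketch of implementing the slimmed-down trait for a hypothetical `VecDeque`-backed stream, assuming `DoubleEndedStream` is re-exported as `async_std::stream::DoubleEndedStream` under the `unstable` feature as on this branch:

```
use std::collections::VecDeque;
use std::pin::Pin;
use std::task::{Context, Poll};

use async_std::stream::{DoubleEndedStream, Stream};

// Illustrative type only: a stream that can yield items from either end.
struct Deque<T>(VecDeque<T>);

impl<T: Unpin> Stream for Deque<T> {
    type Item = T;

    fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<T>> {
        // `Deque<T>` is `Unpin`, so `get_mut` gives plain mutable access.
        Poll::Ready(self.get_mut().0.pop_front())
    }
}

impl<T: Unpin> DoubleEndedStream for Deque<T> {
    fn poll_next_back(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<T>> {
        Poll::Ready(self.get_mut().0.pop_back())
    }
}
```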

View file

@ -1,246 +0,0 @@
use crate::stream::Stream;
use std::pin::Pin;
use std::task::{Context, Poll};
mod next_back;
mod nth_back;
mod rfind;
mod rfold;
mod try_rfold;
use next_back::NextBackFuture;
use nth_back::NthBackFuture;
use rfind::RFindFuture;
use rfold::RFoldFuture;
use try_rfold::TryRFoldFuture;
/// A stream able to yield elements from both ends.
///
/// Something that implements `DoubleEndedStream` has one extra capability
/// over something that implements [`Stream`]: the ability to also take
/// `Item`s from the back, as well as the front.
///
/// [`Stream`]: trait.Stream.html
#[cfg(feature = "unstable")]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
pub trait DoubleEndedStream: Stream {
#[doc = r#"
Attempts to receive the next item from the back of the stream.
There are several possible return values:
* `Poll::Pending` means this stream's next_back value is not ready yet.
* `Poll::Ready(None)` means this stream has been exhausted.
* `Poll::Ready(Some(item))` means `item` was received out of the stream.
# Examples
```
# fn main() { async_std::task::block_on(async {
#
use std::pin::Pin;
use async_std::prelude::*;
use async_std::stream;
use async_std::task::{Context, Poll};
fn increment(
s: impl DoubleEndedStream<Item = i32> + Unpin,
) -> impl DoubleEndedStream<Item = i32> + Unpin {
struct Increment<S>(S);
impl<S: DoubleEndedStream<Item = i32> + Unpin> Stream for Increment<S> {
type Item = S::Item;
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
match Pin::new(&mut self.0).poll_next(cx) {
Poll::Pending => Poll::Pending,
Poll::Ready(None) => Poll::Ready(None),
Poll::Ready(Some(item)) => Poll::Ready(Some(item + 1)),
}
}
}
impl<S: DoubleEndedStream<Item = i32> + Unpin> DoubleEndedStream for Increment<S> {
fn poll_next_back(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
match Pin::new(&mut self.0).poll_next_back(cx) {
Poll::Pending => Poll::Pending,
Poll::Ready(None) => Poll::Ready(None),
Poll::Ready(Some(item)) => Poll::Ready(Some(item + 1)),
}
}
}
Increment(s)
}
let mut s = increment(stream::once(7));
assert_eq!(s.next_back().await, Some(8));
assert_eq!(s.next_back().await, None);
#
# }) }
```
"#]
fn poll_next_back(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>>;
#[doc = r#"
Advances the stream from the back and returns the next value.
Returns [`None`] when iteration is finished. Individual stream implementations may
choose to resume iteration, and so calling `next_back()` again may or may not eventually
start returning more values.
[`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None
# Examples
```
# fn main() { async_std::task::block_on(async {
#
use async_std::prelude::*;
use async_std::stream;
let mut s = stream::from_iter(vec![7u8]);
assert_eq!(s.next_back().await, Some(7));
assert_eq!(s.next_back().await, None);
#
# }) }
```
"#]
fn next_back(&mut self) -> NextBackFuture<'_, Self>
where
Self: Unpin,
{
NextBackFuture { stream: self }
}
#[doc = r#"
Returns the nth element from the back of the stream.
# Examples
Basic usage:
```
# fn main() { async_std::task::block_on(async {
#
use async_std::prelude::*;
use async_std::stream;
let mut s = stream::from_iter(vec![1u8, 2, 3, 4, 5]);
let second = s.nth_back(1).await;
assert_eq!(second, Some(4));
#
# }) }
```
"#]
fn nth_back(&mut self, n: usize) -> NthBackFuture<'_, Self>
where
Self: Unpin + Sized,
{
NthBackFuture::new(self, n)
}
#[doc = r#"
Returns the first element from the right that matches the predicate.
# Examples
Basic usage:
```
# fn main() { async_std::task::block_on(async {
#
use async_std::prelude::*;
use async_std::stream;
let mut s = stream::from_iter(vec![1u8, 2, 3, 4, 5]);
let second = s.rfind(|v| v % 2 == 0).await;
assert_eq!(second, Some(4));
#
# }) }
```
"#]
fn rfind<P>(&mut self, p: P) -> RFindFuture<'_, Self, P>
where
Self: Unpin + Sized,
P: FnMut(&Self::Item) -> bool,
{
RFindFuture::new(self, p)
}
#[doc = r#"
A combinator that applies a function to every element in the stream, producing a single,
final value, starting from the back of the stream.
# Examples
Basic usage:
```
# fn main() { async_std::task::block_on(async {
#
use async_std::prelude::*;
use async_std::stream;
let s = stream::from_iter(vec![1u8, 2, 3, 4, 5]);
let second = s.rfold(0, |acc, v| v + acc).await;
assert_eq!(second, 15);
#
# }) }
```
"#]
fn rfold<B, F>(self, accum: B, f: F) -> RFoldFuture<Self, F, B>
where
Self: Sized,
F: FnMut(B, Self::Item) -> B,
{
RFoldFuture::new(self, accum, f)
}
#[doc = r#"
A combinator that applies a function as long as it returns successfully, producing a single, final value.
Immediately returns the error when the function returns unsuccessfully.
# Examples
Basic usage:
```
# fn main() { async_std::task::block_on(async {
#
use async_std::prelude::*;
use async_std::stream;
let s = stream::from_iter(vec![1u8, 2, 3, 4, 5]);
let sum = s.try_rfold(0, |acc, v| {
if (acc+v) % 2 == 1 {
Ok(v+3)
} else {
Err("fail")
}
}).await;
assert_eq!(sum, Err("fail"));
#
# }) }
```
"#]
fn try_rfold<B, F, E>(self, accum: B, f: F) -> TryRFoldFuture<Self, F, B>
where
Self: Sized,
F: FnMut(B, Self::Item) -> Result<B, E>,
{
TryRFoldFuture::new(self, accum, f)
}
}

View file

@ -1,19 +0,0 @@
use core::pin::Pin;
use core::future::Future;
use crate::stream::DoubleEndedStream;
use crate::task::{Context, Poll};
#[doc(hidden)]
#[allow(missing_debug_implementations)]
pub struct NextBackFuture<'a, T: Unpin + ?Sized> {
pub(crate) stream: &'a mut T,
}
impl<T: DoubleEndedStream + Unpin + ?Sized> Future for NextBackFuture<'_, T> {
type Output = Option<T::Item>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Pin::new(&mut *self.stream).poll_next_back(cx)
}
}

View file

@ -1,41 +0,0 @@
use core::future::Future;
use core::pin::Pin;
use core::task::{Context, Poll};
use crate::stream::DoubleEndedStream;
#[doc(hidden)]
#[allow(missing_debug_implementations)]
pub struct NthBackFuture<'a, S> {
stream: &'a mut S,
n: usize,
}
impl<'a, S> NthBackFuture<'a, S> {
pub(crate) fn new(stream: &'a mut S, n: usize) -> Self {
NthBackFuture { stream, n }
}
}
impl<'a, S> Future for NthBackFuture<'a, S>
where
S: DoubleEndedStream + Sized + Unpin,
{
type Output = Option<S::Item>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let next = futures_core::ready!(Pin::new(&mut *self.stream).poll_next_back(cx));
match next {
Some(v) => match self.n {
0 => Poll::Ready(Some(v)),
_ => {
self.n -= 1;
cx.waker().wake_by_ref();
Poll::Pending
}
},
None => Poll::Ready(None),
}
}
}
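The `wake_by_ref` plus `Pending` combination above is a small but important pattern: instead of looping inside `poll`, the future immediately re-schedules itself and yields back to the executor after each consumed element. A self-contained toy future showing the same pattern (not part of the crate):

```
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

// Completes only after being polled `n + 1` times, yielding to the executor in
// between, just like `NthBackFuture` yields after each skipped element.
struct CountDown(u32);

impl Future for CountDown {
    type Output = ();

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        if self.0 == 0 {
            Poll::Ready(())
        } else {
            self.0 -= 1;
            // Ask to be polled again, then return control to the executor.
            cx.waker().wake_by_ref();
            Poll::Pending
        }
    }
}
```

Driving `CountDown(3)` with, say, `async_std::task::block_on` polls it four times before it resolves.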

View file

@ -1,41 +0,0 @@
use core::task::{Context, Poll};
use core::future::Future;
use core::pin::Pin;
use crate::stream::DoubleEndedStream;
#[doc(hidden)]
#[allow(missing_debug_implementations)]
pub struct RFindFuture<'a, S, P> {
stream: &'a mut S,
p: P,
}
impl<'a, S, P> RFindFuture<'a, S, P> {
pub(super) fn new(stream: &'a mut S, p: P) -> Self {
RFindFuture { stream, p }
}
}
impl<S: Unpin, P> Unpin for RFindFuture<'_, S, P> {}
impl<'a, S, P> Future for RFindFuture<'a, S, P>
where
S: DoubleEndedStream + Unpin + Sized,
P: FnMut(&S::Item) -> bool,
{
type Output = Option<S::Item>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let item = futures_core::ready!(Pin::new(&mut *self.stream).poll_next_back(cx));
match item {
Some(v) if (&mut self.p)(&v) => Poll::Ready(Some(v)),
Some(_) => {
cx.waker().wake_by_ref();
Poll::Pending
}
None => Poll::Ready(None),
}
}
}

View file

@ -1,52 +0,0 @@
use core::future::Future;
use core::pin::Pin;
use core::task::{Context, Poll};
use pin_project_lite::pin_project;
use crate::stream::DoubleEndedStream;
pin_project! {
#[doc(hidden)]
#[allow(missing_debug_implementations)]
pub struct RFoldFuture<S, F, B> {
#[pin]
stream: S,
f: F,
acc: Option<B>,
}
}
impl<S, F, B> RFoldFuture<S, F, B> {
pub(super) fn new(stream: S, init: B, f: F) -> Self {
RFoldFuture {
stream,
f,
acc: Some(init),
}
}
}
impl<S, F, B> Future for RFoldFuture<S, F, B>
where
S: DoubleEndedStream + Sized,
F: FnMut(B, S::Item) -> B,
{
type Output = B;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.project();
loop {
let next = futures_core::ready!(this.stream.as_mut().poll_next_back(cx));
match next {
Some(v) => {
let old = this.acc.take().unwrap();
let new = (this.f)(old, v);
*this.acc = Some(new);
}
None => return Poll::Ready(this.acc.take().unwrap()),
}
}
}
}

View file

@ -1,56 +0,0 @@
use crate::future::Future;
use core::pin::Pin;
use crate::task::{Context, Poll};
use pin_project_lite::pin_project;
use crate::stream::DoubleEndedStream;
pin_project! {
#[doc(hidden)]
#[allow(missing_debug_implementations)]
pub struct TryRFoldFuture<S, F, T> {
#[pin]
stream: S,
f: F,
acc: Option<T>,
}
}
impl<S, F, T> TryRFoldFuture<S, F, T> {
pub(super) fn new(stream: S, init: T, f: F) -> Self {
TryRFoldFuture {
stream,
f,
acc: Some(init),
}
}
}
impl<S, F, T, E> Future for TryRFoldFuture<S, F, T>
where
S: DoubleEndedStream + Unpin,
F: FnMut(T, S::Item) -> Result<T, E>,
{
type Output = Result<T, E>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.project();
loop {
let next = futures_core::ready!(this.stream.as_mut().poll_next_back(cx));
match next {
Some(v) => {
let old = this.acc.take().unwrap();
let new = (this.f)(old, v);
match new {
Ok(o) => *this.acc = Some(o),
Err(e) => return Poll::Ready(Err(e)),
}
}
None => return Poll::Ready(Ok(this.acc.take().unwrap())),
}
}
}
}

View file

@ -1,5 +1,5 @@
use core::marker::PhantomData;
use core::pin::Pin;
use std::marker::PhantomData;
use std::pin::Pin;
use crate::stream::Stream;
use crate::task::{Context, Poll};

View file

@ -1,6 +1,6 @@
use core::pin::Pin;
use core::future::Future;
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::IntoStream;
/// Extends a collection with the contents of a stream.
@ -34,9 +34,7 @@ pub trait Extend<A> {
fn extend<'a, T: IntoStream<Item = A> + 'a>(
&'a mut self,
stream: T,
) -> Pin<Box<dyn Future<Output = ()> + 'a + Send>>
where
<T as IntoStream>::IntoStream: Send;
) -> Pin<Box<dyn Future<Output = ()> + 'a>>;
}
/// Extends a collection with the contents of a stream.
@ -71,7 +69,6 @@ pub async fn extend<'a, C, T, S>(collection: &mut C, stream: S)
where
C: Extend<T>,
S: IntoStream<Item = T> + 'a,
<S as IntoStream>::IntoStream: Send,
{
Extend::extend(collection, stream).await
}
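With the `Send` bounds gone, the boxed future returned by `Extend::extend` only has to live for `'a`. A hedged sketch of an implementor under the relaxed signature from this branch; the `MyVec` wrapper is hypothetical, and the trait is only available with the `unstable` feature:

```
use std::future::Future;
use std::pin::Pin;

use async_std::prelude::*;
use async_std::stream::{self, IntoStream};

// Hypothetical collection type, used only to illustrate the relaxed signature.
struct MyVec(Vec<i32>);

impl stream::Extend<i32> for MyVec {
    fn extend<'a, S: IntoStream<Item = i32> + 'a>(
        &'a mut self,
        stream: S,
    ) -> Pin<Box<dyn Future<Output = ()> + 'a>> {
        Box::pin(async move {
            // Pinning on the heap makes the stream `Unpin`, so `next()` is available.
            let mut stream = Box::pin(stream.into_stream());
            while let Some(item) = stream.next().await {
                self.0.push(item);
            }
        })
    }
}
```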

View file

@ -1,4 +1,4 @@
use core::pin::Pin;
use std::pin::Pin;
use crate::stream::Stream;
use crate::task::{Context, Poll};

View file

@ -1,10 +1,8 @@
use core::pin::Pin;
use std::pin::Pin;
use pin_project_lite::pin_project;
use crate::stream::Stream;
#[cfg(feature = "unstable")]
use crate::stream::double_ended_stream::DoubleEndedStream;
use crate::task::{Context, Poll};
pin_project! {
@ -53,10 +51,3 @@ impl<I: Iterator> Stream for FromIter<I> {
Poll::Ready(self.iter.next())
}
}
#[cfg(feature = "unstable")]
impl<T: DoubleEndedIterator> DoubleEndedStream for FromIter<T> {
fn poll_next_back(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<T::Item>> {
Poll::Ready(self.iter.next_back())
}
}

View file

@ -1,5 +1,5 @@
use core::future::Future;
use core::pin::Pin;
use std::future::Future;
use std::pin::Pin;
use crate::stream::IntoStream;
@ -72,10 +72,7 @@ use crate::stream::IntoStream;
/// impl FromStream<i32> for MyCollection {
/// fn from_stream<'a, S: IntoStream<Item = i32> + 'a>(
/// stream: S,
/// ) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
/// where
/// <S as IntoStream>::IntoStream: Send,
/// {
/// ) -> Pin<Box<dyn Future<Output = Self> + 'a>> {
/// let stream = stream.into_stream();
///
/// Box::pin(async move {
@ -110,12 +107,12 @@ use crate::stream::IntoStream;
/// assert_eq!(c.0, vec![5, 5, 5, 5, 5]);
/// #
/// # Ok(()) }) }
/// ```
///```
///
/// [`IntoStream`]: trait.IntoStream.html
#[cfg(feature = "unstable")]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
pub trait FromStream<T: Send> {
pub trait FromStream<T> {
/// Creates a value from a stream.
///
/// # Examples
@ -138,7 +135,5 @@ pub trait FromStream<T: Send> {
/// ```
fn from_stream<'a, S: IntoStream<Item = T> + 'a>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send;
) -> Pin<Box<dyn Future<Output = Self> + 'a>>;
}

View file

@ -1,10 +1,10 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;
use std::time::{Duration, Instant};
use crate::stream::Stream;
use crate::utils::{timer_after, Timer};
use futures_timer::Delay;
use crate::prelude::*;
/// Creates a new stream that yields at a set interval.
///
@ -45,7 +45,7 @@ use crate::utils::{timer_after, Timer};
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
pub fn interval(dur: Duration) -> Interval {
Interval {
delay: timer_after(dur),
delay: Delay::new(dur),
interval: dur,
}
}
@ -60,7 +60,7 @@ pub fn interval(dur: Duration) -> Interval {
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
#[derive(Debug)]
pub struct Interval {
delay: Timer,
delay: Delay,
interval: Duration,
}
@ -71,8 +71,125 @@ impl Stream for Interval {
if Pin::new(&mut self.delay).poll(cx).is_pending() {
return Poll::Pending;
}
let interval = self.interval;
let _ = std::mem::replace(&mut self.delay, timer_after(interval));
let when = Instant::now();
let next = next_interval(when, Instant::now(), self.interval);
self.delay.reset(next);
Poll::Ready(Some(()))
}
}
/// Converts a `Duration` to raw nanoseconds if possible.
///
/// This is useful to divide intervals.
///
/// While technically it's impossible to represent every large duration as
/// nanoseconds, the largest duration we can represent is about 427_000 years.
/// That is large enough for any interval we would use or calculate here.
fn duration_to_nanos(dur: Duration) -> Option<u64> {
dur.as_secs()
.checked_mul(1_000_000_000)
.and_then(|v| v.checked_add(u64::from(dur.subsec_nanos())))
}
fn next_interval(prev: Instant, now: Instant, interval: Duration) -> Instant {
let new = prev + interval;
if new > now {
return new;
}
let spent_ns = duration_to_nanos(now.duration_since(prev)).expect("interval should be expired");
let interval_ns =
duration_to_nanos(interval).expect("interval is less than 427 thousand years");
let mult = spent_ns / interval_ns + 1;
assert!(
mult < (1 << 32),
"can't skip more than 4 billion intervals of {:?} \
(trying to skip {})",
interval,
mult
);
prev + interval * (mult as u32)
}
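// Worked example with assumed values: prev = t+1 ms, now = t+1000 ms, interval = 10 ms.
// The naive next tick (t+11 ms) is already in the past, so we skip ahead:
// spent_ns = 999_000_000, interval_ns = 10_000_000, mult = 999_000_000 / 10_000_000 + 1 = 100,
// and the returned instant is prev + 100 * 10 ms = t+1001 ms, the first tick strictly after `now`.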
#[cfg(test)]
mod test {
use super::next_interval;
use std::cmp::Ordering;
use std::time::{Duration, Instant};
struct Timeline(Instant);
impl Timeline {
fn new() -> Timeline {
Timeline(Instant::now())
}
fn at(&self, millis: u64) -> Instant {
self.0 + Duration::from_millis(millis)
}
fn at_ns(&self, sec: u64, nanos: u32) -> Instant {
self.0 + Duration::new(sec, nanos)
}
}
fn dur(millis: u64) -> Duration {
Duration::from_millis(millis)
}
// The math around Instant/Duration isn't 100% precise due to rounding
// errors, see #249 for more info
fn almost_eq(a: Instant, b: Instant) -> bool {
match a.cmp(&b) {
Ordering::Equal => true,
Ordering::Greater => a - b < Duration::from_millis(1),
Ordering::Less => b - a < Duration::from_millis(1),
}
}
#[test]
fn norm_next() {
let tm = Timeline::new();
assert!(almost_eq(
next_interval(tm.at(1), tm.at(2), dur(10)),
tm.at(11)
));
assert!(almost_eq(
next_interval(tm.at(7777), tm.at(7788), dur(100)),
tm.at(7877)
));
assert!(almost_eq(
next_interval(tm.at(1), tm.at(1000), dur(2100)),
tm.at(2101)
));
}
#[test]
fn fast_forward() {
let tm = Timeline::new();
assert!(almost_eq(
next_interval(tm.at(1), tm.at(1000), dur(10)),
tm.at(1001)
));
assert!(almost_eq(
next_interval(tm.at(7777), tm.at(8888), dur(100)),
tm.at(8977)
));
assert!(almost_eq(
next_interval(tm.at(1), tm.at(10000), dur(2100)),
tm.at(10501)
));
}
/// TODO: this test should actually succeed, but since we can't easily
/// multiply a `Duration` by anything larger than a `u32`, we decided
/// to let it fail for now
#[test]
#[should_panic(expected = "can't skip more than 4 billion intervals")]
fn large_skip() {
let tm = Timeline::new();
assert_eq!(
next_interval(tm.at_ns(0, 1), tm.at_ns(25, 0), Duration::new(0, 2)),
tm.at_ns(25, 1)
);
}
}
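A short usage sketch of the resulting `Interval` stream (the numbers are arbitrary; requires the `unstable` feature):

```
use std::time::Duration;

use async_std::prelude::*;
use async_std::stream;

fn main() {
    async_std::task::block_on(async {
        // Tick roughly every 100 ms, four times, then stop.
        let mut ticker = stream::interval(Duration::from_millis(100)).take(4);
        while ticker.next().await.is_some() {
            println!("tick");
        }
    });
}
```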

View file

@ -325,7 +325,6 @@ cfg_unstable! {
mod fused_stream;
mod interval;
mod into_stream;
mod pending;
mod product;
mod successors;
mod sum;
@ -337,7 +336,6 @@ cfg_unstable! {
pub use fused_stream::FusedStream;
pub use interval::{interval, Interval};
pub use into_stream::IntoStream;
pub use pending::{pending, Pending};
pub use product::Product;
pub use stream::Merge;
pub use successors::{successors, Successors};

View file

@ -1,13 +1,10 @@
use core::pin::Pin;
use std::pin::Pin;
use pin_project_lite::pin_project;
use crate::stream::Stream;
use crate::task::{Context, Poll};
#[cfg(feature = "unstable")]
use crate::stream::DoubleEndedStream;
/// Creates a stream that yields a single item.
///
/// # Examples
@ -49,10 +46,3 @@ impl<T> Stream for Once<T> {
Poll::Ready(self.project().value.take())
}
}
#[cfg(feature = "unstable")]
impl <T> DoubleEndedStream for Once<T> {
fn poll_next_back(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
Poll::Ready(self.project().value.take())
}
}

View file

@ -1,68 +0,0 @@
use core::marker::PhantomData;
use core::pin::Pin;
use core::task::{Context, Poll};
use crate::stream::{DoubleEndedStream, ExactSizeStream, FusedStream, Stream};
/// A stream that never returns any items.
///
/// This stream is created by the [`pending`] function. See its
/// documentation for more.
///
/// [`pending`]: fn.pending.html
#[derive(Debug)]
pub struct Pending<T> {
_marker: PhantomData<T>,
}
/// Creates a stream that never returns any items.
///
/// The returned stream will always return `Pending` when polled.
/// # Examples
///
/// ```
/// # async_std::task::block_on(async {
/// #
/// use std::time::Duration;
///
/// use async_std::prelude::*;
/// use async_std::stream;
///
/// let dur = Duration::from_millis(100);
/// let mut s = stream::pending::<()>().timeout(dur);
///
/// let item = s.next().await;
///
/// assert!(item.is_some());
/// assert!(item.unwrap().is_err());
///
/// #
/// # })
/// ```
pub fn pending<T>() -> Pending<T> {
Pending {
_marker: PhantomData,
}
}
impl<T> Stream for Pending<T> {
type Item = T;
fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<T>> {
Poll::Pending
}
}
impl<T> DoubleEndedStream for Pending<T> {
fn poll_next_back(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<T>> {
Poll::Pending
}
}
impl<T> FusedStream for Pending<T> {}
impl<T> ExactSizeStream for Pending<T> {
fn len(&self) -> usize {
0
}
}

View file

@ -1,5 +1,5 @@
use core::pin::Pin;
use core::future::Future;
use std::pin::Pin;
use std::future::Future;
use crate::stream::Stream;

View file

@ -1,4 +1,4 @@
use core::pin::Pin;
use std::pin::Pin;
use crate::stream::Stream;
use crate::task::{Context, Poll};

View file

@ -1,4 +1,4 @@
use core::pin::Pin;
use std::pin::Pin;
use crate::stream::Stream;
use crate::task::{Context, Poll};

View file

@ -1,6 +1,6 @@
use core::marker::PhantomData;
use core::pin::Pin;
use core::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::future::Future;
use crate::stream::Stream;
use crate::task::{Context, Poll};

View file

@ -1,6 +1,6 @@
use core::marker::PhantomData;
use core::pin::Pin;
use core::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::future::Future;
use crate::stream::Stream;
use crate::task::{Context, Poll};

View file

@ -1,10 +1,9 @@
use core::pin::Pin;
use std::pin::Pin;
use pin_project_lite::pin_project;
use super::fuse::Fuse;
use crate::stream::stream::StreamExt;
use crate::stream::Stream;
use crate::prelude::*;
use crate::task::{Context, Poll};
pin_project! {

View file

@ -1,7 +1,7 @@
use crate::stream::Stream;
use crate::task::{Context, Poll};
use pin_project_lite::pin_project;
use core::pin::Pin;
use std::pin::Pin;
pin_project! {
/// A stream that clones the elements of an underlying stream.

View file

@ -1,11 +1,11 @@
use core::cmp::Ordering;
use core::future::Future;
use core::pin::Pin;
use std::cmp::Ordering;
use std::pin::Pin;
use std::future::Future;
use pin_project_lite::pin_project;
use super::fuse::Fuse;
use crate::stream::stream::StreamExt;
use crate::prelude::*;
use crate::stream::Stream;
use crate::task::{Context, Poll};

View file

@ -1,7 +1,7 @@
use crate::stream::Stream;
use crate::task::{Context, Poll};
use pin_project_lite::pin_project;
use core::pin::Pin;
use std::pin::Pin;
pin_project! {
/// A stream that copies the elements of an underlying stream.

View file

@ -1,5 +1,5 @@
use core::future::Future;
use core::pin::Pin;
use std::future::Future;
use std::pin::Pin;
use pin_project_lite::pin_project;
@ -9,7 +9,7 @@ use crate::task::{Context, Poll};
pin_project! {
#[doc(hidden)]
#[allow(missing_debug_implementations)]
#[cfg(feature = "unstable")]
#[cfg(all(feature = "default", feature = "unstable"))]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
pub struct CountFuture<S> {
#[pin]

View file

@ -1,5 +1,5 @@
use core::mem::ManuallyDrop;
use core::pin::Pin;
use std::mem::ManuallyDrop;
use std::pin::Pin;
use crate::stream::Stream;
use crate::task::{Context, Poll};

View file

@ -1,12 +1,11 @@
use core::future::Future;
use core::pin::Pin;
use core::time::Duration;
use std::future::Future;
use std::pin::Pin;
use std::time::Duration;
use pin_project_lite::pin_project;
use crate::stream::Stream;
use crate::task::{Context, Poll};
use crate::utils::{timer_after, Timer};
pin_project! {
#[doc(hidden)]
@ -15,7 +14,7 @@ pin_project! {
#[pin]
stream: S,
#[pin]
delay: Timer,
delay: futures_timer::Delay,
delay_done: bool,
}
}
@ -24,7 +23,7 @@ impl<S> Delay<S> {
pub(super) fn new(stream: S, dur: Duration) -> Self {
Delay {
stream,
delay: timer_after(dur),
delay: futures_timer::Delay::new(dur),
delay_done: false,
}
}

View file

@ -1,4 +1,4 @@
use core::pin::Pin;
use std::pin::Pin;
use pin_project_lite::pin_project;

View file

@ -1,10 +1,10 @@
use core::future::Future;
use core::pin::Pin;
use std::pin::Pin;
use std::future::Future;
use pin_project_lite::pin_project;
use super::fuse::Fuse;
use crate::stream::stream::StreamExt;
use crate::prelude::*;
use crate::stream::Stream;
use crate::task::{Context, Poll};

View file

@ -1,4 +1,4 @@
use core::pin::Pin;
use std::pin::Pin;
use pin_project_lite::pin_project;

Some files were not shown because too many files have changed in this diff.