Compare commits


1 Commit

Author SHA1 Message Date
Yoshua Wuyts b7eeed7785
Remove unused pin-utils dep
pin-utils is no longer used. This removes it from our dependency tree.
5 years ago

@ -1,3 +0,0 @@
Our contribution policy can be found at [async.rs/contribute][policy].
[policy]: https://async.rs/contribute/

@ -4,7 +4,6 @@ on:
pull_request:
push:
branches:
- master
- staging
- trying
@ -18,7 +17,7 @@ jobs:
strategy:
matrix:
os: [ubuntu-latest, windows-latest, macOS-latest]
rust: [nightly, beta, stable]
rust: [nightly]
steps:
- uses: actions/checkout@master
@ -29,130 +28,23 @@ jobs:
toolchain: ${{ matrix.rust }}
override: true
- name: Cache cargo registry
uses: actions/cache@v2
with:
path: ~/.cargo/registry
key: ${{ matrix.os }}-${{ matrix.rust }}-cargo-registry-${{ hashFiles('**/Cargo.toml') }}
- name: Cache cargo index
uses: actions/cache@v2
with:
path: ~/.cargo/git
key: ${{ matrix.os }}-${{ matrix.rust }}-cargo-index-${{ hashFiles('**/Cargo.toml') }}
- name: Cache cargo build
uses: actions/cache@v2
with:
path: target
key: ${{ matrix.os }}-${{ matrix.rust }}-cargo-build-target-${{ hashFiles('**/Cargo.toml') }}
- name: check
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --tests
args: --all --bins --examples
- name: check unstable
uses: actions-rs/cargo@v1
with:
command: check
args: --features unstable --all --bins --examples --tests
- name: check wasm
uses: actions-rs/cargo@v1
with:
command: check
target: wasm32-unknown-unknown
override: true
args: --features unstable --all --bins --tests
- name: check bench
uses: actions-rs/cargo@v1
if: matrix.rust == 'nightly'
with:
command: check
args: --benches
- name: check std only
uses: actions-rs/cargo@v1
with:
command: check
args: --no-default-features --features std
- name: check attributes
uses: actions-rs/cargo@v1
with:
command: check
args: --features attributes
args: --features unstable --all --benches --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
with:
command: test
args: --all --features "unstable attributes"
build__with_no_std:
name: Build with no-std
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- name: setup
run: |
rustup default nightly
rustup target add thumbv7m-none-eabi
- name: check no_std
uses: actions-rs/cargo@v1
with:
command: check
args: --no-default-features --features alloc --target thumbv7m-none-eabi -Z avoid-dev-deps
check_tokio_02_feature:
name: Check tokio02 feature
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- name: check tokio02
uses: actions-rs/cargo@v1
with:
command: check
args: --all --features tokio02
cross:
name: Cross compile
runs-on: ubuntu-latest
strategy:
matrix:
target:
- i686-unknown-linux-gnu
- powerpc-unknown-linux-gnu
- powerpc64-unknown-linux-gnu
- mips-unknown-linux-gnu
- arm-linux-androideabi
steps:
- uses: actions/checkout@master
- name: Install nightly
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
override: true
- name: Install cross
run: cargo install cross
- name: check
run: cross check --all --target ${{ matrix.target }}
- name: check unstable
run: cross check --all --features unstable --target ${{ matrix.target }}
- name: test
run: cross test --all --features unstable --target ${{ matrix.target }}
args: --all --features unstable
check_fmt_and_docs:
name: Checking fmt and docs
@ -160,12 +52,15 @@ jobs:
steps:
- uses: actions/checkout@master
- id: component
uses: actions-rs/components-nightly@v1
with:
component: rustfmt
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly
toolchain: ${{ steps.component.outputs.toolchain }}
override: true
components: rustfmt
- name: setup
run: |
@ -181,3 +76,15 @@ jobs:
- name: Docs
run: cargo doc --features docs
clippy_check:
name: Clippy check
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: Install rust
run: rustup update beta && rustup default beta
- name: Install clippy
run: rustup component add clippy
- name: clippy
run: cargo clippy --all --features unstable

@ -7,414 +7,6 @@ and this project adheres to [Semantic Versioning](https://book.async.rs/overview
## [Unreleased]
# [1.6.2] - 2020-06-19
## Added
- Add `UdpSocket::peer_addr` ([#816](https://github.com/async-rs/async-std/pull/816))
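  As a quick illustration of the new method, here is a minimal sketch (not part of the original release notes; the addresses are placeholders):
  ```rust
  use async_std::net::UdpSocket;

  // After `connect`, `peer_addr` reports the remembered remote address.
  async fn peer_addr_example() -> std::io::Result<()> {
      let socket = UdpSocket::bind("127.0.0.1:0").await?;
      socket.connect("127.0.0.1:8080").await?; // hypothetical peer
      println!("connected to {}", socket.peer_addr()?);
      Ok(())
  }
  ```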
## Changed
## Fixed
- Ensure the reactor is running for sockets and timers ([#819](https://github.com/async-rs/async-std/pull/819)).
- Avoid excessive polling in `flatten` and `flat_map` ([#701](https://github.com/async-rs/async-std/pull/701))
# [1.6.1] - 2020-06-11
## Added
- Added `tokio02` feature flag, to allow compatibility usage with tokio@0.2 ([#804](https://github.com/async-rs/async-std/pull/804)).
## Changed
- Removed unstable `stdio` lock methods, due to their unsoundness ([#807](https://github.com/async-rs/async-std/pull/807)).
## Fixed
- Fixed wrong slice index for file reading ([#802](https://github.com/async-rs/async-std/pull/802)).
- Fixed recursive calls to `block_on` ([#799](https://github.com/async-rs/async-std/pull/799)) and ([#809](https://github.com/async-rs/async-std/pull/809)).
- Remove `default` feature requirement for the `unstable` feature ([#806](https://github.com/async-rs/async-std/pull/806)).
# [1.6.0] - 2020-05-22
See `1.6.0-beta.1` and `1.6.0-beta.2`.
# [1.6.0-beta.2] - 2020-05-19
## Added
- Added an environment variable to configure the thread pool size of the runtime. ([#774](https://github.com/async-rs/async-std/pull/774))
- Implement `Clone` for `UnixStream` ([#772](https://github.com/async-rs/async-std/pull/772))
## Changed
- For `wasm`, switched underlying `Timer` implementation to [`futures-timer`](https://github.com/async-rs/futures-timer). ([#776](https://github.com/async-rs/async-std/pull/776))
## Fixed
- Use `smol::block_on` to handle drop of `File`, avoiding nested executor panic. ([#768](https://github.com/async-rs/async-std/pull/768))
# [1.6.0-beta.1] - 2020-05-07
## Added
- Added `task::spawn_local`. ([#757](https://github.com/async-rs/async-std/pull/757))
- Added out of the box support for `wasm`. ([#757](https://github.com/async-rs/async-std/pull/757))
- Added `JoinHandle::cancel` ([#757](https://github.com/async-rs/async-std/pull/757))
- Added `sync::Condvar` ([#369](https://github.com/async-rs/async-std/pull/369))
- Added `sync::Sender::try_send` and `sync::Receiver::try_recv` ([#585](https://github.com/async-rs/async-std/pull/585))
- Added `no_std` support for `task`, `future` and `stream` ([#680](https://github.com/async-rs/async-std/pull/680))
## Changed
- Switched underlying runtime to [`smol`](https://github.com/stjepang/smol/). ([#757](https://github.com/async-rs/async-std/pull/757))
- Switched implementation of `sync::Barrier` to use `sync::Condvar` like `std` does. ([#581](https://github.com/async-rs/async-std/pull/581))
## Fixed
- Allow compilation on 32 bit targets, by using `AtomicUsize` for `TaskId`. ([#756](https://github.com/async-rs/async-std/pull/756))
# [1.5.0] - 2020-02-03
[API Documentation](https://docs.rs/async-std/1.5.0/async-std)
This patch includes various quality of life improvements to async-std,
including improved performance, stability, and the addition of various
`Clone` impls that replace the use of `Arc` in many cases.
## Added
- Added links to various ecosystem projects from the README ([#660](https://github.com/async-rs/async-std/pull/660))
- Added an example on `FromStream` for `Result<T, E>` ([#643](https://github.com/async-rs/async-std/pull/643))
- Added `stream::pending` as "unstable" ([#615](https://github.com/async-rs/async-std/pull/615))
- Added an example of `stream::timeout` to document the error flow ([#675](https://github.com/async-rs/async-std/pull/675))
- Implement `Clone` for `DirEntry` ([#682](https://github.com/async-rs/async-std/pull/682))
- Implement `Clone` for `TcpStream` ([#689](https://github.com/async-rs/async-std/pull/689))
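  To illustrate the `Clone` impl for `TcpStream` listed above, here is a minimal sketch (not part of the original release notes; the address and payload are placeholders): cloning gives a second handle to the same connection, so reading and writing can happen in separate tasks.
  ```rust
  use async_std::net::TcpStream;
  use async_std::prelude::*;
  use async_std::task;

  async fn clone_example() -> std::io::Result<()> {
      let stream = TcpStream::connect("127.0.0.1:8080").await?;
      let mut reader = stream.clone(); // second handle to the same socket
      let writer = task::spawn(async move {
          let mut stream = stream;
          stream.write_all(b"ping\n").await
      });
      let mut buf = vec![0u8; 1024];
      let n = reader.read(&mut buf).await?;
      println!("read {} bytes", n);
      writer.await?;
      Ok(())
  }
  ```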
## Changed
- Removed internal comment on `stream::Interval` ([#645](https://github.com/async-rs/async-std/pull/645))
- The "unstable" feature can now be used without requiring the "default" feature ([#647](https://github.com/async-rs/async-std/pull/647))
- Removed unnecessary trait bound on `stream::FlatMap` ([#651](https://github.com/async-rs/async-std/pull/651))
- Updated the "broadcaster" dependency used by "unstable" to `1.0.0` ([#681](https://github.com/async-rs/async-std/pull/681))
- Updated `async-task` to 1.2.1 ([#676](https://github.com/async-rs/async-std/pull/676))
- `task::block_on` now parks after a single poll, improving performance in many cases ([#684](https://github.com/async-rs/async-std/pull/684))
- Improved reading flow of the "client" part of the async-std tutorial ([#550](https://github.com/async-rs/async-std/pull/550))
- Use `take_while` instead of `scan` in `impl` of `Product`, `Sum` and `FromStream` ([#667](https://github.com/async-rs/async-std/pull/667))
- `TcpStream::connect` no longer uses a thread from the threadpool, improving performance ([#687](https://github.com/async-rs/async-std/pull/687))
## Fixed
- Fixed crate documentation typo ([#655](https://github.com/async-rs/async-std/pull/655))
- Fixed documentation for `UdpSocket::recv` ([#648](https://github.com/async-rs/async-std/pull/648))
- Fixed documentation for `UdpSocket::send` ([#671](https://github.com/async-rs/async-std/pull/671))
- Fixed typo in stream documentation ([#650](https://github.com/async-rs/async-std/pull/650))
- Fixed typo on `sync::JoinHandle` documentation ([#659](https://github.com/async-rs/async-std/pull/659))
- Removed use of `std::error::Error::description` which failed CI ([#661](https://github.com/async-rs/async-std/pull/662))
- Removed the use of rustfmt's unstable `format_code_in_doc_comments` option which failed CI ([#685](https://github.com/async-rs/async-std/pull/685))
- Fixed a code typo in the `task::sleep` example ([#688](https://github.com/async-rs/async-std/pull/688))
# [1.4.0] - 2019-12-20
[API Documentation](https://docs.rs/async-std/1.4.0/async-std)
This patch adds `Future::timeout`, providing a method counterpart to the
`future::timeout` free function. It also includes several bug fixes around missing
APIs. Notably, we're not yet shipping our new executor, first announced [on our
blog](https://async.rs/blog/stop-worrying-about-blocking-the-new-async-std-runtime/).
## Examples
```rust
use async_std::prelude::*;
use async_std::future;
use std::time::Duration;
let fut = future::pending::<()>(); // This future will never resolve.
let res = fut.timeout(Duration::from_millis(100)).await;
assert!(res.is_err()); // The future timed out, returning an err.
```
## Added
- Added `Future::timeout` as "unstable" [(#600)](https://github.com/async-rs/async-std/pull/600)
## Fixes
- Fixed a doc test and enabled it on CI [(#597)](https://github.com/async-rs/async-std/pull/597)
- Fixed a rendering issue with the `stream` submodule documentation [(#621)](https://github.com/async-rs/async-std/pull/621)
- `Write::write_fmt`'s future is now correctly marked as `#[must_use]` [(#628)](https://github.com/async-rs/async-std/pull/628)
- Fixed the missing `io::Bytes` export [(#633)](https://github.com/async-rs/async-std/pull/633)
- Fixed the missing `io::Chain` export [(#633)](https://github.com/async-rs/async-std/pull/633)
- Fixed the missing `io::Take` export [(#633)](https://github.com/async-rs/async-std/pull/633)
# [1.3.0] - 2019-12-12
[API Documentation](https://docs.rs/async-std/1.3.0/async-std)
This patch introduces `Stream::delay`, more methods on `DoubleEndedStream`,
and improves compile times. `Stream::delay` is a new API that's similar to
[`task::sleep`](https://docs.rs/async-std/1.2.0/async_std/task/fn.sleep.html),
but can be passed as part of a stream, rather than as a separate block. This is
useful for examples, or when manually debugging race conditions.
## Examples
```rust
use async_std::prelude::*;
use async_std::stream;
use std::time::{Duration, Instant};

let start = Instant::now();
let mut s = stream::from_iter(vec![0u8, 1]).delay(Duration::from_millis(200));
// The first time will take more than 200ms due to delay.
s.next().await;
assert!(start.elapsed().as_millis() >= 200);
// There will be no delay after the first time.
s.next().await;
assert!(start.elapsed().as_millis() <= 210);
```
## Added
- Added `Stream::delay` as "unstable" [(#309)](https://github.com/async-rs/async-std/pull/309)
- Added `DoubleEndedStream::next_back` as "unstable" [(#562)](https://github.com/async-rs/async-std/pull/562)
- Added `DoubleEndedStream::nth_back` as "unstable" [(#562)](https://github.com/async-rs/async-std/pull/562)
- Added `DoubleEndedStream::rfind` as "unstable" [(#562)](https://github.com/async-rs/async-std/pull/562)
- Added `DoubleEndedStream::rfold` as "unstable" [(#562)](https://github.com/async-rs/async-std/pull/562)
- Added `DoubleEndedStream::try_rfold` as "unstable" [(#562)](https://github.com/async-rs/async-std/pull/562)
- `stream::Once` now implements `DoubleEndedStream` [(#562)](https://github.com/async-rs/async-std/pull/562)
- `stream::FromIter` now implements `DoubleEndedStream` [(#562)](https://github.com/async-rs/async-std/pull/562)
## Changed
- Removed our dependency on `async-macros`, speeding up compilation [(#610)](https://github.com/async-rs/async-std/pull/610)
## Fixes
- Fixed a link in the task docs [(#598)](https://github.com/async-rs/async-std/pull/598)
- Fixed the `UdpSocket::recv` example [(#603)](https://github.com/async-rs/async-std/pull/603)
- Fixed a link to `task::block_on` [(#608)](https://github.com/async-rs/async-std/pull/608)
- Fixed an incorrect API mention in `task::Builder` [(#612)](https://github.com/async-rs/async-std/pull/612)
- Fixed leftover mentions of `futures-preview` [(#595)](https://github.com/async-rs/async-std/pull/595)
- Fixed a typo in the tutorial [(#614)](https://github.com/async-rs/async-std/pull/614)
- `<TcpStream as Write>::poll_close` now closes the write half of the stream [(#618)](https://github.com/async-rs/async-std/pull/618)
# [1.2.0] - 2019-11-27
[API Documentation](https://docs.rs/async-std/1.2.0/async-std)
This patch includes some minor quality-of-life improvements, introduces a
new `Stream::unzip` API, and adds verbose errors to our networking types.
This means that if you can't connect to a socket, the error will tell you
*which* address you couldn't connect to, instead of leaving you to go through
the motions of debugging what the address was.
## Example
Unzip a stream of tuples into two collections:
```rust
use async_std::prelude::*;
use async_std::stream;
let s = stream::from_iter(vec![(1,2), (3,4)]);
let (left, right): (Vec<_>, Vec<_>) = s.unzip().await;
assert_eq!(left, [1, 3]);
assert_eq!(right, [2, 4]);
```
## Added
- Added `Stream::unzip` as "unstable".
- Added verbose errors to the networking types.
## Changed
- Enabled CI on master branch.
- `Future::join` and `Future::try_join` can now join futures with different
output types.
## Fixed
- Fixed the docs and `Debug` output of `BufWriter`.
- Fixed a bug in `Stream::throttle` that made it consume too much CPU.
# [1.1.0] - 2019-11-21
[API Documentation](https://docs.rs/async-std/1.1.0/async-std)
This patch introduces a faster scheduler algorithm, `Stream::throttle`, and
stabilizes `task::yield_now`. Additionally we're introducing several more stream
APIs, bringing us to almost complete parity with the standard library.
Furthermore our `path` submodule now returns more context in errors. So if
opening a file fails, async-std will tell you *which* file it failed to open,
making it easier to write and debug programs.
## Examples
```rust
use async_std::prelude::*;
use async_std::stream;
use std::time::{Duration, Instant};

let start = Instant::now();
let mut s = stream::interval(Duration::from_millis(5))
.throttle(Duration::from_millis(10))
.take(2);
s.next().await;
assert!(start.elapsed().as_millis() >= 5);
s.next().await;
assert!(start.elapsed().as_millis() >= 15);
s.next().await;
assert!(start.elapsed().as_millis() >= 25);
```
## Added
- Added `Stream::throttle` as "unstable".
- Added `Stream::count` as "unstable".
- Added `Stream::max` as "unstable".
- Added `Stream::successors` as "unstable".
- Added `Stream::by_ref` as "unstable".
- Added `Stream::partition` as "unstable".
- Added contextual errors to the `path` submodule.
- Added `os::windows::symlink_dir` as "unstable".
- Added `os::windows::symlink_file` as "unstable".
- Stabilized `task::yield_now`.
## Fixes
- We now ignore seek errors when rolling back failed `read` calls on `File`.
- Fixed a bug where `Stream::max_by_key` was returning the wrong result.
- Fixed a bug where `Stream::min_by_key` was returning the wrong result.
## Changed
- Applied various fixes to the tutorial.
- Fixed an issue with Clippy.
- Optimized an internal code generation macro, improving compilation speeds.
- Removed an `Unpin` bound from `stream::Once`.
- Removed various extra internal uses of `pin_mut!`.
- Simplified `Stream::any` and `Stream::all`'s internals.
- The `surf` example is now enabled again.
- Tweaked some streams internals.
- Updated `futures-timer` to 2.0.0, improving compilation speed.
- Upgraded `async-macros` to 2.0.0.
- `Stream::merge` now uses randomized ordering to reduce overall latency.
- The scheduler is now more efficient by keeping a slot for the next task to
run. This is similar to Go's scheduler, and Tokio's scheduler.
- Fixed the documentation of the `channel` types to link back to the `channel`
function.
# [1.0.1] - 2019-11-12
[API Documentation](https://docs.rs/async-std/1.0.1/async-std)
We were seeing a regression in our fs performance, caused by too many
long-running tasks. This patch fixes that regression by being more proactive
about closing down idle threads.
## Changes
- Improved thread startup/shutdown algorithm in `task::spawn_blocking`.
- Fixed a typo in the tutorial.
# [1.0.0] - 2019-11-11
[API Documentation](https://docs.rs/async-std/1.0.0/async-std)
This release marks the `1.0.0` release of async-std, a major milestone for our
development. This release itself mostly includes quality of life improvements
for all of our modules, including more consistent API bounds for a lot of our
submodules.
The biggest change is that we're now using the full semver range,
`major.minor.patch`, and any breaking changes to our "stable" APIs will require
an update of the `major` number.
We're excited we've hit this milestone together with you all. Thank you!
## Added
- Added `Future::join` as "unstable", replacing `future::join!`.
- Added `Future::try_join` as "unstable", replacing `future::try_join!`.
- Enabled `stable` and `beta` channel testing on CI.
- Implemented `FromIterator` and `Extend` for `PathBuf`.
- Implemented `FromStream` for `PathBuf`.
- Loosened the trait bounds of `io::copy` on "unstable".
## Changed
- Added a `Sync` bound to `RwLock`, resolving a memory safety issue.
- Fixed a bug in `Stream::take_while` where it could continue after it should've
ended.
- Fixed a bug where our `attributes` Cargo feature wasn't working as intended.
- Improved documentation of `Stream::merge`, documenting ordering guarantees.
- Update doc imports in examples to prefer async-std's types.
- Various quality of life improvements to the `future` submodule.
- Various quality of life improvements to the `path` submodule.
- Various quality of life improvements to the `stream` submodule.
## Removed
- Removed `future::join!` in favor of `Future::join`.
- Removed `future::try_join!` in favor of `Future::try_join`.
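As a sketch of the migration from `future::join!` to `Future::join` noted above (assuming the method is brought in through the prelude's extension trait and requires the `unstable` feature; the values are placeholders):
```rust
use async_std::future;
use async_std::prelude::*; // extension trait providing `join` (unstable)

// Minimal sketch: await two futures concurrently and get both outputs back.
async fn join_example() -> (u8, &'static str) {
    let a = future::ready(1u8);
    let b = future::ready("hello");
    a.join(b).await
}
```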
# [0.99.12] - 2019-11-07
[API Documentation](https://docs.rs/async-std/0.99.12/async-std)
This patch upgrades us to `futures` 0.3, adds support for `async/await` on stable
Rust, and brings performance improvements and brand new module-level documentation.
## Added
- Added `Future::flatten` as "unstable".
- Added `Future::race` as "unstable" (replaces `future::select!`).
- Added `Future::try_race` as "unstable" (replaces `future::try_select!`).
- Added `Stderr::lock` as "unstable".
- Added `Stdin::lock` as "unstable".
- Added `Stdout::lock` as "unstable".
- Added `Stream::copied` as "unstable".
- Added `Stream::eq` as "unstable".
- Added `Stream::max_by_key` as "unstable".
- Added `Stream::min` as "unstable".
- Added `Stream::ne` as "unstable".
- Added `Stream::position` as "unstable".
- Added `StreamExt` and `FutureExt` as enumerable in the `prelude`.
- Added `TcpListener` and `TcpStream` integration tests.
- Added `stream::from_iter`.
- Added `sync::WakerSet` for internal use.
- Added an example to handle both `IP v4` and `IP v6` connections.
- Added the `default` Cargo feature.
- Added the `attributes` Cargo feature.
- Added the `std` Cargo feature.
## Changed
- Fixed a bug in the blocking threadpool where it didn't spawn more than one thread.
- Fixed a bug with `Stream::merge` where sometimes it ended too soon.
- Fixed a bug with our GitHub actions setup.
- Fixed an issue where our channels could spuriously deadlock.
- Refactored the `task` module.
- Removed a deprecated GitHub action.
- Replaced `futures-preview` with `futures`.
- Replaced `lazy_static` with `once_cell`.
- Replaced all uses of `VecDeque` in the examples with `stream::from_iter`.
- Simplified `sync::RwLock` using the internal `sync::WakerSet` type.
- Updated the `path` submodule documentation to match std.
- Updated the mod-level documentation to match std.
## Removed
- Removed `future::select!` (replaced by `Future::race`).
- Removed `future::try_select!` (replaced by `Future::try_race`).
# [0.99.11] - 2019-10-29
This patch introduces `async_std::sync::channel`, a novel asynchronous port of
@ -746,21 +338,8 @@ task::blocking(async {
- Initial beta release
[Unreleased]: https://github.com/async-rs/async-std/compare/v1.6.2...HEAD
[1.6.2]: https://github.com/async-rs/async-std/compare/v1.6.1...v1.6.2
[1.6.1]: https://github.com/async-rs/async-std/compare/v1.6.0...v1.6.1
[1.6.0]: https://github.com/async-rs/async-std/compare/v1.5.0...v1.6.0
[1.6.0-beta.2]: https://github.com/async-rs/async-std/compare/v1.6.0-beta.1...v1.6.0-beta.2
[1.6.0-beta.1]: https://github.com/async-rs/async-std/compare/v1.5.0...v1.6.0-beta.1
[1.5.0]: https://github.com/async-rs/async-std/compare/v1.4.0...v1.5.0
[1.4.0]: https://github.com/async-rs/async-std/compare/v1.3.0...v1.4.0
[1.3.0]: https://github.com/async-rs/async-std/compare/v1.2.0...v1.3.0
[1.2.0]: https://github.com/async-rs/async-std/compare/v1.1.0...v1.2.0
[1.1.0]: https://github.com/async-rs/async-std/compare/v1.0.1...v1.1.0
[1.0.1]: https://github.com/async-rs/async-std/compare/v1.0.0...v1.0.1
[1.0.0]: https://github.com/async-rs/async-std/compare/v0.99.12...v1.0.0
[0.99.12]: https://github.com/async-rs/async-std/compare/v0.99.11...v0.99.12
[0.99.11]: https://github.com/async-rs/async-std/compare/v0.99.10...v0.99.11
[Unreleased]: https://github.com/async-rs/async-std/compare/v0.99.11...HEAD
[0.99.10]: https://github.com/async-rs/async-std/compare/v0.99.10...v0.99.11
[0.99.10]: https://github.com/async-rs/async-std/compare/v0.99.9...v0.99.10
[0.99.9]: https://github.com/async-rs/async-std/compare/v0.99.8...v0.99.9
[0.99.8]: https://github.com/async-rs/async-std/compare/v0.99.7...v0.99.8

@ -1,10 +1,9 @@
[package]
name = "async-std"
version = "1.6.2"
version = "0.99.11"
authors = [
"Stjepan Glavina <stjepang@gmail.com>",
"Yoshua Wuyts <yoshuawuyts@gmail.com>",
"Friedel Ziegelmayer <me@dignifiedquire.com>",
"Contributors to async-std",
]
edition = "2018"
@ -22,86 +21,36 @@ features = ["docs"]
rustdoc-args = ["--cfg", "feature=\"docs\""]
[features]
default = [
"std",
"async-task",
"kv-log-macro",
"log",
"num_cpus",
"pin-project-lite",
"smol",
]
docs = ["attributes", "unstable", "default"]
unstable = [
"std",
"futures-timer",
]
attributes = ["async-attributes"]
std = [
"alloc",
"crossbeam-utils",
"futures-core/std",
"futures-io",
"memchr",
"once_cell",
"pin-utils",
"slab",
"wasm-bindgen-futures",
"futures-channel",
"async-mutex",
]
alloc = [
"futures-core/alloc",
"pin-project-lite",
]
tokio02 = ["smol/tokio02"]
docs = ["unstable"]
unstable = ["broadcaster"]
[dependencies]
async-attributes = { version = "1.1.1", optional = true }
async-task = { version = "3.0.0", optional = true }
async-mutex = { version = "1.1.3", optional = true }
crossbeam-utils = { version = "0.7.2", optional = true }
futures-core = { version = "0.3.4", optional = true, default-features = false }
futures-io = { version = "0.3.4", optional = true }
kv-log-macro = { version = "1.0.6", optional = true }
log = { version = "0.4.8", features = ["kv_unstable"], optional = true }
memchr = { version = "2.3.3", optional = true }
num_cpus = { version = "1.12.0", optional = true }
once_cell = { version = "1.3.1", optional = true }
pin-project-lite = { version = "0.1.4", optional = true }
pin-utils = { version = "0.1.0-alpha.4", optional = true }
slab = { version = "0.4.2", optional = true }
futures-timer = { version = "3.0.2", optional = true }
# Dev-dependency, but they are not allowed to be optional :/
surf = { version = "1.0.3", optional = true }
[target.'cfg(not(target_os = "unknown"))'.dependencies]
smol = { version = "0.1.17", optional = true }
[target.'cfg(target_arch = "wasm32")'.dependencies]
futures-timer = { version = "3.0.2", optional = true, features = ["wasm-bindgen"] }
wasm-bindgen-futures = { version = "0.4.10", optional = true }
futures-channel = { version = "0.3.4", optional = true }
[target.'cfg(target_arch = "wasm32")'.dev-dependencies]
wasm-bindgen-test = "0.3.10"
async-macros = "1.0.0"
async-task = "1.0.0"
crossbeam-channel = "0.3.9"
crossbeam-deque = "0.7.1"
crossbeam-utils = "0.6.6"
futures-core-preview = "=0.3.0-alpha.19"
futures-io-preview = "=0.3.0-alpha.19"
futures-timer = "1.0.2"
log = { version = "0.4.8", features = ["kv_unstable"] }
memchr = "2.2.1"
mio = "0.6.19"
mio-uds = "0.6.7"
num_cpus = "1.10.1"
once_cell = "1.2.0"
slab = "0.4.2"
kv-log-macro = "1.0.4"
broadcaster = { version = "0.2.6", optional = true, default-features = false, features = ["default-channels"] }
pin-project-lite = "0.1"
[dev-dependencies]
femme = "1.3.0"
rand = "0.7.3"
femme = "1.2.0"
rand = "0.7.2"
# surf = "1.0.2"
tempdir = "0.3.7"
futures = "0.3.4"
rand_xorshift = "0.2.0"
[[test]]
name = "stream"
required-features = ["unstable"]
[[example]]
name = "tcp-ipv4-and-6-echo"
required-features = ["unstable"]
futures-preview = { version = "=0.3.0-alpha.19", features = ["async-await"] }
[[example]]
name = "surf-web"
required-features = ["surf"]
# These are used by the book for examples
futures-channel-preview = "=0.3.0-alpha.19"
futures-util-preview = "=0.3.0-alpha.19"

@ -8,11 +8,6 @@
<br />
<div align="center">
<!-- CI status -->
<a href="https://github.com/async-rs/async-std/actions">
<img src="https://github.com/async-rs/async-std/workflows/CI/badge.svg"
alt="CI Status" />
</a>
<!-- Crates version -->
<a href="https://crates.io/crates/async-std">
<img src="https://img.shields.io/crates/v/async-std.svg?style=flat-square"
@ -66,7 +61,7 @@ syntax.
## Features
- __Modern:__ Built from the ground up for `std::future` and `async/await` with
blazing fast compilation time.
blazing fast compilation times.
- __Fast:__ Our robust allocator and threadpool designs provide ultra-high
throughput with predictably low latency.
- __Intuitive:__ Complete parity with the stdlib means you only need to learn
@ -82,22 +77,17 @@ syntax.
```rust
use async_std::task;
async fn say_hello() {
println!("Hello, world!");
}
fn main() {
task::block_on(say_hello())
task::block_on(async {
println!("Hello, world!");
})
}
```
More examples, including networking and file access, can be found in our
[`examples`] directory and in our [documentation].
[`examples`] directory.
[`examples`]: https://github.com/async-rs/async-std/tree/master/examples
[documentation]: https://docs.rs/async-std#examples
[`task::block_on`]: https://docs.rs/async-std/*/async_std/task/fn.block_on.html
[`"attributes"` feature]: https://docs.rs/async-std/#features
## Philosophy
@ -125,24 +115,6 @@ documentation] on how to enable them.
[cargo-add]: https://github.com/killercup/cargo-edit
[features documentation]: https://docs.rs/async-std/#features
## Ecosystem
* [async-tls](https://crates.io/crates/async-tls) — Async TLS/SSL streams using **Rustls**.
* [async-native-tls](https://crates.io/crates/async-native-tls) — **Native TLS** for Async. Native TLS for futures and async-std.
* [async-tungstenite](https://crates.io/crates/async-tungstenite) — Asynchronous **WebSockets** for async-std, tokio, gio and any std Futures runtime.
* [Tide](https://crates.io/crates/tide) — Serve the web. A modular **web framework** built around async/await.
* [SQLx](https://crates.io/crates/sqlx) — The Rust **SQL** Toolkit. SQLx is a 100% safe Rust library for Postgres and MySQL with compile-time checked queries.
* [Surf](https://crates.io/crates/surf) — Surf the web. Surf is a friendly **HTTP client** built for casual Rustaceans and veterans alike.
* [Xactor](https://crates.io/crates/xactor) — Xactor is a rust actors framework based on async-std.
* [async-graphql](https://crates.io/crates/async-graphql) — A GraphQL server library implemented in rust, with full support for async/await.
## License
<sup>

@ -1,40 +0,0 @@
#![feature(test)]
extern crate test;
use async_std::sync::{Arc, Mutex};
use async_std::task;
use test::Bencher;
#[bench]
fn create(b: &mut Bencher) {
b.iter(|| Mutex::new(()));
}
#[bench]
fn contention(b: &mut Bencher) {
b.iter(|| task::block_on(run(10, 1000)));
}
#[bench]
fn no_contention(b: &mut Bencher) {
b.iter(|| task::block_on(run(1, 10000)));
}
async fn run(task: usize, iter: usize) {
let m = Arc::new(Mutex::new(()));
let mut tasks = Vec::new();
for _ in 0..task {
let m = m.clone();
tasks.push(task::spawn(async move {
for _ in 0..iter {
let _ = m.lock().await;
}
}));
}
for t in tasks {
t.await;
}
}

@ -1,11 +0,0 @@
#![feature(test)]
extern crate test;
use async_std::task;
use test::Bencher;
#[bench]
fn block_on(b: &mut Bencher) {
b.iter(|| task::block_on(async {}));
}

@ -19,9 +19,8 @@
- [Clean Shutdown](./tutorial/clean_shutdown.md)
- [Handling Disconnection](./tutorial/handling_disconnection.md)
- [Implementing a Client](./tutorial/implementing_a_client.md)
- [Async Patterns](./patterns.md)
- [TODO: Async Patterns](./patterns.md)
- [TODO: Collected Small Patterns](./patterns/small-patterns.md)
- [Production-Ready Accept Loop](./patterns/accept-loop.md)
- [Security practices](./security/index.md)
- [Security Disclosures and Policy](./security/policy.md)
- [Glossary](./glossary.md)

@ -4,13 +4,13 @@ Rust has two kinds of types commonly referred to as `Future`:
- the first is `std::future::Future` from Rust's [standard library](https://doc.rust-lang.org/std/future/trait.Future.html).
- the second is `futures::future::Future` from the [futures-rs crate](https://docs.rs/futures/0.3/futures/prelude/trait.Future.html).
- the second is `futures::future::Future` from the [futures-rs crate](https://docs.rs/futures-preview/0.3.0-alpha.17/futures/prelude/trait.Future.html), currently released as `futures-preview`.
The future defined in the [futures-rs](https://docs.rs/futures/0.3/futures/prelude/trait.Future.html) crate was the original implementation of the type. To enable the `async/await` syntax, the core Future trait was moved into Rust's standard library and became `std::future::Future`. In some sense, the `std::future::Future` can be seen as a minimal subset of `futures::future::Future`.
The future defined in the [futures-rs](https://docs.rs/futures-preview/0.3.0-alpha.17/futures/prelude/trait.Future.html) crate was the original implementation of the type. To enable the `async/await` syntax, the core Future trait was moved into Rust's standard library and became `std::future::Future`. In some sense, the `std::future::Future` can be seen as a minimal subset of `futures::future::Future`.
It is critical to understand the difference between `std::future::Future` and `futures::future::Future`, and the approach that `async-std` takes towards them. In itself, `std::future::Future` is not something you want to interact with as a user—except by calling `.await` on it. The inner workings of `std::future::Future` are mostly of interest to people implementing `Future`. Make no mistake—this is very useful! Most of the functionality that used to be defined on `Future` itself has been moved to an extension trait called [`FutureExt`](https://docs.rs/futures/0.3/futures/future/trait.FutureExt.html). From this information, you might be able to infer that the `futures` library serves as an extension to the core Rust async features.
It is critical to understand the difference between `std::future::Future` and `futures::future::Future`, and the approach that `async-std` takes towards them. In itself, `std::future::Future` is not something you want to interact with as a user—except by calling `.await` on it. The inner workings of `std::future::Future` are mostly of interest to people implementing `Future`. Make no mistake—this is very useful! Most of the functionality that used to be defined on `Future` itself has been moved to an extension trait called [`FutureExt`](https://docs.rs/futures-preview/0.3.0-alpha.17/futures/future/trait.FutureExt.html). From this information, you might be able to infer that the `futures` library serves as an extension to the core Rust async features.
In the same tradition as `futures`, `async-std` re-exports the core `std::future::Future` type. You can actively opt into the extensions provided by the `futures` crate by adding it to your `Cargo.toml` and importing `FuturesExt`.
In the same tradition as `futures`, `async-std` re-exports the core `std::future::Future` type. You can actively opt into the extensions provided by the `futures-preview` crate by adding it to your `Cargo.toml` and importing `FuturesExt`.
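For example, opting into the `futures` crate's extension methods might look like the following (a minimal sketch; the function and values are ours, not from the book):
```rust,edition2018
# extern crate futures;
use futures::future::FutureExt; // extension methods on top of std::future::Future

async fn double() -> u8 {
    // `map` comes from the `futures` crate's `FutureExt`, not from std.
    async { 21u8 }.map(|x| x * 2).await
}
```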
## Interfaces and Stability

@ -1,266 +0,0 @@
# Production-Ready Accept Loop
A production-ready accept loop needs the following things:
1. Handling errors
2. Limiting the number of simultaneous connections to avoid denial-of-service
(DoS) attacks
## Handling errors
There are two kinds of errors in an accept loop:
1. Per-connection errors. The system uses them to notify you that there was a
connection in the queue which was dropped by the peer. Subsequent connections
may already be queued, so the next connection must be accepted immediately.
2. Resource shortages. When these are encountered it doesn't make sense to
accept the next socket immediately. But the listener stays active, so your server
should try to accept a socket again later.
Here is an example of a per-connection error (printed in normal and debug mode):
```
Error: Connection reset by peer (os error 104)
Error: Os { code: 104, kind: ConnectionReset, message: "Connection reset by peer" }
```
And the following is the most common example of a resource shortage error:
```
Error: Too many open files (os error 24)
Error: Os { code: 24, kind: Other, message: "Too many open files" }
```
### Testing Application
To test your application for these errors try the following (this works
on Unix-like systems only).
Lower the open file limit and start the application:
```
$ ulimit -n 100
$ cargo run --example your_app
Compiling your_app v0.1.0 (/work)
Finished dev [unoptimized + debuginfo] target(s) in 5.47s
Running `target/debug/examples/your_app`
Server is listening on: http://127.0.0.1:1234
```
Then in another console run the [`wrk`] benchmark tool:
```
$ wrk -c 1000 http://127.0.0.1:1234
Running 10s test @ http://127.0.0.1:1234/
2 threads and 1000 connections
$ telnet localhost 1234
Trying ::1...
Connected to localhost.
```
It is important to check the following things:
1. The application doesn't crash on error (but may log errors, see below)
2. It's possible to connect to the application again once the load is stopped
(a few seconds after `wrk` finishes). This is what `telnet` does in the example above;
make sure it prints `Connected to <hostname>`.
3. The `Too many open files` error is logged in the appropriate log. This
requires setting the "maximum number of simultaneous connections" parameter (see
below) of your application to a value greater than `100` for this example.
4. Check the CPU usage of the app while running the test. It should not occupy 100%
of a single CPU core (it's unlikely that you can exhaust the CPU with 1000
connections in Rust, so if it does, error handling is probably not right).
#### Testing non-HTTP applications
If possible, use an appropriate benchmark tool and set the appropriate
number of connections. For example `redis-benchmark` has a `-c` parameter for
that, if you implement the Redis protocol.
Alternatively, you can still use `wrk`; just make sure that the connection is not
immediately closed. If it is, put a temporary timeout before handing
the connection to the protocol handler, like this:
```rust,edition2018
# extern crate async_std;
# use std::time::Duration;
# use async_std::{
# net::{TcpListener, ToSocketAddrs},
# prelude::*,
# task,
# };
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
#
# async fn accept_loop(addr: impl ToSocketAddrs) -> Result<()> {
# let listener = TcpListener::bind(addr).await?;
# let mut incoming = listener.incoming();
while let Some(stream) = incoming.next().await {
task::spawn(async {
task::sleep(Duration::from_secs(10)).await; // 1
connection_loop(stream).await;
});
}
# Ok(())
# }
```
1. Make sure the sleep coroutine is inside the spawned task, not in the loop.
[`wrk`]: https://github.com/wg/wrk
### Handling Errors Manually
Here is how a basic accept loop could look:
```rust,edition2018
# extern crate async_std;
# use std::time::Duration;
# use async_std::{
# net::{TcpListener, ToSocketAddrs},
# prelude::*,
# task,
# };
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
#
async fn accept_loop(addr: impl ToSocketAddrs) -> Result<()> {
let listener = TcpListener::bind(addr).await?;
let mut incoming = listener.incoming();
while let Some(result) = incoming.next().await {
let stream = match result {
Err(ref e) if is_connection_error(e) => continue, // 1
Err(e) => {
eprintln!("Error: {}. Pausing for 500ms."); // 3
task::sleep(Duration::from_millis(500)).await; // 2
continue;
}
Ok(s) => s,
};
// body
}
Ok(())
}
```
1. Ignore per-connection errors.
2. Sleep and continue on resource shortage.
3. It's important to log the message, because these errors commonly indicate a
misconfigured system and the messages are helpful for the operations people running
the application.
Be sure to [test your application](#testing-application).
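The `is_connection_error` helper used above is not defined in this chapter; a minimal sketch might look like the following (the exact set of error kinds to treat as per-connection errors is an assumption):
```rust,edition2018
use std::io;

fn is_connection_error(e: &io::Error) -> bool {
    // Treat errors reported for an individual queued connection as ignorable.
    match e.kind() {
        io::ErrorKind::ConnectionRefused
        | io::ErrorKind::ConnectionAborted
        | io::ErrorKind::ConnectionReset => true,
        _ => false,
    }
}
```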
### External Crates
The crate [`async-listen`] has a helper to achieve this task:
```rust,edition2018
# extern crate async_std;
# extern crate async_listen;
# use std::time::Duration;
# use async_std::{
# io,
# net::{TcpListener, ToSocketAddrs},
# prelude::*,
# };
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
#
use async_listen::{ListenExt, error_hint};
async fn accept_loop(addr: impl ToSocketAddrs) -> Result<()> {
let listener = TcpListener::bind(addr).await?;
let mut incoming = listener
.incoming()
.log_warnings(log_accept_error) // 1
.handle_errors(Duration::from_millis(500));
while let Some(socket) = incoming.next().await { // 2
// body
}
Ok(())
}
fn log_accept_error(e: &io::Error) {
eprintln!("Error: {}. Listener paused for 0.5s. {}", e, error_hint(e)) // 3
}
```
1. Logs resource shortages (`async-listen` calls them warnings). If you use the
`log` crate or any other logging facility in your app this should go to the log.
2. The stream yields sockets without the `Result` wrapper after `handle_errors` because
all errors are already handled.
3. Together with the error we print a hint, which explains some errors for end
users. For example, it recommends increasing the open file limit and gives
a link.
[`async-listen`]: https://crates.io/crates/async-listen/
Be sure to [test your application](#testing-application).
## Connections Limit
Even if you've applied everything described in the
[Handling Errors](#handling-errors) section, there is still a problem.
Let's imagine you have a server that needs to open a file to process a
client request. At some point, you might encounter the following situation:
1. There are as many client connections as the maximum number of file descriptors
allowed for the application.
2. The listener gets a `Too many open files` error, so it sleeps.
3. Some client sends a request via a previously opened connection.
4. Opening a file to serve the request fails because of the same
`Too many open files` error, until some other client drops a connection.
There are many more possible situations; this is just a small illustration showing that
limiting the number of connections is very useful. Generally, it's one of the ways
to control the resources used by a server and to avoid some kinds of denial-of-service
(DoS) attacks.
### `async-listen` crate
Limiting the maximum number of simultaneous connections with [`async-listen`]
looks like the following:
```rust,edition2018
# extern crate async_std;
# extern crate async_listen;
# use std::time::Duration;
# use async_std::{
# io,
# net::{TcpListener, TcpStream, ToSocketAddrs},
# prelude::*,
# task,
# };
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
#
use async_listen::{ListenExt, Token, error_hint};
async fn accept_loop(addr: impl ToSocketAddrs) -> Result<()> {
let listener = TcpListener::bind(addr).await?;
let mut incoming = listener
.incoming()
.log_warnings(log_accept_error)
.handle_errors(Duration::from_millis(500)) // 1
.backpressure(100);
while let Some((token, socket)) = incoming.next().await { // 2
task::spawn(async move {
connection_loop(&token, socket).await; // 3
});
}
Ok(())
}
async fn connection_loop(_token: &Token, stream: TcpStream) { // 4
// ...
}
# fn log_accept_error(e: &io::Error) {
# eprintln!("Error: {}. Listener paused for 0.5s. {}", e, error_hint(e));
# }
```
1. We need to handle errors first, because the [`backpressure`] helper expects
a stream of `TcpStream` rather than `Result`.
2. The token yielded alongside a new stream is what the backpressure helper counts.
I.e. if you drop a token, a new connection can be established.
3. We give the connection loop a reference to the token to bind the token's lifetime to
the lifetime of the connection.
4. The token itself in the function can be ignored, hence `_token`.
[`backpressure`]: https://docs.rs/async-listen/0.1.2/async_listen/trait.ListenExt.html#method.backpressure
Be sure to [test this behavior](#testing-application).

@ -4,15 +4,16 @@ At this point, we only need to start the broker to get a fully-functioning (in t
```rust,edition2018
# extern crate async_std;
# extern crate futures;
# extern crate futures_channel;
# extern crate futures_util;
use async_std::{
io::BufReader,
io::{self, BufReader},
net::{TcpListener, TcpStream, ToSocketAddrs},
prelude::*,
task,
};
use futures::channel::mpsc;
use futures::sink::SinkExt;
use futures_channel::mpsc;
use futures_util::SinkExt;
use std::{
collections::hash_map::{HashMap, Entry},
sync::Arc,

@ -22,15 +22,16 @@ Let's add waiting to the server:
```rust,edition2018
# extern crate async_std;
# extern crate futures;
# extern crate futures_channel;
# extern crate futures_util;
# use async_std::{
# io::{self, BufReader},
# net::{TcpListener, TcpStream, ToSocketAddrs},
# prelude::*,
# task,
# };
# use futures::channel::mpsc;
# use futures::sink::SinkExt;
# use futures_channel::mpsc;
# use futures_util::SinkExt;
# use std::{
# collections::hash_map::{HashMap, Entry},
# sync::Arc,
@ -155,15 +156,16 @@ And to the broker:
```rust,edition2018
# extern crate async_std;
# extern crate futures;
# extern crate futures_channel;
# extern crate futures_util;
# use async_std::{
# io::{self, BufReader},
# net::{TcpListener, TcpStream, ToSocketAddrs},
# prelude::*,
# task,
# };
# use futures::channel::mpsc;
# use futures::sink::SinkExt;
# use futures_channel::mpsc;
# use futures_util::SinkExt;
# use std::{
# collections::hash_map::{HashMap, Entry},
# sync::Arc,

@ -2,24 +2,25 @@
## Connecting Readers and Writers
So how do we make sure that messages read in `connection_loop` flow into the relevant `connection_writer_loop`?
We should somehow maintain a `peers: HashMap<String, Sender<String>>` map which allows a client to find destination channels.
We should somehow maintain an `peers: HashMap<String, Sender<String>>` map which allows a client to find destination channels.
However, this map would be a bit of shared mutable state, so we'll have to wrap an `RwLock` over it and answer tough questions of what should happen if the client joins at the same moment as it receives a message.
One trick to make reasoning about state simpler comes from the actor model.
We can create a dedicated broker task which owns the `peers` map and communicates with other tasks using channels.
By hiding `peers` inside such an "actor" task, we remove the need for mutexes and also make the serialization point explicit.
We can create a dedicated broker tasks which owns the `peers` map and communicates with other tasks by channels.
By hiding `peers` inside such an "actor" task, we remove the need for mutxes and also make serialization point explicit.
The order of events "Bob sends message to Alice" and "Alice joins" is determined by the order of the corresponding events in the broker's event queue.
```rust,edition2018
# extern crate async_std;
# extern crate futures;
# extern crate futures_channel;
# extern crate futures_util;
# use async_std::{
# net::TcpStream,
# prelude::*,
# task,
# };
# use futures::channel::mpsc;
# use futures::sink::SinkExt;
# use futures_channel::mpsc;
# use futures_util::sink::SinkExt;
# use std::sync::Arc;
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
@ -92,9 +93,9 @@ async fn broker_loop(mut events: Receiver<Event>) -> Result<()> {
}
```
1. The broker task should handle two types of events: a message or an arrival of a new peer.
2. The internal state of the broker is a `HashMap`.
1. Broker should handle two types of events: a message or an arrival of a new peer.
2. Internal state of the broker is a `HashMap`.
Note how we don't need a `Mutex` here and can confidently say, at each iteration of the broker's loop, what is the current set of peers
3. To handle a message, we send it over a channel to each destination
4. To handle a new peer, we first register it in the peer's map ...
4. To handle new peer, we first register it in the peer's map ...
5. ... and then spawn a dedicated task to actually write the messages to the socket.

@ -19,10 +19,11 @@ First, let's add a shutdown channel to the `connection_loop`:
```rust,edition2018
# extern crate async_std;
# extern crate futures;
# extern crate futures_channel;
# extern crate futures_util;
# use async_std::net::TcpStream;
# use futures::channel::mpsc;
# use futures::sink::SinkExt;
# use futures_channel::mpsc;
# use futures_util::SinkExt;
# use std::sync::Arc;
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
@ -60,8 +61,8 @@ async fn connection_loop(mut broker: Sender<Event>, stream: Arc<TcpStream>) -> R
}
```
1. To enforce that no messages are sent along the shutdown channel, we use an uninhabited type.
2. We pass the shutdown channel to the writer task.
1. To enforce that no messages are send along the shutdown channel, we use an uninhabited type.
2. We pass the shutdown channel to the writer task
3. In the reader, we create a `_shutdown_sender` whose only purpose is to get dropped.
In the `connection_writer_loop`, we now need to choose between shutdown and message channels.
@ -69,14 +70,17 @@ We use the `select` macro for this purpose:
```rust,edition2018
# extern crate async_std;
# extern crate futures;
# extern crate futures_channel;
# extern crate futures_util;
# use async_std::{net::TcpStream, prelude::*};
# use futures::channel::mpsc;
use futures::{select, FutureExt};
use futures_channel::mpsc;
use futures_util::{select, FutureExt};
# use std::sync::Arc;
# type Receiver<T> = mpsc::UnboundedReceiver<T>;
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
# type Sender<T> = mpsc::UnboundedSender<T>;
# #[derive(Debug)]
# enum Void {} // 1
@ -110,7 +114,7 @@ async fn connection_writer_loop(
Another problem is that between the moment we detect disconnection in `connection_writer_loop` and the moment when we actually remove the peer from the `peers` map, new messages might be pushed into the peer's channel.
To not lose these messages completely, we'll return the messages channel back to the broker.
This also allows us to establish a useful invariant that the message channel strictly outlives the peer in the `peers` map, and makes the broker itself infallible.
This also allows us to establish a useful invariant that the message channel strictly outlives the peer in the `peers` map, and makes the broker itself infailable.
## Final Code
@ -118,16 +122,16 @@ The final code looks like this:
```rust,edition2018
# extern crate async_std;
# extern crate futures;
# extern crate futures_channel;
# extern crate futures_util;
use async_std::{
io::BufReader,
net::{TcpListener, TcpStream, ToSocketAddrs},
prelude::*,
task,
};
use futures::channel::mpsc;
use futures::sink::SinkExt;
use futures::{select, FutureExt};
use futures_channel::mpsc;
use futures_util::{select, FutureExt, SinkExt};
use std::{
collections::hash_map::{Entry, HashMap},
future::Future,

@ -1,27 +1,29 @@
## Implementing a client
Since the protocol is line-based, implementing a client for the chat is straightforward:
Let's now implement the client for the chat.
Because the protocol is line-based, the implementation is pretty straightforward:
* Lines read from stdin should be sent over the socket.
* Lines read from the socket should be echoed to stdout.
Although async does not significantly affect client performance (as unlike the server, the client interacts solely with one user and only needs limited concurrency), async is still useful for managing concurrency!
The client has to read from stdin and the socket *simultaneously*.
Programming this with threads is cumbersome, especially when implementing a clean shutdown.
With async, the `select!` macro is all that is needed.
Unlike the server, the client needs only limited concurrency, as it interacts with only a single user.
For this reason, async doesn't bring a lot of performance benefits in this case.
However, async is still useful for managing concurrency!
Specifically, the client should *simultaneously* read from stdin and from the socket.
Programming this with threads is cumbersome, especially when implementing clean shutdown.
With async, we can just use the `select!` macro.
```rust,edition2018
# extern crate async_std;
# extern crate futures;
# extern crate futures_util;
use async_std::{
io::{stdin, BufReader},
net::{TcpStream, ToSocketAddrs},
prelude::*,
task,
};
use futures::{select, FutureExt};
use futures_util::{select, FutureExt};
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;

@ -1,14 +1,11 @@
# Tutorial: Writing a chat
Nothing is simpler than creating a chat server, right?
Not quite, chat servers expose you to all the fun of asynchronous programming:
Nothing is as simple as a chat server, right? Not quite, chat servers
already expose you to all the fun of asynchronous programming: how
do you handle clients connecting concurrently. How do you handle them disconnecting?
How will the server handle clients connecting concurrently?
How do you distribute the messages?
How will it handle them disconnecting?
How will it distribute the messages?
This tutorial explains how to write a chat server in `async-std`.
In this tutorial, we will show you how to write one in `async-std`.
You can also find the tutorial in [our repository](https://github.com/async-rs/async-std/blob/master/examples/a-chat).

@ -10,18 +10,14 @@ We need to:
```rust,edition2018
# extern crate async_std;
# use async_std::{
# net::{TcpListener, ToSocketAddrs},
# io::BufReader,
# net::{TcpListener, TcpStream, ToSocketAddrs},
# prelude::*,
# task,
# };
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
#
use async_std::{
io::BufReader,
net::TcpStream,
};
async fn accept_loop(addr: impl ToSocketAddrs) -> Result<()> {
let listener = TcpListener::bind(addr).await?;
let mut incoming = listener.incoming();
@ -50,7 +46,7 @@ async fn connection_loop(stream: TcpStream) -> Result<()> {
Some(idx) => (&line[..idx], line[idx + 1 ..].trim()),
};
let dest: Vec<String> = dest.split(',').map(|name| name.trim().to_string()).collect();
let msg: String = msg.to_string();
let msg: String = msg.trim().to_string();
}
Ok(())
}
@ -111,7 +107,7 @@ We can "fix" it by waiting for the task to be joined, like this:
#
# async move |stream| {
let handle = task::spawn(connection_loop(stream));
handle.await?
handle.await
# };
```
@ -134,7 +130,7 @@ So let's use a helper function for this:
# };
fn spawn_and_log_error<F>(fut: F) -> task::JoinHandle<()>
where
F: Future<Output = Result<()>> + Send + 'static,
F: Future<Output = io::Result<()>> + Send + 'static,
{
task::spawn(async move {
if let Err(e) = fut.await {

@ -13,13 +13,14 @@ if Alice and Charley send two messages to Bob at the same time, Bob will see the
```rust,edition2018
# extern crate async_std;
# extern crate futures;
# extern crate futures_channel;
# extern crate futures_util;
# use async_std::{
# net::TcpStream,
# prelude::*,
# };
use futures::channel::mpsc; // 1
use futures::sink::SinkExt;
use futures_channel::mpsc; // 1
use futures_util::sink::SinkExt;
use std::sync::Arc;
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;

@ -12,7 +12,7 @@ After that, the client can send messages to other clients using the following sy
login1, login2, ... loginN: message
```
Each of the specified clients then receives a `from login: message` message.
Each of the specified clients than receives a `from login: message` message.
A possible session might look like this
@ -38,10 +38,18 @@ $ cargo new a-chat
$ cd a-chat
```
At the moment `async-std` requires Rust nightly, so let's add a rustup override for convenience:
```bash
$ rustup override add nightly
$ rustc --version
rustc 1.38.0-nightly (c4715198b 2019-08-05)
```
Add the following lines to `Cargo.toml`:
```toml
[dependencies]
futures = "0.3.0"
async-std = "1"
futures-preview = { version = "0.3.0-alpha.19", features = [ "async-await" ] }
async-std = "0.99"
```

@ -1,3 +1,6 @@
/* TODO: Once the next version of surf released, re-enable this example.
//! Sends an HTTP request to the Rust website.
use async_std::task;
fn main() -> Result<(), surf::Exception> {
@ -15,3 +18,6 @@ fn main() -> Result<(), surf::Exception> {
Ok(())
})
}
*/
fn main() {}

@ -14,9 +14,8 @@ use async_std::task;
async fn process(stream: TcpStream) -> io::Result<()> {
println!("Accepted from: {}", stream.peer_addr()?);
let mut reader = stream.clone();
let mut writer = stream;
io::copy(&mut reader, &mut writer).await?;
let (reader, writer) = &mut (&stream, &stream);
io::copy(reader, writer).await?;
Ok(())
}

@ -1,45 +0,0 @@
//! TCP echo server, accepting connections on both ipv4 and ipv6 sockets.
//!
//! To send messages, do:
//!
//! ```sh
//! $ nc 127.0.0.1 8080
//! $ nc ::1 8080
//! ```
use async_std::io;
use async_std::net::{TcpListener, TcpStream};
use async_std::prelude::*;
use async_std::task;
async fn process(stream: TcpStream) -> io::Result<()> {
println!("Accepted from: {}", stream.peer_addr()?);
let mut reader = stream.clone();
let mut writer = stream;
io::copy(&mut reader, &mut writer).await?;
Ok(())
}
fn main() -> io::Result<()> {
task::block_on(async {
let ipv4_listener = TcpListener::bind("127.0.0.1:8080").await?;
println!("Listening on {}", ipv4_listener.local_addr()?);
let ipv6_listener = TcpListener::bind("[::1]:8080").await?;
println!("Listening on {}", ipv6_listener.local_addr()?);
let ipv4_incoming = ipv4_listener.incoming();
let ipv6_incoming = ipv6_listener.incoming();
let mut incoming = ipv4_incoming.merge(ipv6_incoming);
while let Some(stream) = incoming.next().await {
let stream = stream?;
task::spawn(async {
process(stream).await.unwrap();
});
}
Ok(())
})
}

@ -2,16 +2,13 @@ use std::collections::BinaryHeap;
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, IntoStream};
use crate::stream::{Extend, IntoStream};
impl<T: Ord + Send> stream::Extend<T> for BinaryHeap<T> {
fn extend<'a, S: IntoStream<Item = T> + 'a>(
impl<T: Ord> Extend<T> for BinaryHeap<T> {
fn stream_extend<'a, S: IntoStream<Item = T> + 'a>(
&'a mut self,
stream: S,
) -> Pin<Box<dyn Future<Output = ()> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = ()> + 'a>> {
let stream = stream.into_stream();
self.reserve(stream.size_hint().0);

@ -1,22 +1,23 @@
use std::collections::BinaryHeap;
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, FromStream, IntoStream};
use crate::stream::{Extend, FromStream, IntoStream};
impl<T: Ord + Send> FromStream<T> for BinaryHeap<T> {
impl<T: Ord> FromStream<T> for BinaryHeap<T> {
#[inline]
fn from_stream<'a, S: IntoStream<Item = T> + 'a>(
fn from_stream<'a, S: IntoStream<Item = T>>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
) -> Pin<Box<dyn core::future::Future<Output = Self> + 'a>>
where
<S as IntoStream>::IntoStream: Send,
<S as IntoStream>::IntoStream: 'a,
{
let stream = stream.into_stream();
Box::pin(async move {
pin_utils::pin_mut!(stream);
let mut out = BinaryHeap::new();
stream::extend(&mut out, stream).await;
out.stream_extend(stream).await;
out
})
}
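For context, this `FromStream`/`stream_extend` machinery is what backs `Stream::collect` for the standard collections. A hedged usage sketch, assuming the crate's `unstable` feature (where `FromStream` and `collect` lived at this point) and using `stream::repeat` plus `take` as a stand-in data source:

```rust
use std::collections::BinaryHeap;

use async_std::prelude::*;
use async_std::stream;
use async_std::task;

fn main() {
    task::block_on(async {
        // Collecting a finite stream goes through the `FromStream` impl above,
        // which in turn extends an empty heap with every yielded item.
        let heap = stream::repeat(7).take(3).collect::<BinaryHeap<i32>>().await;
        assert_eq!(heap.len(), 3);
        assert_eq!(heap.peek(), Some(&7));
    });
}
```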

@ -2,16 +2,13 @@ use std::collections::BTreeMap;
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, IntoStream};
use crate::stream::{Extend, IntoStream};
impl<K: Ord + Send, V: Send> stream::Extend<(K, V)> for BTreeMap<K, V> {
fn extend<'a, S: IntoStream<Item = (K, V)> + 'a>(
impl<K: Ord, V> Extend<(K, V)> for BTreeMap<K, V> {
fn stream_extend<'a, S: IntoStream<Item = (K, V)> + 'a>(
&'a mut self,
stream: S,
) -> Pin<Box<dyn Future<Output = ()> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = ()> + 'a>> {
Box::pin(stream.into_stream().for_each(move |(k, v)| {
self.insert(k, v);
}))

@ -1,22 +1,23 @@
use std::collections::BTreeMap;
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, FromStream, IntoStream};
use crate::stream::{Extend, FromStream, IntoStream};
impl<K: Ord + Send, V: Send> FromStream<(K, V)> for BTreeMap<K, V> {
impl<K: Ord, V> FromStream<(K, V)> for BTreeMap<K, V> {
#[inline]
fn from_stream<'a, S: IntoStream<Item = (K, V)> + 'a>(
fn from_stream<'a, S: IntoStream<Item = (K, V)>>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
) -> Pin<Box<dyn core::future::Future<Output = Self> + 'a>>
where
<S as IntoStream>::IntoStream: Send,
<S as IntoStream>::IntoStream: 'a,
{
let stream = stream.into_stream();
Box::pin(async move {
pin_utils::pin_mut!(stream);
let mut out = BTreeMap::new();
stream::extend(&mut out, stream).await;
out.stream_extend(stream).await;
out
})
}

@ -2,16 +2,13 @@ use std::collections::BTreeSet;
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, IntoStream};
use crate::stream::{Extend, IntoStream};
impl<T: Ord + Send> stream::Extend<T> for BTreeSet<T> {
fn extend<'a, S: IntoStream<Item = T> + 'a>(
impl<T: Ord> Extend<T> for BTreeSet<T> {
fn stream_extend<'a, S: IntoStream<Item = T> + 'a>(
&'a mut self,
stream: S,
) -> Pin<Box<dyn Future<Output = ()> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = ()> + 'a>> {
Box::pin(stream.into_stream().for_each(move |item| {
self.insert(item);
}))

@ -1,22 +1,23 @@
use std::collections::BTreeSet;
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, FromStream, IntoStream};
use crate::stream::{Extend, FromStream, IntoStream};
impl<T: Ord + Send> FromStream<T> for BTreeSet<T> {
impl<T: Ord> FromStream<T> for BTreeSet<T> {
#[inline]
fn from_stream<'a, S: IntoStream<Item = T> + 'a>(
fn from_stream<'a, S: IntoStream<Item = T>>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
) -> Pin<Box<dyn core::future::Future<Output = Self> + 'a>>
where
<S as IntoStream>::IntoStream: Send,
<S as IntoStream>::IntoStream: 'a,
{
let stream = stream.into_stream();
Box::pin(async move {
pin_utils::pin_mut!(stream);
let mut out = BTreeSet::new();
stream::extend(&mut out, stream).await;
out.stream_extend(stream).await;
out
})
}

@ -3,21 +3,17 @@ use std::hash::{BuildHasher, Hash};
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, IntoStream};
use crate::stream::{Extend, IntoStream};
impl<K, V, H> stream::Extend<(K, V)> for HashMap<K, V, H>
impl<K, V, H> Extend<(K, V)> for HashMap<K, V, H>
where
K: Eq + Hash + Send,
V: Send,
H: BuildHasher + Default + Send,
K: Eq + Hash,
H: BuildHasher + Default,
{
fn extend<'a, S: IntoStream<Item = (K, V)> + 'a>(
fn stream_extend<'a, S: IntoStream<Item = (K, V)> + 'a>(
&'a mut self,
stream: S,
) -> Pin<Box<dyn Future<Output = ()> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = ()> + 'a>> {
let stream = stream.into_stream();
// The following is adapted from the hashbrown source code:

@ -2,27 +2,27 @@ use std::collections::HashMap;
use std::hash::{BuildHasher, Hash};
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, FromStream, IntoStream};
use crate::stream::{Extend, FromStream, IntoStream};
impl<K, V, H> FromStream<(K, V)> for HashMap<K, V, H>
where
K: Eq + Hash + Send,
H: BuildHasher + Default + Send,
V: Send,
K: Eq + Hash,
H: BuildHasher + Default,
{
#[inline]
fn from_stream<'a, S: IntoStream<Item = (K, V)> + 'a>(
fn from_stream<'a, S: IntoStream<Item = (K, V)>>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
) -> Pin<Box<dyn core::future::Future<Output = Self> + 'a>>
where
<S as IntoStream>::IntoStream: Send,
<S as IntoStream>::IntoStream: 'a,
{
let stream = stream.into_stream();
Box::pin(async move {
pin_utils::pin_mut!(stream);
let mut out = HashMap::with_hasher(Default::default());
stream::extend(&mut out, stream).await;
out.stream_extend(stream).await;
out
})
}

@ -3,20 +3,17 @@ use std::hash::{BuildHasher, Hash};
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, IntoStream};
use crate::stream::{Extend, IntoStream};
impl<T, H> stream::Extend<T> for HashSet<T, H>
impl<T, H> Extend<T> for HashSet<T, H>
where
T: Eq + Hash + Send,
H: BuildHasher + Default + Send,
T: Eq + Hash,
H: BuildHasher + Default,
{
fn extend<'a, S: IntoStream<Item = T> + 'a>(
fn stream_extend<'a, S: IntoStream<Item = T> + 'a>(
&'a mut self,
stream: S,
) -> Pin<Box<dyn Future<Output = ()> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = ()> + 'a>> {
// The Extend impl for HashSet in the standard library delegates to the internal HashMap.
// Thus, this impl is just a copy of the async Extend impl for HashMap in this crate.

@ -2,26 +2,27 @@ use std::collections::HashSet;
use std::hash::{BuildHasher, Hash};
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, FromStream, IntoStream};
use crate::stream::{Extend, FromStream, IntoStream};
impl<T, H> FromStream<T> for HashSet<T, H>
where
T: Eq + Hash + Send,
H: BuildHasher + Default + Send,
T: Eq + Hash,
H: BuildHasher + Default,
{
#[inline]
fn from_stream<'a, S: IntoStream<Item = T> + 'a>(
fn from_stream<'a, S: IntoStream<Item = T>>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
) -> Pin<Box<dyn core::future::Future<Output = Self> + 'a>>
where
<S as IntoStream>::IntoStream: Send,
<S as IntoStream>::IntoStream: 'a,
{
let stream = stream.into_stream();
Box::pin(async move {
pin_utils::pin_mut!(stream);
let mut out = HashSet::with_hasher(Default::default());
stream::extend(&mut out, stream).await;
out.stream_extend(stream).await;
out
})
}

@ -2,16 +2,13 @@ use std::collections::LinkedList;
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, IntoStream};
use crate::stream::{Extend, IntoStream};
impl<T: Send> stream::Extend<T> for LinkedList<T> {
fn extend<'a, S: IntoStream<Item = T> + 'a>(
impl<T> Extend<T> for LinkedList<T> {
fn stream_extend<'a, S: IntoStream<Item = T> + 'a>(
&'a mut self,
stream: S,
) -> Pin<Box<dyn Future<Output = ()> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = ()> + 'a>> {
let stream = stream.into_stream();
Box::pin(stream.for_each(move |item| self.push_back(item)))
}

@ -1,22 +1,23 @@
use std::collections::LinkedList;
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, FromStream, IntoStream};
use crate::stream::{Extend, FromStream, IntoStream};
impl<T: Send> FromStream<T> for LinkedList<T> {
impl<T> FromStream<T> for LinkedList<T> {
#[inline]
fn from_stream<'a, S: IntoStream<Item = T> + 'a>(
fn from_stream<'a, S: IntoStream<Item = T>>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
) -> Pin<Box<dyn core::future::Future<Output = Self> + 'a>>
where
<S as IntoStream>::IntoStream: Send,
<S as IntoStream>::IntoStream: 'a,
{
let stream = stream.into_stream();
Box::pin(async move {
pin_utils::pin_mut!(stream);
let mut out = LinkedList::new();
stream::extend(&mut out, stream).await;
out.stream_extend(stream).await;
out
})
}

@ -2,16 +2,13 @@ use std::collections::VecDeque;
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, IntoStream};
use crate::stream::{Extend, IntoStream};
impl<T: Send> stream::Extend<T> for VecDeque<T> {
fn extend<'a, S: IntoStream<Item = T> + 'a>(
impl<T> Extend<T> for VecDeque<T> {
fn stream_extend<'a, S: IntoStream<Item = T> + 'a>(
&'a mut self,
stream: S,
) -> Pin<Box<dyn Future<Output = ()> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
) -> Pin<Box<dyn Future<Output = ()> + 'a>> {
let stream = stream.into_stream();
self.reserve(stream.size_hint().0);

@ -1,22 +1,23 @@
use std::collections::VecDeque;
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, FromStream, IntoStream};
use crate::stream::{Extend, FromStream, IntoStream};
impl<T: Send> FromStream<T> for VecDeque<T> {
impl<T> FromStream<T> for VecDeque<T> {
#[inline]
fn from_stream<'a, S: IntoStream<Item = T> + 'a>(
fn from_stream<'a, S: IntoStream<Item = T>>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
) -> Pin<Box<dyn core::future::Future<Output = Self> + 'a>>
where
<S as IntoStream>::IntoStream: Send,
<S as IntoStream>::IntoStream: 'a,
{
let stream = stream.into_stream();
Box::pin(async move {
pin_utils::pin_mut!(stream);
let mut out = VecDeque::new();
stream::extend(&mut out, stream).await;
out.stream_extend(stream).await;
out
})
}

@ -1,7 +1,6 @@
use crate::io;
use crate::path::{Path, PathBuf};
use crate::task::spawn_blocking;
use crate::utils::Context as _;
use crate::task::blocking;
/// Returns the canonical form of a path.
///
@ -33,10 +32,5 @@ use crate::utils::Context as _;
/// ```
pub async fn canonicalize<P: AsRef<Path>>(path: P) -> io::Result<PathBuf> {
let path = path.as_ref().to_owned();
spawn_blocking(move || {
std::fs::canonicalize(&path)
.map(Into::into)
.context(|| format!("could not canonicalize `{}`", path.display()))
})
.await
blocking::spawn(move || std::fs::canonicalize(&path).map(Into::into)).await
}
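The `.context(...)` calls on the other side of these hunks come from a crate-internal `utils::Context` helper that attaches the offending path to the `io::Error` message. Its exact definition is not part of this diff; a rough sketch of what such an extension could look like (names and error wrapping are assumptions, not the crate's actual code):

```rust
use std::io;

// Hypothetical helper: wrap an io::Error with a lazily built message while
// preserving the original error kind.
trait Context {
    fn context(self, message: impl Fn() -> String) -> Self;
}

impl<T> Context for io::Result<T> {
    fn context(self, message: impl Fn() -> String) -> Self {
        self.map_err(|err| io::Error::new(err.kind(), format!("{} ({})", message(), err)))
    }
}

fn main() {
    // The error for a missing file now names the path that was requested.
    let res: io::Result<String> = std::fs::read_to_string("missing.txt")
        .context(|| format!("could not read file `{}`", "missing.txt"));
    println!("{:?}", res);
}
```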

@ -1,7 +1,6 @@
use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use crate::utils::Context as _;
use crate::task::blocking;
/// Copies the contents and permissions of a file to a new location.
///
@ -42,9 +41,5 @@ use crate::utils::Context as _;
pub async fn copy<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> io::Result<u64> {
let from = from.as_ref().to_owned();
let to = to.as_ref().to_owned();
spawn_blocking(move || {
std::fs::copy(&from, &to)
.context(|| format!("could not copy `{}` to `{}`", from.display(), to.display()))
})
.await
blocking::spawn(move || std::fs::copy(&from, &to)).await
}

@ -1,7 +1,6 @@
use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use crate::utils::Context as _;
use crate::task::blocking;
/// Creates a new directory.
///
@ -35,9 +34,5 @@ use crate::utils::Context as _;
/// ```
pub async fn create_dir<P: AsRef<Path>>(path: P) -> io::Result<()> {
let path = path.as_ref().to_owned();
spawn_blocking(move || {
std::fs::create_dir(&path)
.context(|| format!("could not create directory `{}`", path.display()))
})
.await
blocking::spawn(move || std::fs::create_dir(path)).await
}

@ -1,7 +1,6 @@
use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use crate::utils::Context as _;
use crate::task::blocking;
/// Creates a new directory and all of its parents if they are missing.
///
@ -30,9 +29,5 @@ use crate::utils::Context as _;
/// ```
pub async fn create_dir_all<P: AsRef<Path>>(path: P) -> io::Result<()> {
let path = path.as_ref().to_owned();
spawn_blocking(move || {
std::fs::create_dir_all(&path)
.context(|| format!("could not create directory path `{}`", path.display()))
})
.await
blocking::spawn(move || std::fs::create_dir_all(path)).await
}

@ -2,7 +2,7 @@ use std::future::Future;
use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use crate::task::blocking;
/// A builder for creating directories with configurable options.
///
@ -107,7 +107,7 @@ impl DirBuilder {
}
let path = path.as_ref().to_owned();
async move { spawn_blocking(move || builder.create(path)).await }
async move { blocking::spawn(move || builder.create(path)).await }
}
}

@ -5,7 +5,7 @@ use std::sync::Arc;
use crate::fs::{FileType, Metadata};
use crate::io;
use crate::path::PathBuf;
use crate::task::spawn_blocking;
use crate::task::blocking;
/// An entry in a directory.
///
@ -87,7 +87,7 @@ impl DirEntry {
/// ```
pub async fn metadata(&self) -> io::Result<Metadata> {
let inner = self.0.clone();
spawn_blocking(move || inner.metadata()).await
blocking::spawn(move || inner.metadata()).await
}
/// Reads the file type for this entry.
@ -125,7 +125,7 @@ impl DirEntry {
/// ```
pub async fn file_type(&self) -> io::Result<FileType> {
let inner = self.0.clone();
spawn_blocking(move || inner.file_type()).await
blocking::spawn(move || inner.file_type()).await
}
/// Returns the bare name of this entry without the leading path.
@ -158,12 +158,6 @@ impl fmt::Debug for DirEntry {
}
}
impl Clone for DirEntry {
fn clone(&self) -> Self {
DirEntry(self.0.clone())
}
}
cfg_unix! {
use crate::os::unix::fs::DirEntryExt;

@ -12,8 +12,7 @@ use crate::future;
use crate::io::{self, Read, Seek, SeekFrom, Write};
use crate::path::Path;
use crate::prelude::*;
use crate::task::{spawn_blocking, Context, Poll, Waker};
use crate::utils::Context as _;
use crate::task::{self, blocking, Context, Poll, Waker};
/// An open file on the filesystem.
///
@ -113,10 +112,7 @@ impl File {
/// ```
pub async fn open<P: AsRef<Path>>(path: P) -> io::Result<File> {
let path = path.as_ref().to_owned();
let file = spawn_blocking(move || {
std::fs::File::open(&path).context(|| format!("could not open `{}`", path.display()))
})
.await?;
let file = blocking::spawn(move || std::fs::File::open(&path)).await?;
Ok(File::new(file, true))
}
@ -151,11 +147,7 @@ impl File {
/// ```
pub async fn create<P: AsRef<Path>>(path: P) -> io::Result<File> {
let path = path.as_ref().to_owned();
let file = spawn_blocking(move || {
std::fs::File::create(&path)
.context(|| format!("could not create `{}`", path.display()))
})
.await?;
let file = blocking::spawn(move || std::fs::File::create(&path)).await?;
Ok(File::new(file, true))
}
@ -188,7 +180,7 @@ impl File {
})
.await?;
spawn_blocking(move || state.file.sync_all()).await
blocking::spawn(move || state.file.sync_all()).await
}
/// Synchronizes OS-internal buffered contents to disk.
@ -224,7 +216,7 @@ impl File {
})
.await?;
spawn_blocking(move || state.file.sync_data()).await
blocking::spawn(move || state.file.sync_data()).await
}
/// Truncates or extends the file.
@ -257,7 +249,7 @@ impl File {
})
.await?;
spawn_blocking(move || state.file.set_len(size)).await
blocking::spawn(move || state.file.set_len(size)).await
}
/// Reads the file's metadata.
@ -276,7 +268,7 @@ impl File {
/// ```
pub async fn metadata(&self) -> io::Result<Metadata> {
let file = self.file.clone();
spawn_blocking(move || file.metadata()).await
blocking::spawn(move || file.metadata()).await
}
/// Changes the permissions on the file.
@ -305,7 +297,7 @@ impl File {
/// ```
pub async fn set_permissions(&self, perm: Permissions) -> io::Result<()> {
let file = self.file.clone();
spawn_blocking(move || file.set_permissions(perm)).await
blocking::spawn(move || file.set_permissions(perm)).await
}
}
@ -315,7 +307,7 @@ impl Drop for File {
// non-blocking fashion, but our only other option here is losing data remaining in the
// write cache. Good task schedulers should be resilient to occasional blocking hiccups in
// file destructors so we don't expect this to be a common problem in practice.
let _ = smol::block_on(self.flush());
let _ = task::block_on(self.flush());
}
}
@ -673,7 +665,7 @@ impl LockGuard<State> {
if available > 0 || self.cache.is_empty() {
// Copy data from the cache into the buffer.
let n = cmp::min(available, buf.len());
buf[..n].copy_from_slice(&self.cache[start..(start + n)]);
buf[..n].copy_from_slice(&self.cache[start..n]);
// Move the read cursor forward.
self.mode = Mode::Reading(start + n);
@ -700,7 +692,7 @@ impl LockGuard<State> {
self.register(cx);
// Start a read operation asynchronously.
spawn_blocking(move || {
blocking::spawn(move || {
// Read some data from the file into the cache.
let res = {
let State { file, cache, .. } = &mut *self;
@ -741,10 +733,7 @@ impl LockGuard<State> {
if n > 0 {
// Seek `n` bytes backwards. This call should not block because it only changes
// the internal offset into the file and doesn't touch the actual file on disk.
//
// We ignore errors here because special files like `/dev/random` are not
// seekable.
let _ = (&*self.file).seek(SeekFrom::Current(-(n as i64)));
(&*self.file).seek(SeekFrom::Current(-(n as i64)))?;
}
// Switch to idle mode.
@ -812,7 +801,7 @@ impl LockGuard<State> {
self.register(cx);
// Start a write operation asynchronously.
spawn_blocking(move || {
blocking::spawn(move || {
match (&*self.file).write_all(&self.cache) {
Ok(_) => {
// Switch to idle mode.
@ -845,7 +834,7 @@ impl LockGuard<State> {
self.register(cx);
// Start a flush operation asynchronously.
spawn_blocking(move || {
blocking::spawn(move || {
match (&*self.file).flush() {
Ok(()) => {
// Mark the file as flushed.
@ -867,15 +856,3 @@ impl LockGuard<State> {
Poll::Ready(Ok(()))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn async_file_drop() {
crate::task::block_on(async move {
File::open(file!()).await.unwrap();
});
}
}

@ -40,7 +40,7 @@ cfg_docs! {
/// # Ok(()) }) }
/// ```
pub fn is_dir(&self) -> bool {
unreachable!("this impl only appears in the rendered docs")
unimplemented!()
}
/// Returns `true` if this file type represents a regular file.
@ -60,7 +60,7 @@ cfg_docs! {
/// # Ok(()) }) }
/// ```
pub fn is_file(&self) -> bool {
unreachable!("this impl only appears in the rendered docs")
unimplemented!()
}
/// Returns `true` if this file type represents a symbolic link.
@ -78,7 +78,7 @@ cfg_docs! {
/// # Ok(()) }) }
/// ```
pub fn is_symlink(&self) -> bool {
unreachable!("this impl only appears in the rendered docs")
unimplemented!()
}
}
}

@ -1,7 +1,6 @@
use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use crate::utils::Context as _;
use crate::task::blocking;
/// Creates a hard link on the filesystem.
///
@ -33,14 +32,5 @@ use crate::utils::Context as _;
pub async fn hard_link<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> io::Result<()> {
let from = from.as_ref().to_owned();
let to = to.as_ref().to_owned();
spawn_blocking(move || {
std::fs::hard_link(&from, &to).context(|| {
format!(
"could not create a hard link from `{}` to `{}`",
from.display(),
to.display()
)
})
})
.await
blocking::spawn(move || std::fs::hard_link(&from, &to)).await
}

@ -1,6 +1,6 @@
use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use crate::task::blocking;
/// Reads metadata for a path.
///
@ -34,7 +34,7 @@ use crate::task::spawn_blocking;
/// ```
pub async fn metadata<P: AsRef<Path>>(path: P) -> io::Result<Metadata> {
let path = path.as_ref().to_owned();
spawn_blocking(move || std::fs::metadata(path)).await
blocking::spawn(move || std::fs::metadata(path)).await
}
cfg_not_docs! {
@ -78,7 +78,7 @@ cfg_docs! {
/// # Ok(()) }) }
/// ```
pub fn file_type(&self) -> FileType {
unreachable!("this impl only appears in the rendered docs")
unimplemented!()
}
/// Returns `true` if this metadata is for a regular directory.
@ -98,7 +98,7 @@ cfg_docs! {
/// # Ok(()) }) }
/// ```
pub fn is_dir(&self) -> bool {
unreachable!("this impl only appears in the rendered docs")
unimplemented!()
}
/// Returns `true` if this metadata is for a regular file.
@ -118,7 +118,7 @@ cfg_docs! {
/// # Ok(()) }) }
/// ```
pub fn is_file(&self) -> bool {
unreachable!("this impl only appears in the rendered docs")
unimplemented!()
}
/// Returns the file size in bytes.
@ -136,7 +136,7 @@ cfg_docs! {
/// # Ok(()) }) }
/// ```
pub fn len(&self) -> u64 {
unreachable!("this impl only appears in the rendered docs")
unimplemented!()
}
/// Returns the permissions from this metadata.
@ -154,7 +154,7 @@ cfg_docs! {
/// # Ok(()) }) }
/// ```
pub fn permissions(&self) -> Permissions {
unreachable!("this impl only appears in the rendered docs")
unimplemented!()
}
/// Returns the last modification time.
@ -177,7 +177,7 @@ cfg_docs! {
/// # Ok(()) }) }
/// ```
pub fn modified(&self) -> io::Result<SystemTime> {
unreachable!("this impl only appears in the rendered docs")
unimplemented!()
}
/// Returns the last access time.
@ -200,7 +200,7 @@ cfg_docs! {
/// # Ok(()) }) }
/// ```
pub fn accessed(&self) -> io::Result<SystemTime> {
unreachable!("this impl only appears in the rendered docs")
unimplemented!()
}
/// Returns the creation time.
@ -223,7 +223,7 @@ cfg_docs! {
/// # Ok(()) }) }
/// ```
pub fn created(&self) -> io::Result<SystemTime> {
unreachable!("this impl only appears in the rendered docs")
unimplemented!()
}
}
}

@ -3,13 +3,11 @@
//! This module is an async version of [`std::fs`].
//!
//! [`os::unix::fs`]: ../os/unix/fs/index.html
//! [`os::windows::fs`]: ../os/windows/fs/index.html
//! [`std::fs`]: https://doc.rust-lang.org/std/fs/index.html
//!
//! # Platform-specific extensions
//!
//! * Unix: use the [`os::unix::fs`] module.
//! * Windows: use the [`os::windows::fs`] module.
//!
//! # Examples
//!

@ -3,7 +3,7 @@ use std::future::Future;
use crate::fs::File;
use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use crate::task::blocking;
/// A builder for opening files with configurable options.
///
@ -285,7 +285,7 @@ impl OpenOptions {
let path = path.as_ref().to_owned();
let options = self.0.clone();
async move {
let file = spawn_blocking(move || options.open(path)).await?;
let file = blocking::spawn(move || options.open(path)).await?;
Ok(File::new(file, true))
}
}

@ -29,7 +29,7 @@ cfg_docs! {
/// # Ok(()) }) }
/// ```
pub fn readonly(&self) -> bool {
unreachable!("this impl only appears in the rendered docs")
unimplemented!()
}
/// Configures the read-only flag.
@ -50,7 +50,7 @@ cfg_docs! {
/// # Ok(()) }) }
/// ```
pub fn set_readonly(&mut self, readonly: bool) {
unreachable!("this impl only appears in the rendered docs")
unimplemented!()
}
}
}

@ -1,7 +1,6 @@
use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use crate::utils::Context as _;
use crate::task::blocking;
/// Reads the entire contents of a file as raw bytes.
///
@ -37,8 +36,5 @@ use crate::utils::Context as _;
/// ```
pub async fn read<P: AsRef<Path>>(path: P) -> io::Result<Vec<u8>> {
let path = path.as_ref().to_owned();
spawn_blocking(move || {
std::fs::read(&path).context(|| format!("could not read file `{}`", path.display()))
})
.await
blocking::spawn(move || std::fs::read(path)).await
}

@ -1,12 +1,11 @@
use std::future::Future;
use std::pin::Pin;
use crate::fs::DirEntry;
use crate::future::Future;
use crate::io;
use crate::path::Path;
use crate::stream::Stream;
use crate::task::{spawn_blocking, Context, JoinHandle, Poll};
use crate::utils::Context as _;
use crate::task::{blocking, Context, JoinHandle, Poll};
/// Returns a stream of entries in a directory.
///
@ -46,12 +45,9 @@ use crate::utils::Context as _;
/// ```
pub async fn read_dir<P: AsRef<Path>>(path: P) -> io::Result<ReadDir> {
let path = path.as_ref().to_owned();
spawn_blocking(move || {
std::fs::read_dir(&path)
.context(|| format!("could not read directory `{}`", path.display()))
})
.await
.map(ReadDir::new)
blocking::spawn(move || std::fs::read_dir(path))
.await
.map(ReadDir::new)
}
/// A stream of entries in a directory.
@ -95,7 +91,7 @@ impl Stream for ReadDir {
let mut inner = opt.take().unwrap();
// Start the operation asynchronously.
self.0 = State::Busy(spawn_blocking(move || {
self.0 = State::Busy(blocking::spawn(move || {
let next = inner.next();
(inner, next)
}));
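The `ReadDir` stream above alternates between an idle state that owns the blocking `std::fs::ReadDir` iterator and a busy state that holds a `JoinHandle` for the task currently pulling the next entry. The same round trip can be written by hand; a hedged sketch using the public `task::spawn_blocking` (gated behind the `unstable` feature in this era of the crate) instead of the internal `blocking::spawn`:

```rust
use async_std::task;

fn main() -> std::io::Result<()> {
    task::block_on(async {
        let mut dir = std::fs::read_dir(".")?;
        loop {
            // Move the iterator onto the blocking pool, pull one entry, and
            // hand both back -- the round trip ReadDir makes for every item.
            let (returned, next) = task::spawn_blocking(move || {
                let next = dir.next();
                (dir, next)
            })
            .await;
            dir = returned;

            match next {
                Some(entry) => println!("{}", entry?.path().display()),
                None => break,
            }
        }
        Ok(())
    })
}
```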

@ -1,7 +1,6 @@
use crate::io;
use crate::path::{Path, PathBuf};
use crate::task::spawn_blocking;
use crate::utils::Context as _;
use crate::task::blocking;
/// Reads a symbolic link and returns the path it points to.
///
@ -29,10 +28,5 @@ use crate::utils::Context as _;
/// ```
pub async fn read_link<P: AsRef<Path>>(path: P) -> io::Result<PathBuf> {
let path = path.as_ref().to_owned();
spawn_blocking(move || {
std::fs::read_link(&path)
.map(Into::into)
.context(|| format!("could not read link `{}`", path.display()))
})
.await
blocking::spawn(move || std::fs::read_link(path).map(Into::into)).await
}

@ -1,7 +1,6 @@
use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use crate::utils::Context as _;
use crate::task::blocking;
/// Reads the entire contents of a file as a string.
///
@ -38,9 +37,5 @@ use crate::utils::Context as _;
/// ```
pub async fn read_to_string<P: AsRef<Path>>(path: P) -> io::Result<String> {
let path = path.as_ref().to_owned();
spawn_blocking(move || {
std::fs::read_to_string(&path)
.context(|| format!("could not read file `{}`", path.display()))
})
.await
blocking::spawn(move || std::fs::read_to_string(path)).await
}

@ -1,7 +1,6 @@
use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use crate::utils::Context as _;
use crate::task::blocking;
/// Removes an empty directory.
///
@ -30,9 +29,5 @@ use crate::utils::Context as _;
/// ```
pub async fn remove_dir<P: AsRef<Path>>(path: P) -> io::Result<()> {
let path = path.as_ref().to_owned();
spawn_blocking(move || {
std::fs::remove_dir(&path)
.context(|| format!("could not remove directory `{}`", path.display()))
})
.await
blocking::spawn(move || std::fs::remove_dir(path)).await
}

@ -1,7 +1,6 @@
use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use crate::utils::Context as _;
use crate::task::blocking;
/// Removes a directory and all of its contents.
///
@ -30,9 +29,5 @@ use crate::utils::Context as _;
/// ```
pub async fn remove_dir_all<P: AsRef<Path>>(path: P) -> io::Result<()> {
let path = path.as_ref().to_owned();
spawn_blocking(move || {
std::fs::remove_dir_all(&path)
.context(|| format!("could not remove directory `{}`", path.display()))
})
.await
blocking::spawn(move || std::fs::remove_dir_all(path)).await
}

@ -1,7 +1,6 @@
use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use crate::utils::Context as _;
use crate::task::blocking;
/// Removes a file.
///
@ -30,9 +29,5 @@ use crate::utils::Context as _;
/// ```
pub async fn remove_file<P: AsRef<Path>>(path: P) -> io::Result<()> {
let path = path.as_ref().to_owned();
spawn_blocking(move || {
std::fs::remove_file(&path)
.context(|| format!("could not remove file `{}`", path.display()))
})
.await
blocking::spawn(move || std::fs::remove_file(path)).await
}

@ -1,7 +1,6 @@
use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use crate::utils::Context as _;
use crate::task::blocking;
/// Renames a file or directory to a new location.
///
@ -35,14 +34,5 @@ use crate::utils::Context as _;
pub async fn rename<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> io::Result<()> {
let from = from.as_ref().to_owned();
let to = to.as_ref().to_owned();
spawn_blocking(move || {
std::fs::rename(&from, &to).context(|| {
format!(
"could not rename `{}` to `{}`",
from.display(),
to.display()
)
})
})
.await
blocking::spawn(move || std::fs::rename(&from, &to)).await
}

@ -1,7 +1,7 @@
use crate::fs::Permissions;
use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use crate::task::blocking;
/// Changes the permissions of a file or directory.
///
@ -32,5 +32,5 @@ use crate::task::spawn_blocking;
/// ```
pub async fn set_permissions<P: AsRef<Path>>(path: P, perm: Permissions) -> io::Result<()> {
let path = path.as_ref().to_owned();
spawn_blocking(move || std::fs::set_permissions(path, perm)).await
blocking::spawn(move || std::fs::set_permissions(path, perm)).await
}

@ -1,7 +1,7 @@
use crate::fs::Metadata;
use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use crate::task::blocking;
/// Reads metadata for a path without following symbolic links.
///
@ -34,5 +34,5 @@ use crate::task::spawn_blocking;
/// ```
pub async fn symlink_metadata<P: AsRef<Path>>(path: P) -> io::Result<Metadata> {
let path = path.as_ref().to_owned();
spawn_blocking(move || std::fs::symlink_metadata(path)).await
blocking::spawn(move || std::fs::symlink_metadata(path)).await
}

@ -1,7 +1,6 @@
use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use crate::utils::Context as _;
use crate::task::blocking;
/// Writes a slice of bytes as the new contents of a file.
///
@ -34,9 +33,5 @@ use crate::utils::Context as _;
pub async fn write<P: AsRef<Path>, C: AsRef<[u8]>>(path: P, contents: C) -> io::Result<()> {
let path = path.as_ref().to_owned();
let contents = contents.as_ref().to_owned();
spawn_blocking(move || {
std::fs::write(&path, contents)
.context(|| format!("could not write to file `{}`", path.display()))
})
.await
blocking::spawn(move || std::fs::write(path, contents)).await
}
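Every function in this stretch of the diff follows the same recipe: turn the borrowed path into an owned `PathBuf`, move it (and any other inputs) onto the blocking pool, run the corresponding `std::fs` call there, and await the result. A hedged sketch of that shape outside the crate, with a made-up `file_len` helper and again using the public `spawn_blocking` in place of the internal `blocking::spawn`:

```rust
use std::path::{Path, PathBuf};

use async_std::io;
use async_std::task::{block_on, spawn_blocking};

// Hypothetical wrapper showing the pattern shared by the fs functions above.
async fn file_len<P: AsRef<Path>>(path: P) -> io::Result<u64> {
    // 1. Own the input so it can be sent to another thread.
    let path: PathBuf = path.as_ref().to_owned();
    // 2. Run the synchronous std call on the blocking pool and await it.
    spawn_blocking(move || std::fs::metadata(&path).map(|m| m.len())).await
}

fn main() -> io::Result<()> {
    block_on(async {
        println!("{} bytes", file_len("Cargo.toml").await?);
        Ok(())
    })
}
```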

@ -0,0 +1,169 @@
cfg_unstable! {
mod delay;
use std::time::Duration;
use delay::DelayFuture;
}
extension_trait! {
use std::pin::Pin;
use std::ops::{Deref, DerefMut};
use crate::task::{Context, Poll};
#[doc = r#"
A future represents an asynchronous computation.
A future is a value that may not have finished computing yet. This kind of
"asynchronous value" makes it possible for a thread to continue doing useful
work while it waits for the value to become available.
# The `poll` method
The core method of future, `poll`, *attempts* to resolve the future into a
final value. This method does not block if the value is not ready. Instead,
the current task is scheduled to be woken up when it's possible to make
further progress by `poll`ing again. The `context` passed to the `poll`
method can provide a [`Waker`], which is a handle for waking up the current
task.
When using a future, you generally won't call `poll` directly, but instead
`.await` the value.
[`Waker`]: ../task/struct.Waker.html
"#]
pub trait Future {
#[doc = r#"
The type of value produced on completion.
"#]
type Output;
#[doc = r#"
Attempt to resolve the future to a final value, registering
the current task for wakeup if the value is not yet available.
# Return value
This function returns:
- [`Poll::Pending`] if the future is not ready yet
- [`Poll::Ready(val)`] with the result `val` of this future if it
finished successfully.
Once a future has finished, clients should not `poll` it again.
When a future is not ready yet, `poll` returns `Poll::Pending` and
stores a clone of the [`Waker`] copied from the current [`Context`].
This [`Waker`] is then woken once the future can make progress.
For example, a future waiting for a socket to become
readable would call `.clone()` on the [`Waker`] and store it.
When a signal arrives elsewhere indicating that the socket is readable,
[`Waker::wake`] is called and the socket future's task is awoken.
Once a task has been woken up, it should attempt to `poll` the future
again, which may or may not produce a final value.
Note that on multiple calls to `poll`, only the [`Waker`] from the
[`Context`] passed to the most recent call should be scheduled to
receive a wakeup.
# Runtime characteristics
Futures alone are *inert*; they must be *actively* `poll`ed to make
progress, meaning that each time the current task is woken up, it should
actively re-`poll` pending futures that it still has an interest in.
The `poll` function is not called repeatedly in a tight loop -- instead,
it should only be called when the future indicates that it is ready to
make progress (by calling `wake()`). If you're familiar with the
`poll(2)` or `select(2)` syscalls on Unix it's worth noting that futures
typically do *not* suffer the same problems of "all wakeups must poll
all events"; they are more like `epoll(4)`.
An implementation of `poll` should strive to return quickly, and should
not block. Returning quickly prevents unnecessarily clogging up
threads or event loops. If it is known ahead of time that a call to
`poll` may end up taking awhile, the work should be offloaded to a
thread pool (or something similar) to ensure that `poll` can return
quickly.
# Panics
Once a future has completed (returned `Ready` from `poll`), calling its
`poll` method again may panic, block forever, or cause other kinds of
problems; the `Future` trait places no requirements on the effects of
such a call. However, as the `poll` method is not marked `unsafe`,
Rust's usual rules apply: calls must never cause undefined behavior
(memory corruption, incorrect use of `unsafe` functions, or the like),
regardless of the future's state.
[`Poll::Pending`]: ../task/enum.Poll.html#variant.Pending
[`Poll::Ready(val)`]: ../task/enum.Poll.html#variant.Ready
[`Context`]: ../task/struct.Context.html
[`Waker`]: ../task/struct.Waker.html
[`Waker::wake`]: ../task/struct.Waker.html#method.wake
"#]
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output>;
}
pub trait FutureExt: std::future::Future {
/// Returns a Future that delays execution for a specified time.
///
/// # Examples
///
/// ```
/// # async_std::task::block_on(async {
/// use async_std::prelude::*;
/// use async_std::future;
/// use std::time::Duration;
///
/// let a = future::ready(1).delay(Duration::from_millis(2000));
/// dbg!(a.await);
/// # })
/// ```
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
#[cfg(any(feature = "unstable", feature = "docs"))]
fn delay(self, dur: Duration) -> impl Future<Output = Self::Output> [DelayFuture<Self>]
where
Self: Future + Sized
{
DelayFuture::new(self, dur)
}
}
impl<F: Future + Unpin + ?Sized> Future for Box<F> {
type Output = F::Output;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
unreachable!("this impl only appears in the rendered docs")
}
}
impl<F: Future + Unpin + ?Sized> Future for &mut F {
type Output = F::Output;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
unreachable!("this impl only appears in the rendered docs")
}
}
impl<P> Future for Pin<P>
where
P: DerefMut + Unpin,
<P as Deref>::Target: Future,
{
type Output = <<P as Deref>::Target as Future>::Output;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
unreachable!("this impl only appears in the rendered docs")
}
}
impl<F: Future> Future for std::panic::AssertUnwindSafe<F> {
type Output = F::Output;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
unreachable!("this impl only appears in the rendered docs")
}
}
}

@ -1,26 +1,26 @@
use std::future::Future;
use std::pin::Pin;
use std::time::Duration;
use futures_timer::Delay;
use pin_project_lite::pin_project;
use crate::future::Future;
use crate::task::{Context, Poll};
use crate::utils::{timer_after, Timer};
pin_project! {
#[doc(hidden)]
#[allow(missing_debug_implementations)]
#[derive(Debug)]
pub struct DelayFuture<F> {
#[pin]
future: F,
#[pin]
delay: Timer,
delay: Delay,
}
}
impl<F> DelayFuture<F> {
pub fn new(future: F, dur: Duration) -> DelayFuture<F> {
let delay = timer_after(dur);
let delay = Delay::new(dur);
DelayFuture { future, delay }
}
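The hunk stops before `DelayFuture`'s `poll` implementation. Conceptually it first drives the timer to completion and only then polls the wrapped future; a hedged, self-contained re-creation of that shape (using `futures-timer`'s `Delay`, as on one side of this hunk, and not the crate's actual code):

```rust
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;

use futures_timer::Delay;
use pin_project_lite::pin_project;

pin_project! {
    // Illustrative stand-in for the DelayFuture shown above.
    struct DelayFuture<F> {
        #[pin]
        future: F,
        #[pin]
        delay: Delay,
    }
}

impl<F> DelayFuture<F> {
    fn new(future: F, dur: Duration) -> Self {
        DelayFuture { future, delay: Delay::new(dur) }
    }
}

impl<F: Future> Future for DelayFuture<F> {
    type Output = F::Output;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.project();
        // Wait out the delay first; only then start polling the inner future.
        match this.delay.poll(cx) {
            Poll::Ready(()) => this.future.poll(cx),
            Poll::Pending => Poll::Pending,
        }
    }
}

fn main() {
    async_std::task::block_on(async {
        let out = DelayFuture::new(async { 42 }, Duration::from_millis(50)).await;
        assert_eq!(out, 42);
    });
}
```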

@ -1,52 +0,0 @@
use std::future::Future;
use std::pin::Pin;
use crate::future::IntoFuture;
use crate::task::{ready, Context, Poll};
#[doc(hidden)]
#[allow(missing_debug_implementations)]
pub struct FlattenFuture<Fut1, Fut2> {
state: State<Fut1, Fut2>,
}
#[derive(Debug)]
enum State<Fut1, Fut2> {
First(Fut1),
Second(Fut2),
Empty,
}
impl<Fut1, Fut2> FlattenFuture<Fut1, Fut2> {
pub(crate) fn new(future: Fut1) -> FlattenFuture<Fut1, Fut2> {
FlattenFuture {
state: State::First(future),
}
}
}
impl<Fut1> Future for FlattenFuture<Fut1, <Fut1::Output as IntoFuture>::Future>
where
Fut1: Future,
Fut1::Output: IntoFuture,
{
type Output = <Fut1::Output as IntoFuture>::Output;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let Self { state } = unsafe { self.get_unchecked_mut() };
loop {
match state {
State::First(fut1) => {
let fut2 = ready!(unsafe { Pin::new_unchecked(fut1) }.poll(cx)).into_future();
*state = State::Second(fut2);
}
State::Second(fut2) => {
let v = ready!(unsafe { Pin::new_unchecked(fut2) }.poll(cx));
*state = State::Empty;
return Poll::Ready(v);
}
State::Empty => panic!("polled a completed future"),
}
}
}
}

@ -1,60 +0,0 @@
use std::pin::Pin;
use crate::future::MaybeDone;
use pin_project_lite::pin_project;
use crate::task::{Context, Poll};
use std::future::Future;
pin_project! {
#[allow(missing_docs)]
#[allow(missing_debug_implementations)]
pub struct Join<L, R>
where
L: Future,
R: Future,
{
#[pin] left: MaybeDone<L>,
#[pin] right: MaybeDone<R>,
}
}
impl<L, R> Join<L, R>
where
L: Future,
R: Future,
{
pub(crate) fn new(left: L, right: R) -> Self {
Self {
left: MaybeDone::new(left),
right: MaybeDone::new(right),
}
}
}
impl<L, R> Future for Join<L, R>
where
L: Future,
R: Future,
{
type Output = (L::Output, R::Output);
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
let mut left = this.left;
let mut right = this.right;
let is_left_ready = Future::poll(Pin::new(&mut left), cx).is_ready();
if is_left_ready && right.as_ref().output().is_some() {
return Poll::Ready((left.take().unwrap(), right.take().unwrap()));
}
let is_right_ready = Future::poll(Pin::new(&mut right), cx).is_ready();
if is_right_ready && left.as_ref().output().is_some() {
return Poll::Ready((left.take().unwrap(), right.take().unwrap()));
}
Poll::Pending
}
}

@ -1,432 +0,0 @@
cfg_unstable! {
mod delay;
mod flatten;
mod race;
mod try_race;
mod join;
mod try_join;
use std::time::Duration;
use delay::DelayFuture;
use flatten::FlattenFuture;
use crate::future::IntoFuture;
use race::Race;
use try_race::TryRace;
use join::Join;
use try_join::TryJoin;
}
cfg_unstable_default! {
use crate::future::timeout::TimeoutFuture;
}
extension_trait! {
use core::pin::Pin;
use core::ops::{Deref, DerefMut};
use crate::task::{Context, Poll};
#[doc = r#"
A future represents an asynchronous computation.
A future is a value that may not have finished computing yet. This kind of
"asynchronous value" makes it possible for a thread to continue doing useful
work while it waits for the value to become available.
The [provided methods] do not really exist in the trait itself, but they become
available when [`FutureExt`] from the [prelude] is imported:
```
# #[allow(unused_imports)]
use async_std::prelude::*;
```
# The `poll` method
The core method of future, `poll`, *attempts* to resolve the future into a
final value. This method does not block if the value is not ready. Instead,
the current task is scheduled to be woken up when it's possible to make
further progress by `poll`ing again. The `context` passed to the `poll`
method can provide a [`Waker`], which is a handle for waking up the current
task.
When using a future, you generally won't call `poll` directly, but instead
`.await` the value.
[`Waker`]: ../task/struct.Waker.html
[provided methods]: #provided-methods
[`FutureExt`]: ../prelude/trait.FutureExt.html
[prelude]: ../prelude/index.html
"#]
pub trait Future {
#[doc = r#"
The type of value produced on completion.
"#]
type Output;
#[doc = r#"
Attempt to resolve the future to a final value, registering
the current task for wakeup if the value is not yet available.
# Return value
This function returns:
- [`Poll::Pending`] if the future is not ready yet
- [`Poll::Ready(val)`] with the result `val` of this future if it
finished successfully.
Once a future has finished, clients should not `poll` it again.
When a future is not ready yet, `poll` returns `Poll::Pending` and
stores a clone of the [`Waker`] copied from the current [`Context`].
This [`Waker`] is then woken once the future can make progress.
For example, a future waiting for a socket to become
readable would call `.clone()` on the [`Waker`] and store it.
When a signal arrives elsewhere indicating that the socket is readable,
[`Waker::wake`] is called and the socket future's task is awoken.
Once a task has been woken up, it should attempt to `poll` the future
again, which may or may not produce a final value.
Note that on multiple calls to `poll`, only the [`Waker`] from the
[`Context`] passed to the most recent call should be scheduled to
receive a wakeup.
# Runtime characteristics
Futures alone are *inert*; they must be *actively* `poll`ed to make
progress, meaning that each time the current task is woken up, it should
actively re-`poll` pending futures that it still has an interest in.
The `poll` function is not called repeatedly in a tight loop -- instead,
it should only be called when the future indicates that it is ready to
make progress (by calling `wake()`). If you're familiar with the
`poll(2)` or `select(2)` syscalls on Unix it's worth noting that futures
typically do *not* suffer the same problems of "all wakeups must poll
all events"; they are more like `epoll(4)`.
An implementation of `poll` should strive to return quickly, and should
not block. Returning quickly prevents unnecessarily clogging up
threads or event loops. If it is known ahead of time that a call to
`poll` may end up taking awhile, the work should be offloaded to a
thread pool (or something similar) to ensure that `poll` can return
quickly.
# Panics
Once a future has completed (returned `Ready` from `poll`), calling its
`poll` method again may panic, block forever, or cause other kinds of
problems; the `Future` trait places no requirements on the effects of
such a call. However, as the `poll` method is not marked `unsafe`,
Rust's usual rules apply: calls must never cause undefined behavior
(memory corruption, incorrect use of `unsafe` functions, or the like),
regardless of the future's state.
[`Poll::Pending`]: ../task/enum.Poll.html#variant.Pending
[`Poll::Ready(val)`]: ../task/enum.Poll.html#variant.Ready
[`Context`]: ../task/struct.Context.html
[`Waker`]: ../task/struct.Waker.html
[`Waker::wake`]: ../task/struct.Waker.html#method.wake
"#]
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output>;
}
#[doc = r#"
Extension methods for [`Future`].
[`Future`]: ../future/trait.Future.html
"#]
pub trait FutureExt: core::future::Future {
/// Returns a Future that delays execution for a specified time.
///
/// # Examples
///
/// ```
/// # async_std::task::block_on(async {
/// use async_std::prelude::*;
/// use async_std::future;
/// use std::time::Duration;
///
/// let a = future::ready(1).delay(Duration::from_millis(2000));
/// dbg!(a.await);
/// # })
/// ```
#[cfg(feature = "unstable")]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
fn delay(self, dur: Duration) -> impl Future<Output = Self::Output> [DelayFuture<Self>]
where
Self: Sized,
{
DelayFuture::new(self, dur)
}
/// Flatten out the execution of this future when the result itself
/// can be converted into another future.
///
/// # Examples
///
/// ```
/// # async_std::task::block_on(async {
/// use async_std::prelude::*;
///
/// let nested_future = async { async { 1 } };
/// let future = nested_future.flatten();
/// assert_eq!(future.await, 1);
/// # })
/// ```
#[cfg(feature = "unstable")]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
fn flatten(
self,
) -> impl Future<Output = <Self::Output as IntoFuture>::Output>
[FlattenFuture<Self, <Self::Output as IntoFuture>::Future>]
where
Self: Sized,
<Self as Future>::Output: IntoFuture,
{
FlattenFuture::new(self)
}
#[doc = r#"
Waits for one of two similarly-typed futures to complete.
Awaits multiple futures simultaneously, returning the output of the
first future that completes.
This function will return a new future which waits for either of the two
futures to complete. If multiple futures are completed at the same time,
resolution will occur in the order that they have been passed.
Note that this function consumes all futures passed, and once a future is
completed, all other futures are dropped.
# Examples
```
# async_std::task::block_on(async {
use async_std::prelude::*;
use async_std::future;
let a = future::pending();
let b = future::ready(1u8);
let c = future::ready(2u8);
let f = a.race(b).race(c);
assert_eq!(f.await, 1u8);
# });
```
"#]
#[cfg(feature = "unstable")]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
fn race<F>(
self,
other: F,
) -> impl Future<Output = <Self as std::future::Future>::Output> [Race<Self, F>]
where
Self: std::future::Future + Sized,
F: std::future::Future<Output = <Self as std::future::Future>::Output>,
{
Race::new(self, other)
}
#[doc = r#"
Waits for one of two similarly-typed fallible futures to complete.
Awaits multiple futures simultaneously, returning as soon as one completes successfully.
`try_race` is similar to [`race`], but keeps going if a future
resolves to an error, until all futures have been resolved, in which case
an error is returned.
The ordering of which value is yielded when two futures resolve
simultaneously is intentionally left unspecified.
[`race`]: #method.race
# Examples
```
# fn main() -> std::io::Result<()> { async_std::task::block_on(async {
#
use async_std::prelude::*;
use async_std::future;
use std::io::{Error, ErrorKind};
let a = future::pending::<Result<_, Error>>();
let b = future::ready(Err(Error::from(ErrorKind::Other)));
let c = future::ready(Ok(1u8));
let f = a.try_race(b).try_race(c);
assert_eq!(f.await?, 1u8);
#
# Ok(()) }) }
```
"#]
#[cfg(feature = "unstable")]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
fn try_race<F, T, E>(
self,
other: F
) -> impl Future<Output = <Self as std::future::Future>::Output> [TryRace<Self, F>]
where
Self: std::future::Future<Output = Result<T, E>> + Sized,
F: std::future::Future<Output = <Self as std::future::Future>::Output>,
{
TryRace::new(self, other)
}
#[doc = r#"
Waits for two similarly-typed futures to complete.
Awaits multiple futures simultaneously, returning the output of the
futures once both complete.
This function returns a new future which polls both futures
concurrently.
# Examples
```
# async_std::task::block_on(async {
use async_std::prelude::*;
use async_std::future;
let a = future::ready(1u8);
let b = future::ready(2u16);
let f = a.join(b);
assert_eq!(f.await, (1u8, 2u16));
# });
```
"#]
#[cfg(any(feature = "unstable", feature = "docs"))]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
fn join<F>(
self,
other: F
) -> impl Future<Output = (<Self as std::future::Future>::Output, <F as std::future::Future>::Output)> [Join<Self, F>]
where
Self: std::future::Future + Sized,
F: std::future::Future,
{
Join::new(self, other)
}
#[doc = r#"
Waits for two similarly-typed fallible futures to complete.
Awaits multiple futures simultaneously, returning all results once
complete.
`try_join` is similar to [`join`], but returns an error immediately
if a future resolves to an error.
[`join`]: #method.join
# Examples
```
# fn main() -> std::io::Result<()> { async_std::task::block_on(async {
#
use async_std::prelude::*;
use async_std::future;
let a = future::ready(Err::<u8, &str>("Error"));
let b = future::ready(Ok(1u8));
let f = a.try_join(b);
assert_eq!(f.await, Err("Error"));
let a = future::ready(Ok::<u8, String>(1u8));
let b = future::ready(Ok::<u16, String>(2u16));
let f = a.try_join(b);
assert_eq!(f.await, Ok((1u8, 2u16)));
#
# Ok(()) }) }
```
"#]
#[cfg(any(feature = "unstable", feature = "docs"))]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
fn try_join<F, A, B, E>(
self,
other: F
) -> impl Future<Output = Result<(A, B), E>> [TryJoin<Self, F>]
where
Self: std::future::Future<Output = Result<A, E>> + Sized,
F: std::future::Future<Output = Result<B, E>>,
{
TryJoin::new(self, other)
}
#[doc = r#"
Waits for both the future and a timeout; if the timeout completes before
the future, it returns a TimeoutError.
# Example
```
# async_std::task::block_on(async {
#
use std::time::Duration;
use async_std::prelude::*;
use async_std::future;
let fut = future::ready(0);
let dur = Duration::from_millis(100);
let res = fut.timeout(dur).await;
assert!(res.is_ok());
let fut = future::pending::<()>();
let dur = Duration::from_millis(100);
let res = fut.timeout(dur).await;
assert!(res.is_err())
#
# });
```
"#]
#[cfg(any(all(feature = "default", feature = "unstable"), feature = "docs"))]
#[cfg_attr(feature = "docs", doc(cfg(unstable)))]
fn timeout(self, dur: Duration) -> impl Future<Output = Self::Output> [TimeoutFuture<Self>]
where Self: Sized
{
TimeoutFuture::new(self, dur)
}
}
impl<F: Future + Unpin + ?Sized> Future for Box<F> {
type Output = F::Output;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
unreachable!("this impl only appears in the rendered docs")
}
}
impl<F: Future + Unpin + ?Sized> Future for &mut F {
type Output = F::Output;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
unreachable!("this impl only appears in the rendered docs")
}
}
impl<P> Future for Pin<P>
where
P: DerefMut + Unpin,
<P as Deref>::Target: Future,
{
type Output = <<P as Deref>::Target as Future>::Output;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
unreachable!("this impl only appears in the rendered docs")
}
}
impl<F: Future> Future for std::panic::AssertUnwindSafe<F> {
type Output = F::Output;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
unreachable!("this impl only appears in the rendered docs")
}
}
}

@ -1,57 +0,0 @@
use std::future::Future;
use std::pin::Pin;
use crate::future::MaybeDone;
use pin_project_lite::pin_project;
use crate::task::{Context, Poll};
pin_project! {
#[allow(missing_docs)]
#[allow(missing_debug_implementations)]
pub struct Race<L, R>
where
L: Future,
R: Future<Output = L::Output>
{
#[pin] left: MaybeDone<L>,
#[pin] right: MaybeDone<R>,
}
}
impl<L, R> Race<L, R>
where
L: Future,
R: Future<Output = L::Output>,
{
pub(crate) fn new(left: L, right: R) -> Self {
Self {
left: MaybeDone::new(left),
right: MaybeDone::new(right),
}
}
}
impl<L, R> Future for Race<L, R>
where
L: Future,
R: Future<Output = L::Output>,
{
type Output = L::Output;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
let mut left = this.left;
if Future::poll(Pin::new(&mut left), cx).is_ready() {
return Poll::Ready(left.take().unwrap());
}
let mut right = this.right;
if Future::poll(Pin::new(&mut right), cx).is_ready() {
return Poll::Ready(right.take().unwrap());
}
Poll::Pending
}
}

@ -1,72 +0,0 @@
use std::pin::Pin;
use crate::future::MaybeDone;
use pin_project_lite::pin_project;
use crate::task::{Context, Poll};
use std::future::Future;
pin_project! {
#[allow(missing_docs)]
#[allow(missing_debug_implementations)]
pub struct TryJoin<L, R>
where
L: Future,
R: Future,
{
#[pin] left: MaybeDone<L>,
#[pin] right: MaybeDone<R>,
}
}
impl<L, R> TryJoin<L, R>
where
L: Future,
R: Future,
{
pub(crate) fn new(left: L, right: R) -> Self {
Self {
left: MaybeDone::new(left),
right: MaybeDone::new(right),
}
}
}
impl<L, R, A, B, E> Future for TryJoin<L, R>
where
L: Future<Output = Result<A, E>>,
R: Future<Output = Result<B, E>>,
{
type Output = Result<(A, B), E>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
let mut left = this.left;
let mut right = this.right;
if Future::poll(Pin::new(&mut left), cx).is_ready() {
if left.as_ref().output().unwrap().is_err() {
return Poll::Ready(Err(left.take().unwrap().err().unwrap()));
} else if right.as_ref().output().is_some() {
return Poll::Ready(Ok((
left.take().unwrap().ok().unwrap(),
right.take().unwrap().ok().unwrap(),
)));
}
}
if Future::poll(Pin::new(&mut right), cx).is_ready() {
if right.as_ref().output().unwrap().is_err() {
return Poll::Ready(Err(right.take().unwrap().err().unwrap()));
} else if left.as_ref().output().is_some() {
return Poll::Ready(Ok((
left.take().unwrap().ok().unwrap(),
right.take().unwrap().ok().unwrap(),
)));
}
}
Poll::Pending
}
}

@ -1,66 +0,0 @@
use std::pin::Pin;
use crate::future::MaybeDone;
use pin_project_lite::pin_project;
use crate::task::{Context, Poll};
use std::future::Future;
pin_project! {
#[allow(missing_docs)]
#[allow(missing_debug_implementations)]
pub struct TryRace<L, R>
where
L: Future,
R: Future<Output = L::Output>
{
#[pin] left: MaybeDone<L>,
#[pin] right: MaybeDone<R>,
}
}
impl<L, R> TryRace<L, R>
where
L: Future,
R: Future<Output = L::Output>,
{
pub(crate) fn new(left: L, right: R) -> Self {
Self {
left: MaybeDone::new(left),
right: MaybeDone::new(right),
}
}
}
impl<L, R, T, E> Future for TryRace<L, R>
where
L: Future<Output = Result<T, E>>,
R: Future<Output = L::Output>,
{
type Output = L::Output;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
let mut left_errored = false;
// Check if the left future is ready & successful. Continue if not.
let mut left = this.left;
if Future::poll(Pin::new(&mut left), cx).is_ready() {
if left.as_ref().output().unwrap().is_ok() {
return Poll::Ready(left.take().unwrap());
} else {
left_errored = true;
}
}
// Check if the right future is ready & successful. Return err if left
// future also resolved to err. Continue if not.
let mut right = this.right;
let is_ready = Future::poll(Pin::new(&mut right), cx).is_ready();
if is_ready && (right.as_ref().output().unwrap().is_ok() || left_errored) {
return Poll::Ready(right.take().unwrap());
}
Poll::Pending
}
}

@ -1,4 +1,4 @@
use std::future::Future;
use crate::future::Future;
/// Convert a type into a `Future`.
///
@ -45,6 +45,7 @@ pub trait IntoFuture {
impl<T: Future> IntoFuture for T {
type Output = T::Output;
type Future = T;
fn into_future(self) -> Self::Future {

@ -1,79 +0,0 @@
//! A type that wraps a future to keep track of its completion status.
//!
//! This implementation was taken from the original `macro_rules` `join/try_join`
//! macros in the `futures-preview` crate.
use std::future::Future;
use std::mem;
use std::pin::Pin;
use std::task::{Context, Poll};
use futures_core::ready;
/// A future that may have completed.
#[derive(Debug)]
pub(crate) enum MaybeDone<Fut: Future> {
/// A not-yet-completed future
Future(Fut),
/// The output of the completed future
Done(Fut::Output),
/// The empty variant after the result of a [`MaybeDone`] has been
/// taken using the [`take`](MaybeDone::take) method.
Gone,
}
impl<Fut: Future> MaybeDone<Fut> {
/// Create a new instance of `MaybeDone`.
pub(crate) fn new(future: Fut) -> MaybeDone<Fut> {
Self::Future(future)
}
/// Returns an [`Option`] containing a reference to the output of the future.
/// The output of this method will be [`Some`] if and only if the inner
/// future has been completed and [`take`](MaybeDone::take)
/// has not yet been called.
#[inline]
pub(crate) fn output(self: Pin<&Self>) -> Option<&Fut::Output> {
let this = self.get_ref();
match this {
MaybeDone::Done(res) => Some(res),
_ => None,
}
}
/// Attempt to take the output of a `MaybeDone` without driving it
/// towards completion.
#[inline]
pub(crate) fn take(self: Pin<&mut Self>) -> Option<Fut::Output> {
unsafe {
let this = self.get_unchecked_mut();
match this {
MaybeDone::Done(_) => {}
MaybeDone::Future(_) | MaybeDone::Gone => return None,
};
if let MaybeDone::Done(output) = mem::replace(this, MaybeDone::Gone) {
Some(output)
} else {
unreachable!()
}
}
}
}
impl<Fut: Future> Future for MaybeDone<Fut> {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let res = unsafe {
match Pin::as_mut(&mut self).get_unchecked_mut() {
MaybeDone::Future(a) => ready!(Pin::new_unchecked(a).poll(cx)),
MaybeDone::Done(_) => return Poll::Ready(()),
MaybeDone::Gone => panic!("MaybeDone polled after value taken"),
}
};
self.set(MaybeDone::Done(res));
Poll::Ready(())
}
}

@ -4,71 +4,62 @@
//!
//! Often it's desirable to await multiple futures as if they were a single
//! future. The `join` family of operations converts multiple futures into a
//! single future that returns all of their outputs. The `race` family of
//! single future that returns all of their outputs. The `select` family of
//! operations converts multiple future into a single future that returns the
//! first output.
//!
//! For operating on futures the following functions can be used:
//! For operating on futures the following macros can be used:
//!
//! | Name | Return signature | When does it return? |
//! | --- | --- | --- |
//! | [`Future::join`] | `(T1, T2)` | Wait for all to complete
//! | [`Future::race`] | `T` | Return on first value
//! | Name | Return signature | When does it return? |
//! | --- | --- | --- |
//! | `future::join` | `(T1, T2)` | Wait for all to complete
//! | `future::select` | `T` | Return on first value
//!
//! ## Fallible Futures Concurrency
//!
//! For operating on futures that return `Result` additional `try_` variants of
//! the functions mentioned before can be used. These functions are aware of `Result`,
//! the macros mentioned before can be used. These macros are aware of `Result`,
//! and will behave slightly differently from their base variants.
//!
//! In the case of `try_join`, if any of the futures returns `Err` all
//! futures are dropped and an error is returned. This is referred to as
//! "short-circuiting".
//!
//! In the case of `try_race`, instead of returning the first future that
//! In the case of `try_select`, instead of returning the first future that
//! completes it returns the first future that _successfully_ completes. This
//! means `try_race` will keep going until any one of the futures returns
//! means `try_select` will keep going until any one of the futures returns
//! `Ok`, or _all_ futures have returned `Err`.
//!
//! However, sometimes it can be useful to use the base variants of the functions
//! However, sometimes it can be useful to use the base variants of the macros
//! even on futures that return `Result`. Here is an overview of operations that
//! work on `Result`, and their respective semantics:
//!
//! | Name | Return signature | When does it return? |
//! | --- | --- | --- |
//! | [`Future::join`] | `(Result<T, E>, Result<T, E>)` | Wait for all to complete
//! | [`Future::try_join`] | `Result<(T1, T2), E>` | Return on first `Err`, wait for all to complete
//! | [`Future::race`] | `Result<T, E>` | Return on first value
//! | [`Future::try_race`] | `Result<T, E>` | Return on first `Ok`, reject on last `Err`
//!
//! [`Future::join`]: trait.Future.html#method.join
//! [`Future::try_join`]: trait.Future.html#method.try_join
//! [`Future::race`]: trait.Future.html#method.race
//! [`Future::try_race`]: trait.Future.html#method.try_race
cfg_alloc! {
pub use future::Future;
pub(crate) mod future;
}
//! | Name | Return signature | When does it return? |
//! | --- | --- | --- |
//! | `future::join` | `(Result<T, E>, Result<T, E>)` | Wait for all to complete
//! | `future::try_join` | `Result<(T1, T2), E>` | Return on first `Err`, wait for all to complete
//! | `future::select` | `Result<T, E>` | Return on first value
//! | `future::try_select` | `Result<T, E>` | Return on first `Ok`, reject on last `Err`
cfg_std! {
pub use pending::pending;
pub use poll_fn::poll_fn;
pub use ready::ready;
#[doc(inline)]
pub use async_macros::{join, try_join};
mod pending;
mod poll_fn;
mod ready;
}
#[cfg(any(feature = "unstable", feature = "default"))]
pub use future::Future;
pub use pending::pending;
pub use poll_fn::poll_fn;
pub use ready::ready;
pub use timeout::{timeout, TimeoutError};
#[cfg(any(feature = "unstable", feature = "default"))]
pub(crate) mod future;
mod pending;
mod poll_fn;
mod ready;
mod timeout;
cfg_unstable! {
#[doc(inline)]
pub use async_macros::{select, try_select};
pub use into_future::IntoFuture;
pub(crate) use maybe_done::MaybeDone;
mod into_future;
mod maybe_done;
}
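
A rough illustration of the "wait for all" versus "short-circuit on the first `Err`" semantics from the tables above, sketched with the equivalent free functions in the `futures` crate (`future::join` / `future::try_join`) rather than the macros re-exported by this module:

```rust
use futures::executor::block_on;
use futures::future;

fn main() {
    block_on(async {
        // `join`: wait for all futures and return every output.
        let both = future::join(future::ready(1u8), future::ready(2u8)).await;
        assert_eq!(both, (1, 2));

        // `try_join`: short-circuit, the first `Err` aborts the whole join.
        let res: Result<(u8, u8), &str> =
            future::try_join(future::ready(Ok(1u8)), future::ready(Err("boom"))).await;
        assert_eq!(res, Err("boom"));
    });
}
```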

@ -1,7 +1,7 @@
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use crate::future::Future;
use crate::task::{Context, Poll};
/// Never resolves to a value.

@ -1,6 +1,6 @@
use std::pin::Pin;
use std::future::Future;
use crate::future::Future;
use crate::task::{Context, Poll};
/// Creates a new future wrapping around a function returning [`Poll`].

@ -1,13 +1,13 @@
use std::error::Error;
use std::fmt;
use std::future::Future;
use std::pin::Pin;
use std::time::Duration;
use futures_timer::Delay;
use pin_project_lite::pin_project;
use crate::future::Future;
use crate::task::{Context, Poll};
use crate::utils::{timer_after, Timer};
/// Awaits a future or times out after a duration of time.
///
@ -33,26 +33,20 @@ pub async fn timeout<F, T>(dur: Duration, f: F) -> Result<T, TimeoutError>
where
F: Future<Output = T>,
{
TimeoutFuture::new(f, dur).await
let f = TimeoutFuture {
future: f,
delay: Delay::new(dur),
};
f.await
}
pin_project! {
/// A future that times out after a duration of time.
pub struct TimeoutFuture<F> {
struct TimeoutFuture<F> {
#[pin]
future: F,
#[pin]
delay: Timer,
}
}
impl<F> TimeoutFuture<F> {
#[allow(dead_code)]
pub(super) fn new(future: F, dur: Duration) -> TimeoutFuture<F> {
TimeoutFuture {
future,
delay: timer_after(dur),
}
delay: Delay,
}
}
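
Given the signature above (`timeout(dur, f) -> Result<T, TimeoutError>`), a small usage sketch, assuming `future::timeout` and `future::pending` are available in `async_std`'s default feature set:

```rust
use std::time::Duration;

use async_std::future;
use async_std::task;

fn main() {
    task::block_on(async {
        // Completes well within the deadline: the value comes back as `Ok`.
        let fast = future::timeout(Duration::from_secs(1), async { 42u8 }).await;
        assert_eq!(fast.unwrap(), 42);

        // Never completes: after one millisecond we get a `TimeoutError`.
        let slow = future::timeout(Duration::from_millis(1), future::pending::<u8>()).await;
        assert!(slow.is_err());
    });
}
```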

@ -25,19 +25,17 @@ extension_trait! {
[`std::io::BufRead`].
The [provided methods] do not really exist in the trait itself, but they become
available when [`BufReadExt`] from the [prelude] is imported:
available when the prelude is imported:
```
# #[allow(unused_imports)]
use async_std::io::prelude::*;
use async_std::prelude::*;
```
[`std::io::BufRead`]: https://doc.rust-lang.org/std/io/trait.BufRead.html
[`futures::io::AsyncBufRead`]:
https://docs.rs/futures/0.3/futures/io/trait.AsyncBufRead.html
https://docs.rs/futures-preview/0.3.0-alpha.17/futures/io/trait.AsyncBufRead.html
[provided methods]: #provided-methods
[`BufReadExt`]: ../io/prelude/trait.BufReadExt.html
[prelude]: ../prelude/index.html
"#]
pub trait BufRead {
#[doc = r#"
@ -64,11 +62,6 @@ extension_trait! {
fn consume(self: Pin<&mut Self>, amt: usize);
}
#[doc = r#"
Extension methods for [`BufRead`].
[`BufRead`]: ../trait.BufRead.html
"#]
pub trait BufReadExt: futures_io::AsyncBufRead {
#[doc = r#"
Reads all bytes into `buf` until the delimiter `byte` or EOF is reached.
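
A minimal sketch of `read_until` as described above, assuming `async_std::io::Cursor` (which appears elsewhere in this diff) and the prelude import:

```rust
use async_std::io::Cursor;
use async_std::prelude::*;

fn main() -> std::io::Result<()> {
    async_std::task::block_on(async {
        let mut reader = Cursor::new(b"lorem-ipsum".to_vec());
        let mut buf = Vec::new();

        // Reads up to and including the `-` delimiter; returns the byte count.
        let n = reader.read_until(b'-', &mut buf).await?;
        assert_eq!(n, 6);
        assert_eq!(buf, b"lorem-");
        Ok(())
    })
}
```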

@ -1,9 +1,9 @@
use std::mem;
use std::pin::Pin;
use std::str;
use std::future::Future;
use super::read_until_internal;
use crate::future::Future;
use crate::io::{self, BufRead};
use crate::task::{Context, Poll};
@ -37,12 +37,8 @@ impl<T: BufRead + Unpin + ?Sized> Future for ReadLineFuture<'_, T> {
))
}))
} else {
#[allow(clippy::debug_assert_with_mut_call)]
{
debug_assert!(buf.is_empty());
debug_assert_eq!(*read, 0);
}
debug_assert!(buf.is_empty());
debug_assert_eq!(*read, 0);
// Safety: `bytes` is valid UTF-8 because `str::from_utf8` returned `Ok`.
mem::swap(unsafe { buf.as_mut_vec() }, bytes);
Poll::Ready(ret)

@ -1,7 +1,7 @@
use std::pin::Pin;
use std::future::Future;
use super::read_until_internal;
use crate::future::Future;
use crate::io::{self, BufRead};
use crate::task::{Context, Poll};

@ -4,9 +4,11 @@ use std::{cmp, fmt};
use pin_project_lite::pin_project;
use crate::io::{self, BufRead, Read, Seek, SeekFrom, DEFAULT_BUF_SIZE};
use crate::io::{self, BufRead, Read, Seek, SeekFrom};
use crate::task::{Context, Poll};
const DEFAULT_CAPACITY: usize = 8 * 1024;
pin_project! {
/// Adds buffering to any reader.
///
@ -70,7 +72,7 @@ impl<R: io::Read> BufReader<R> {
/// # Ok(()) }) }
/// ```
pub fn new(inner: R) -> BufReader<R> {
BufReader::with_capacity(DEFAULT_BUF_SIZE, inner)
BufReader::with_capacity(DEFAULT_CAPACITY, inner)
}
/// Creates a new buffered reader with the specified capacity.

@ -1,11 +1,14 @@
use std::fmt;
use std::pin::Pin;
use futures_core::ready;
use pin_project_lite::pin_project;
use crate::io::write::WriteExt;
use crate::io::{self, Seek, SeekFrom, Write, DEFAULT_BUF_SIZE};
use crate::task::{Context, Poll, ready};
use crate::io::{self, Seek, SeekFrom, Write};
use crate::task::{Context, Poll};
const DEFAULT_CAPACITY: usize = 8 * 1024;
pin_project! {
/// Wraps a writer and buffers its output.
@ -22,14 +25,14 @@ pin_project! {
/// times. It also provides no advantage when writing to a destination that is
/// in memory, like a `Vec<u8>`.
///
/// Unlike the `BufWriter` type in `std`, this type does not write out the
/// contents of its buffer when it is dropped. Therefore, it is absolutely
/// critical that users explicitly flush the buffer before dropping a
/// `BufWriter`.
/// When the `BufWriter` is dropped, the contents of its buffer will be written
/// out. However, any errors that happen in the process of flushing the buffer
/// when the writer is dropped will be ignored. Code that wishes to handle such
/// errors must manually call [`flush`] before the writer is dropped.
///
/// This type is an async version of [`std::io::BufWriter`].
/// This type is an async version of [`std::io::BufReader`].
///
/// [`std::io::BufWriter`]: https://doc.rust-lang.org/std/io/struct.BufWriter.html
/// [`std::io::BufReader`]: https://doc.rust-lang.org/std/io/struct.BufReader.html
///
/// # Examples
///
@ -61,13 +64,10 @@ pin_project! {
/// use async_std::prelude::*;
///
/// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").await?);
///
/// for i in 0..10 {
/// let arr = [i+1];
/// stream.write(&arr).await?;
/// };
///
/// stream.flush().await?;
/// #
/// # Ok(()) }) }
/// ```
@ -88,32 +88,8 @@ pin_project! {
}
}
/// An error returned by `into_inner` which combines an error that
/// happened while writing out the buffer, and the buffered writer object
/// which may be used to recover from the condition.
///
/// # Examples
///
/// ```no_run
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// use async_std::io::BufWriter;
/// use async_std::net::TcpStream;
///
/// let buf_writer = BufWriter::new(TcpStream::connect("127.0.0.1:34251").await?);
///
/// // unwrap the TcpStream and flush the buffer
/// let stream = match buf_writer.into_inner().await {
/// Ok(s) => s,
/// Err(e) => {
/// // Here, e is an IntoInnerError
/// panic!("An error occurred");
/// }
/// };
/// #
/// # Ok(()) }) }
///```
#[derive(Debug)]
pub struct IntoInnerError<W>(W, crate::io::Error);
pub struct IntoInnerError<W>(W, std::io::Error);
impl<W: Write> BufWriter<W> {
/// Creates a new `BufWriter` with a default buffer capacity. The default is currently 8 KB,
@ -132,7 +108,7 @@ impl<W: Write> BufWriter<W> {
/// # Ok(()) }) }
/// ```
pub fn new(inner: W) -> BufWriter<W> {
BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner)
BufWriter::with_capacity(DEFAULT_CAPACITY, inner)
}
/// Creates a new `BufWriter` with the specified buffer capacity.
@ -328,7 +304,7 @@ impl<W: Write> Write for BufWriter<W> {
impl<W: Write + fmt::Debug> fmt::Debug for BufWriter<W> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("BufWriter")
f.debug_struct("BufReader")
.field("writer", &self.inner)
.field("buf", &self.buf)
.finish()

@ -1,11 +1,10 @@
use std::future::Future;
use std::pin::Pin;
use pin_project_lite::pin_project;
use crate::future::Future;
use crate::io::{self, BufRead, BufReader, Read, Write};
use crate::task::{Context, Poll};
use crate::utils::Context as _;
/// Copies the entire contents of a reader into a writer.
///
@ -44,7 +43,6 @@ use crate::utils::Context as _;
/// #
/// # Ok(()) }) }
/// ```
#[cfg(any(feature = "docs", not(feature = "unstable")))]
pub async fn copy<R, W>(reader: &mut R, writer: &mut W) -> io::Result<u64>
where
R: Read + Unpin + ?Sized,
@ -91,92 +89,5 @@ where
writer,
amt: 0,
};
future.await.context(|| String::from("io::copy failed"))
}
/// Copies the entire contents of a reader into a writer.
///
/// This function will continuously read data from `reader` and then
/// write it into `writer` in a streaming fashion until `reader`
/// returns EOF.
///
/// On success, the total number of bytes that were copied from
/// `reader` to `writer` is returned.
///
/// If you want to copy the contents of one file to another and you're
/// working with filesystem paths, see the [`fs::copy`] function.
///
/// This function is an async version of [`std::io::copy`].
///
/// [`std::io::copy`]: https://doc.rust-lang.org/std/io/fn.copy.html
/// [`fs::copy`]: ../fs/fn.copy.html
///
/// # Errors
///
/// This function will return an error immediately if any call to `read` or
/// `write` returns an error. All instances of `ErrorKind::Interrupted` are
/// handled by this function and the underlying operation is retried.
///
/// # Examples
///
/// ```
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::io;
///
/// let mut reader: &[u8] = b"hello";
/// let mut writer = io::stdout();
///
/// io::copy(&mut reader, &mut writer).await?;
/// #
/// # Ok(()) }) }
/// ```
#[cfg(all(feature = "unstable", not(feature = "docs")))]
pub async fn copy<R, W>(reader: R, writer: W) -> io::Result<u64>
where
R: Read + Unpin,
W: Write + Unpin,
{
pin_project! {
struct CopyFuture<R, W> {
#[pin]
reader: R,
#[pin]
writer: W,
amt: u64,
}
}
impl<R, W> Future for CopyFuture<R, W>
where
R: BufRead,
W: Write + Unpin,
{
type Output = io::Result<u64>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.project();
loop {
let buffer = futures_core::ready!(this.reader.as_mut().poll_fill_buf(cx))?;
if buffer.is_empty() {
futures_core::ready!(this.writer.as_mut().poll_flush(cx))?;
return Poll::Ready(Ok(*this.amt));
}
let i = futures_core::ready!(this.writer.as_mut().poll_write(cx, buffer))?;
if i == 0 {
return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));
}
*this.amt += i as u64;
this.reader.as_mut().consume(i);
}
}
}
let future = CopyFuture {
reader: BufReader::new(reader),
writer,
amt: 0,
};
future.await.context(|| String::from("io::copy failed"))
future.await
}

@ -19,8 +19,8 @@
//! [`File`]s:
//!
//! ```no_run
//! use async_std::fs::File;
//! use async_std::prelude::*;
//! use async_std::fs::File;
//!
//! # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
//! #
@ -47,9 +47,9 @@
//! coming from:
//!
//! ```no_run
//! use async_std::fs::File;
//! use async_std::io::prelude::*;
//! use async_std::io::SeekFrom;
//! use async_std::prelude::*;
//! use async_std::fs::File;
//!
//! # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
//! #
@ -82,9 +82,9 @@
//! methods to any reader:
//!
//! ```no_run
//! use async_std::fs::File;
//! use async_std::io::prelude::*;
//! use async_std::io::BufReader;
//! use async_std::prelude::*;
//! use async_std::fs::File;
//!
//! # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
//! #
@ -104,9 +104,9 @@
//! to [`write`][`Write::write`]:
//!
//! ```no_run
//! use async_std::fs::File;
//! use async_std::io::prelude::*;
//! use async_std::io::BufWriter;
//! use async_std::fs::File;
//!
//! # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
//! #
@ -116,8 +116,8 @@
//!
//! // write a byte to the buffer
//! writer.write(&[42]).await?;
//!
//! } // the buffer is flushed once the writer goes out of scope
//! //
//! #
//! # Ok(()) }) }
//! ```
@ -179,9 +179,9 @@
//! lines:
//!
//! ```no_run
//! use async_std::fs::File;
//! use async_std::io::BufReader;
//! use async_std::prelude::*;
//! use async_std::io::BufReader;
//! use async_std::fs::File;
//!
//! # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
//! #
@ -269,62 +269,45 @@
//! [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html
//! [`.unwrap()`]: https://doc.rust-lang.org/std/result/enum.Result.html#method.unwrap
const DEFAULT_BUF_SIZE: usize = 8 * 1024;
cfg_std! {
#[doc(inline)]
pub use std::io::{Error, ErrorKind, IoSlice, IoSliceMut, Result, SeekFrom};
pub use buf_read::{BufRead, Lines, Split};
pub use buf_reader::BufReader;
pub use buf_writer::{BufWriter, IntoInnerError};
pub use copy::copy;
pub use cursor::Cursor;
pub use empty::{empty, Empty};
pub use read::*;
pub use repeat::{repeat, Repeat};
pub use seek::Seek;
pub use sink::{sink, Sink};
pub use write::Write;
pub mod prelude;
#[doc(inline)]
pub use std::io::{Error, ErrorKind, IoSlice, IoSliceMut, Result, SeekFrom};
pub(crate) mod buf_read;
pub(crate) mod read;
pub(crate) mod seek;
pub(crate) mod write;
pub(crate) mod utils;
pub use buf_read::{BufRead, Lines};
pub use buf_reader::BufReader;
pub use buf_writer::BufWriter;
pub use copy::copy;
pub use cursor::Cursor;
pub use empty::{empty, Empty};
pub use read::Read;
pub use repeat::{repeat, Repeat};
pub use seek::Seek;
pub use sink::{sink, Sink};
pub use stderr::{stderr, Stderr};
pub use stdin::{stdin, Stdin};
pub use stdout::{stdout, Stdout};
pub use timeout::timeout;
pub use write::Write;
mod buf_reader;
mod buf_writer;
mod copy;
mod cursor;
mod empty;
mod repeat;
mod sink;
}
// For use in the print macros.
#[doc(hidden)]
pub use stdio::{_eprint, _print};
cfg_default! {
// For use in the print macros.
#[doc(hidden)]
#[cfg(not(target_os = "unknown"))]
pub use stdio::{_eprint, _print};
pub mod prelude;
#[cfg(not(target_os = "unknown"))]
pub use stderr::{stderr, Stderr};
#[cfg(not(target_os = "unknown"))]
pub use stdin::{stdin, Stdin};
#[cfg(not(target_os = "unknown"))]
pub use stdout::{stdout, Stdout};
pub use timeout::timeout;
pub(crate) mod buf_read;
pub(crate) mod read;
pub(crate) mod seek;
pub(crate) mod write;
mod timeout;
#[cfg(not(target_os = "unknown"))]
mod stderr;
#[cfg(not(target_os = "unknown"))]
mod stdin;
#[cfg(not(target_os = "unknown"))]
mod stdio;
#[cfg(not(target_os = "unknown"))]
mod stdout;
}
mod buf_reader;
mod buf_writer;
mod copy;
mod cursor;
mod empty;
mod repeat;
mod sink;
mod stderr;
mod stdin;
mod stdio;
mod stdout;
mod timeout;

@ -1,4 +1,4 @@
//! The async I/O prelude.
//! The async I/O Prelude
//!
//! The purpose of this module is to alleviate imports of many common I/O traits
//! by adding a glob import to the top of I/O heavy modules:
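
A minimal sketch of that glob import in use, assuming `&[u8]` counts as an async reader the same way it does for `std::io::Read`:

```rust
// A single glob import makes the async I/O extension methods available.
use async_std::io::prelude::*;

fn main() -> std::io::Result<()> {
    async_std::task::block_on(async {
        let mut reader: &[u8] = b"ping";
        let mut s = String::new();
        // `read_to_string` is one of the methods pulled in by the prelude.
        reader.read_to_string(&mut s).await?;
        assert_eq!(s, "ping");
        Ok(())
    })
}
```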
@ -17,11 +17,11 @@ pub use crate::io::Seek;
#[doc(no_inline)]
pub use crate::io::Write;
#[doc(inline)]
pub use crate::io::buf_read::BufReadExt;
#[doc(inline)]
pub use crate::io::read::ReadExt;
#[doc(inline)]
pub use crate::io::seek::SeekExt;
#[doc(inline)]
pub use crate::io::write::WriteExt;
#[doc(hidden)]
pub use crate::io::buf_read::BufReadExt as _;
#[doc(hidden)]
pub use crate::io::read::ReadExt as _;
#[doc(hidden)]
pub use crate::io::seek::SeekExt as _;
#[doc(hidden)]
pub use crate::io::write::WriteExt as _;

@ -32,7 +32,7 @@ impl<T: Read + Unpin> Stream for Bytes<T> {
}
}
#[cfg(all(test, default))]
#[cfg(test)]
mod tests {
use crate::io;
use crate::prelude::*;

@ -165,7 +165,7 @@ impl<T: BufRead, U: BufRead> BufRead for Chain<T, U> {
}
}
#[cfg(all(test, default))]
#[cfg(test)]
mod tests {
use crate::io;
use crate::prelude::*;

@ -17,10 +17,6 @@ use std::mem;
use crate::io::IoSliceMut;
pub use bytes::Bytes;
pub use chain::Chain;
pub use take::Take;
extension_trait! {
use std::pin::Pin;
use std::ops::{Deref, DerefMut};
@ -35,7 +31,7 @@ extension_trait! {
[`std::io::Read`].
Methods other than [`poll_read`] and [`poll_read_vectored`] do not really exist in the
trait itself, but they become available when [`ReadExt`] from the [prelude] is imported:
trait itself, but they become available when the prelude is imported:
```
# #[allow(unused_imports)]
@ -44,11 +40,9 @@ extension_trait! {
[`std::io::Read`]: https://doc.rust-lang.org/std/io/trait.Read.html
[`futures::io::AsyncRead`]:
https://docs.rs/futures/0.3/futures/io/trait.AsyncRead.html
https://docs.rs/futures-preview/0.3.0-alpha.17/futures/io/trait.AsyncRead.html
[`poll_read`]: #tymethod.poll_read
[`poll_read_vectored`]: #method.poll_read_vectored
[`ReadExt`]: ../io/prelude/trait.ReadExt.html
[prelude]: ../prelude/index.html
"#]
pub trait Read {
#[doc = r#"
@ -72,11 +66,6 @@ extension_trait! {
}
}
#[doc = r#"
Extension methods for [`Read`].
[`Read`]: ../trait.Read.html
"#]
pub trait ReadExt: futures_io::AsyncRead {
#[doc = r#"
Reads some bytes from the byte stream.
@ -278,7 +267,7 @@ extension_trait! {
This function returns a new instance of `Read` which will read at most
`limit` bytes, after which it will always return EOF ([`Ok(0)`]). Any
read errors will not count towards the number of bytes read and future
calls to [`read`] may succeed.
calls to [`read()`] may succeed.
# Examples
@ -286,7 +275,7 @@ extension_trait! {
[`File`]: ../fs/struct.File.html
[`Ok(0)`]: ../../std/result/enum.Result.html#variant.Ok
[`read`]: tymethod.read
[`read()`]: tymethod.read
```no_run
# fn main() -> std::io::Result<()> { async_std::task::block_on(async {
@ -305,11 +294,11 @@ extension_trait! {
# Ok(()) }) }
```
"#]
fn take(self, limit: u64) -> Take<Self>
fn take(self, limit: u64) -> take::Take<Self>
where
Self: Sized,
{
Take { inner: self, limit }
take::Take { inner: self, limit }
}
#[doc = r#"
@ -381,8 +370,8 @@ extension_trait! {
# Ok(()) }) }
```
"#]
fn bytes(self) -> Bytes<Self> where Self: Sized {
Bytes { inner: self }
fn bytes(self) -> bytes::Bytes<Self> where Self: Sized {
bytes::Bytes { inner: self }
}
#[doc = r#"
@ -417,8 +406,8 @@ extension_trait! {
# Ok(()) }) }
```
"#]
fn chain<R: Read>(self, next: R) -> Chain<Self, R> where Self: Sized {
Chain { first: self, second: next, done_first: false }
fn chain<R: Read>(self, next: R) -> chain::Chain<Self, R> where Self: Sized {
chain::Chain { first: self, second: next, done_first: false }
}
}
@ -477,13 +466,13 @@ unsafe fn initialize<R: futures_io::AsyncRead>(_reader: &R, buf: &mut [u8]) {
std::ptr::write_bytes(buf.as_mut_ptr(), 0, buf.len())
}
#[cfg(all(test, not(target_os = "unknown")))]
#[cfg(test)]
mod tests {
use crate::io;
use crate::prelude::*;
#[test]
fn test_read_by_ref() {
fn test_read_by_ref() -> io::Result<()> {
crate::task::block_on(async {
let mut f = io::Cursor::new(vec![0u8, 1, 2, 3, 4, 5, 6, 7, 8]);
let mut buffer = Vec::new();
@ -493,13 +482,14 @@ mod tests {
let reference = f.by_ref();
// read at most 5 bytes
assert_eq!(reference.take(5).read_to_end(&mut buffer).await.unwrap(), 5);
assert_eq!(reference.take(5).read_to_end(&mut buffer).await?, 5);
assert_eq!(&buffer, &[0, 1, 2, 3, 4])
} // drop our &mut reference so we can use f again
// original file still usable, read the rest
assert_eq!(f.read_to_end(&mut other_buffer).await.unwrap(), 4);
assert_eq!(f.read_to_end(&mut other_buffer).await?, 4);
assert_eq!(&other_buffer, &[5, 6, 7, 8]);
});
Ok(())
})
}
}

@ -1,6 +1,6 @@
use std::pin::Pin;
use std::future::Future;
use crate::future::Future;
use crate::io::{self, Read};
use crate::task::{Context, Poll};

@ -1,7 +1,7 @@
use std::mem;
use std::pin::Pin;
use std::future::Future;
use crate::future::Future;
use crate::io::{self, Read};
use crate::task::{Context, Poll};

@ -1,6 +1,6 @@
use std::pin::Pin;
use std::future::Future;
use crate::future::Future;
use crate::io::{self, Read};
use crate::task::{Context, Poll};

@ -1,9 +1,9 @@
use std::mem;
use std::pin::Pin;
use std::str;
use std::future::Future;
use super::read_to_end_internal;
use crate::future::Future;
use crate::io::{self, Read};
use crate::task::{Context, Poll};
@ -37,11 +37,7 @@ impl<T: Read + Unpin + ?Sized> Future for ReadToStringFuture<'_, T> {
))
}))
} else {
#[allow(clippy::debug_assert_with_mut_call)]
{
debug_assert!(buf.is_empty());
}
debug_assert!(buf.is_empty());
// Safety: `bytes` is a valid UTF-8 because `str::from_utf8` returned `Ok`.
mem::swap(unsafe { buf.as_mut_vec() }, bytes);
Poll::Ready(ret)

@ -1,6 +1,6 @@
use std::pin::Pin;
use std::future::Future;
use crate::future::Future;
use crate::io::{self, IoSliceMut, Read};
use crate::task::{Context, Poll};

@ -218,7 +218,7 @@ impl<T: BufRead> BufRead for Take<T> {
}
}
#[cfg(all(test, not(target_os = "unknown")))]
#[cfg(test)]
mod tests {
use crate::io;
use crate::prelude::*;

@ -18,7 +18,7 @@ extension_trait! {
[`std::io::Seek`].
The [provided methods] do not really exist in the trait itself, but they become
available when [`SeekExt`] from the [prelude] is imported:
available when the prelude is imported:
```
# #[allow(unused_imports)]
@ -27,10 +27,8 @@ extension_trait! {
[`std::io::Seek`]: https://doc.rust-lang.org/std/io/trait.Seek.html
[`futures::io::AsyncSeek`]:
https://docs.rs/futures/0.3/futures/io/trait.AsyncSeek.html
https://docs.rs/futures-preview/0.3.0-alpha.17/futures/io/trait.AsyncSeek.html
[provided methods]: #provided-methods
[`SeekExt`]: ../io/prelude/trait.SeekExt.html
[prelude]: ../prelude/index.html
"#]
pub trait Seek {
#[doc = r#"
@ -43,11 +41,6 @@ extension_trait! {
) -> Poll<io::Result<u64>>;
}
#[doc = r#"
Extension methods for [`Seek`].
[`Seek`]: ../trait.Seek.html
"#]
pub trait SeekExt: futures_io::AsyncSeek {
#[doc = r#"
Seeks to a new position in a byte stream.
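
A small sketch of the `seek` extension method described above, assuming `async_std::io::Cursor` and the `SeekFrom` re-export shown elsewhere in this diff:

```rust
use async_std::io::{Cursor, SeekFrom};
use async_std::prelude::*;

fn main() -> std::io::Result<()> {
    async_std::task::block_on(async {
        let mut cursor = Cursor::new(vec![1u8, 2, 3, 4, 5]);

        // Jump two bytes past the start; the new position is returned.
        let pos = cursor.seek(SeekFrom::Start(2)).await?;
        assert_eq!(pos, 2);
        Ok(())
    })
}
```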

@ -1,6 +1,6 @@
use std::pin::Pin;
use std::future::Future;
use crate::future::Future;
use crate::io::{self, Seek, SeekFrom};
use crate::task::{Context, Poll};

@ -1,9 +1,9 @@
use std::pin::Pin;
use std::sync::Mutex;
use std::future::Future;
use crate::future::Future;
use crate::io::{self, Write};
use crate::task::{spawn_blocking, Context, JoinHandle, Poll};
use crate::task::{blocking, Context, JoinHandle, Poll};
/// Constructs a new handle to the standard error of the current process.
///
@ -89,12 +89,11 @@ enum Operation {
impl Write for Stderr {
fn poll_write(
self: Pin<&mut Self>,
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
let mut state_guard = self.0.lock().unwrap();
let state = &mut *state_guard;
let state = &mut *self.0.lock().unwrap();
loop {
match state {
@ -125,7 +124,7 @@ impl Write for Stderr {
inner.buf[..buf.len()].copy_from_slice(buf);
// Start the operation asynchronously.
*state = State::Busy(spawn_blocking(move || {
*state = State::Busy(blocking::spawn(move || {
let res = std::io::Write::write(&mut inner.stderr, &inner.buf);
inner.last_op = Some(Operation::Write(res));
State::Idle(Some(inner))
@ -138,9 +137,8 @@ impl Write for Stderr {
}
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
let mut state_guard = self.0.lock().unwrap();
let state = &mut *state_guard;
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
let state = &mut *self.0.lock().unwrap();
loop {
match state {
@ -154,7 +152,7 @@ impl Write for Stderr {
let mut inner = opt.take().unwrap();
// Start the operation asynchronously.
*state = State::Busy(spawn_blocking(move || {
*state = State::Busy(blocking::spawn(move || {
let res = std::io::Write::flush(&mut inner.stderr);
inner.last_op = Some(Operation::Flush(res));
State::Idle(Some(inner))

@ -1,11 +1,9 @@
use std::future::Future;
use std::pin::Pin;
use std::sync::Mutex;
use crate::future;
use crate::future::{self, Future};
use crate::io::{self, Read};
use crate::task::{spawn_blocking, Context, JoinHandle, Poll};
use crate::utils::Context as _;
use crate::task::{blocking, Context, JoinHandle, Poll};
/// Constructs a new handle to the standard input of the current process.
///
@ -129,7 +127,7 @@ impl Stdin {
let mut inner = opt.take().unwrap();
// Start the operation asynchronously.
*state = State::Busy(spawn_blocking(move || {
*state = State::Busy(blocking::spawn(move || {
inner.line.clear();
let res = inner.stdin.read_line(&mut inner.line);
inner.last_op = Some(Operation::ReadLine(res));
@ -143,18 +141,16 @@ impl Stdin {
}
})
.await
.context(|| String::from("could not read line on stdin"))
}
}
impl Read for Stdin {
fn poll_read(
self: Pin<&mut Self>,
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
let mut state_guard = self.0.lock().unwrap();
let state = &mut *state_guard;
let state = &mut *self.0.lock().unwrap();
loop {
match state {
@ -184,7 +180,7 @@ impl Read for Stdin {
}
// Start the operation asynchronously.
*state = State::Busy(spawn_blocking(move || {
*state = State::Busy(blocking::spawn(move || {
let res = std::io::Read::read(&mut inner.stdin, &mut inner.buf);
inner.last_op = Some(Operation::Read(res));
State::Idle(Some(inner))

@ -1,9 +1,9 @@
use std::pin::Pin;
use std::sync::Mutex;
use std::future::Future;
use crate::future::Future;
use crate::io::{self, Write};
use crate::task::{spawn_blocking, Context, JoinHandle, Poll};
use crate::task::{blocking, Context, JoinHandle, Poll};
/// Constructs a new handle to the standard output of the current process.
///
@ -89,12 +89,11 @@ enum Operation {
impl Write for Stdout {
fn poll_write(
self: Pin<&mut Self>,
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
let mut state_guard = self.0.lock().unwrap();
let state = &mut *state_guard;
let state = &mut *self.0.lock().unwrap();
loop {
match state {
@ -125,7 +124,7 @@ impl Write for Stdout {
inner.buf[..buf.len()].copy_from_slice(buf);
// Start the operation asynchronously.
*state = State::Busy(spawn_blocking(move || {
*state = State::Busy(blocking::spawn(move || {
let res = std::io::Write::write(&mut inner.stdout, &inner.buf);
inner.last_op = Some(Operation::Write(res));
State::Idle(Some(inner))
@ -138,9 +137,8 @@ impl Write for Stdout {
}
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
let mut state_guard = self.0.lock().unwrap();
let state = &mut *state_guard;
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
let state = &mut *self.0.lock().unwrap();
loop {
match state {
@ -154,7 +152,7 @@ impl Write for Stdout {
let mut inner = opt.take().unwrap();
// Start the operation asynchronously.
*state = State::Busy(spawn_blocking(move || {
*state = State::Busy(blocking::spawn(move || {
let res = std::io::Write::flush(&mut inner.stdout);
inner.last_op = Some(Operation::Flush(res));
State::Idle(Some(inner))

@ -1,12 +1,12 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;
use futures_timer::Delay;
use pin_project_lite::pin_project;
use crate::future::Future;
use crate::io;
use crate::utils::{timer_after, Timer};
/// Awaits an I/O future or times out after a duration of time.
///
@ -37,7 +37,7 @@ where
F: Future<Output = io::Result<T>>,
{
Timeout {
timeout: timer_after(dur),
timeout: Delay::new(dur),
future: f,
}
.await
@ -53,7 +53,7 @@ pin_project! {
#[pin]
future: F,
#[pin]
timeout: Timer,
timeout: Delay,
}
}
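
A brief sketch of `io::timeout` as declared above, wrapping an I/O future that completes within the deadline (assuming `&[u8]` works as an async reader, as the `io::copy` example in this diff suggests):

```rust
use std::time::Duration;

use async_std::io;
use async_std::prelude::*;

fn main() -> io::Result<()> {
    async_std::task::block_on(async {
        // The read finishes immediately, so the timeout never fires.
        let mut reader: &[u8] = b"hello";
        let mut buf = Vec::new();
        let n = io::timeout(Duration::from_secs(1), reader.read_to_end(&mut buf)).await?;
        assert_eq!(n, 5);
        assert_eq!(buf, b"hello");
        Ok(())
    })
}
```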

@ -1,42 +0,0 @@
use crate::utils::Context;
use std::{error::Error as StdError, fmt, io};
/// Wraps a `std::io::Error` with an additional message.
///
/// Keeps the original error kind and stores the original I/O error as `source`.
impl<T> Context for Result<T, std::io::Error> {
fn context(self, message: impl Fn() -> String) -> Self {
self.map_err(|e| VerboseError::wrap(e, message()))
}
}
#[derive(Debug)]
pub(crate) struct VerboseError {
source: io::Error,
message: String,
}
impl VerboseError {
pub(crate) fn wrap(source: io::Error, message: impl Into<String>) -> io::Error {
io::Error::new(
source.kind(),
VerboseError {
source,
message: message.into(),
},
)
}
}
impl fmt::Display for VerboseError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.message)
}
}
impl StdError for VerboseError {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
Some(&self.source)
}
}

@ -1,6 +1,6 @@
use std::pin::Pin;
use std::future::Future;
use crate::future::Future;
use crate::io::{self, Write};
use crate::task::{Context, Poll};

@ -26,7 +26,7 @@ extension_trait! {
Methods other than [`poll_write`], [`poll_write_vectored`], [`poll_flush`], and
[`poll_close`] do not really exist in the trait itself, but they become available when
[`WriteExt`] from the [prelude] is imported:
the prelude is imported:
```
# #[allow(unused_imports)]
@ -35,13 +35,11 @@ extension_trait! {
[`std::io::Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
[`futures::io::AsyncWrite`]:
https://docs.rs/futures/0.3/futures/io/trait.AsyncWrite.html
https://docs.rs/futures-preview/0.3.0-alpha.17/futures/io/trait.AsyncWrite.html
[`poll_write`]: #tymethod.poll_write
[`poll_write_vectored`]: #method.poll_write_vectored
[`poll_flush`]: #tymethod.poll_flush
[`poll_close`]: #tymethod.poll_close
[`WriteExt`]: ../io/prelude/trait.WriteExt.html
[prelude]: ../prelude/index.html
"#]
pub trait Write {
#[doc = r#"
@ -76,11 +74,6 @@ extension_trait! {
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>>;
}
#[doc = r#"
Extension methods for [`Write`].
[`Write`]: ../trait.Write.html
"#]
pub trait WriteExt: futures_io::AsyncWrite {
#[doc = r#"
Writes some bytes into the byte stream.
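
To make the `write` family concrete, a minimal sketch writing into an in-memory buffer, assuming `Vec<u8>` qualifies as an async writer the same way it does for `std::io::Write`:

```rust
use async_std::prelude::*;

fn main() -> std::io::Result<()> {
    async_std::task::block_on(async {
        // `Vec<u8>` grows as bytes are written, like with `std::io::Write`.
        let mut buf: Vec<u8> = Vec::new();
        buf.write_all(b"written ").await?;
        buf.write_all(b"asynchronously").await?;
        buf.flush().await?;
        assert_eq!(buf, b"written asynchronously");
        Ok(())
    })
}
```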
