Compare commits


4 commits

Author           SHA1        Message                                            Date
Florian Gilcher  67fd54c138  Make rustfmt happy                                 2019-08-15 15:41:41 +02:00
Florian Gilcher  adeabe51d2  Update examples/integrate-thread.rs                2019-08-15 13:11:18 +02:00
                             (Co-Authored-By: Yoshua Wuyts <yoshuawuyts+github@gmail.com>)
Florian Gilcher  a9b970c8ec  Name threads after what they do                    2019-08-15 10:29:55 +02:00
Florian Gilcher  f38923d0cc  Add a small example on how to integrate threads    2019-08-14 18:31:31 +02:00
333 changed files with 6039 additions and 26760 deletions

View file

@@ -1,3 +0,0 @@
Our contribution policy can be found at [async.rs/contribute][policy].
[policy]: https://async.rs/contribute/

View file

@@ -1,183 +0,0 @@
name: CI
on:
pull_request:
push:
branches:
- master
- staging
- trying
env:
RUSTFLAGS: -Dwarnings
jobs:
build_and_test:
name: Build and test
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, windows-latest, macOS-latest]
rust: [nightly, beta, stable]
steps:
- uses: actions/checkout@master
- name: Install ${{ matrix.rust }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.rust }}
override: true
- name: Cache cargo registry
uses: actions/cache@v2
with:
path: ~/.cargo/registry
key: ${{ matrix.os }}-${{ matrix.rust }}-cargo-registry-${{ hashFiles('**/Cargo.toml') }}
- name: Cache cargo index
uses: actions/cache@v2
with:
path: ~/.cargo/git
key: ${{ matrix.os }}-${{ matrix.rust }}-cargo-index-${{ hashFiles('**/Cargo.toml') }}
- name: Cache cargo build
uses: actions/cache@v2
with:
path: target
key: ${{ matrix.os }}-${{ matrix.rust }}-cargo-build-target-${{ hashFiles('**/Cargo.toml') }}
- name: check
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --tests
- name: check unstable
uses: actions-rs/cargo@v1
with:
command: check
args: --features unstable --all --bins --examples --tests
- name: check wasm
uses: actions-rs/cargo@v1
with:
command: check
target: wasm32-unknown-unknown
override: true
args: --features unstable --all --bins --tests
- name: check bench
uses: actions-rs/cargo@v1
if: matrix.rust == 'nightly'
with:
command: check
args: --benches
- name: check std only
uses: actions-rs/cargo@v1
with:
command: check
args: --no-default-features --features std
- name: check attributes
uses: actions-rs/cargo@v1
with:
command: check
args: --features attributes
- name: tests
uses: actions-rs/cargo@v1
with:
command: test
args: --all --features "unstable attributes"
build__with_no_std:
name: Build with no-std
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- name: setup
run: |
rustup default nightly
rustup target add thumbv7m-none-eabi
- name: check no_std
uses: actions-rs/cargo@v1
with:
command: check
args: --no-default-features --features alloc --target thumbv7m-none-eabi -Z avoid-dev-deps
check_tokio_02_feature:
name: Check tokio02 feature
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- name: check tokio02
uses: actions-rs/cargo@v1
with:
command: check
args: --all --features tokio02
cross:
name: Cross compile
runs-on: ubuntu-latest
strategy:
matrix:
target:
- i686-unknown-linux-gnu
- powerpc-unknown-linux-gnu
- powerpc64-unknown-linux-gnu
- mips-unknown-linux-gnu
- arm-linux-androideabi
steps:
- uses: actions/checkout@master
- name: Install nightly
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
override: true
- name: Install cross
run: cargo install cross
- name: check
run: cross check --all --target ${{ matrix.target }}
- name: check unstable
run: cross check --all --features unstable --target ${{ matrix.target }}
- name: test
run: cross test --all --features unstable --target ${{ matrix.target }}
check_fmt_and_docs:
name: Checking fmt and docs
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly
override: true
components: rustfmt
- name: setup
run: |
rustup component add rustfmt
test -x $HOME/.cargo/bin/mdbook || ./ci/install-mdbook.sh
rustc --version
- name: mdbook
run: |
mdbook build docs
- name: fmt
run: cargo fmt --all -- --check
- name: Docs
run: cargo doc --features docs

.travis.yml (new file, 23 lines)
View file

@@ -0,0 +1,23 @@
language: rust
env:
- RUSTFLAGS="-D warnings"
before_script:
- rustup component add rustfmt
matrix:
fast_finish: true
include:
- rust: nightly
os: linux
- rust: nightly
os: osx
# - rust: nightly-x86_64-pc-windows-msvc
# os: windows
script:
- cargo fmt --all -- --check
- cargo check --all --benches --bins --examples --tests
- cargo test --all
- cargo doc --features docs

View file

@@ -1,771 +0,0 @@
# Changelog
All notable changes to async-std will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://book.async.rs/overview/stability-guarantees.html).
## [Unreleased]
# [1.6.2] - 2020-06-19
## Added
- Add `UdpSocket::peer_addr` ([#816](https://github.com/async-rs/async-std/pull/816))
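A minimal sketch of the new method in use; the addresses below are placeholders chosen purely for illustration:
```rust
use async_std::net::UdpSocket;
use async_std::task;

fn main() -> std::io::Result<()> {
    task::block_on(async {
        // Bind to an ephemeral local port, then `connect` to set a default peer.
        let socket = UdpSocket::bind("127.0.0.1:0").await?;
        socket.connect("127.0.0.1:8080").await?;
        // `peer_addr` reports the address previously set by `connect`.
        println!("peer: {}", socket.peer_addr()?);
        Ok(())
    })
}
```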
## Changed
## Fixed
- Ensure the reactor is running for sockets and timers ([#819](https://github.com/async-rs/async-std/pull/819)).
- Avoid excessive polling in `flatten` and `flat_map` ([#701](https://github.com/async-rs/async-std/pull/701))
# [1.6.1] - 2020-06-11
## Added
- Added `tokio02` feature flag, to allow compatibility usage with tokio@0.2 ([#804](https://github.com/async-rs/async-std/pull/804)).
## Changed
- Removed unstable `stdio` lock methods, due to their unsoundness ([#807](https://github.com/async-rs/async-std/pull/807)).
## Fixed
- Fixed wrong slice index for file reading ([#802](https://github.com/async-rs/async-std/pull/802)).
- Fixed recursive calls to `block_on` ([#799](https://github.com/async-rs/async-std/pull/799)) and ([#809](https://github.com/async-rs/async-std/pull/809)).
- Remove `default` feature requirement for the `unstable` feature ([#806](https://github.com/async-rs/async-std/pull/806)).
# [1.6.0] - 2020-05-22
See `1.6.0-beta.1` and `1.6.0-beta.2`.
# [1.6.0-beta.2] - 2020-05-19
## Added
- Added an environment variable to configure the thread pool size of the runtime. ([#774](https://github.com/async-rs/async-std/pull/774))
- Implement `Clone` for `UnixStream` ([#772](https://github.com/async-rs/async-std/pull/772))
## Changed
- For `wasm`, switched underlying `Timer` implementation to [`futures-timer`](https://github.com/async-rs/futures-timer). ([#776](https://github.com/async-rs/async-std/pull/776))
## Fixed
- Use `smol::block_on` to handle drop of `File`, avoiding nested executor panic. ([#768](https://github.com/async-rs/async-std/pull/768))
# [1.6.0-beta.1] - 2020-05-07
## Added
- Added `task::spawn_local`. ([#757](https://github.com/async-rs/async-std/pull/757))
- Added out of the box support for `wasm`. ([#757](https://github.com/async-rs/async-std/pull/757))
- Added `JoinHandle::cancel` ([#757](https://github.com/async-rs/async-std/pull/757))
- Added `sync::Condvar` ([#369](https://github.com/async-rs/async-std/pull/369))
- Added `sync::Sender::try_send` and `sync::Receiver::try_recv` ([#585](https://github.com/async-rs/async-std/pull/585))
- Added `no_std` support for `task`, `future` and `stream` ([#680](https://github.com/async-rs/async-std/pull/680))
## Changed
- Switched underlying runtime to [`smol`](https://github.com/stjepang/smol/). ([#757](https://github.com/async-rs/async-std/pull/757))
- Switched implementation of `sync::Barrier` to use `sync::Condvar` like `std` does. ([#581](https://github.com/async-rs/async-std/pull/581))
## Fixed
- Allow compilation on 32 bit targets, by using `AtomicUsize` for `TaskId`. ([#756](https://github.com/async-rs/async-std/pull/756))
# [1.5.0] - 2020-02-03
[API Documentation](https://docs.rs/async-std/1.5.0/async-std)
This patch includes various quality of life improvements to async-std,
including improved performance, stability, and the addition of various
`Clone` impls that replace the use of `Arc` in many cases.
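For example, a `TcpStream` can now be cloned directly instead of being wrapped in an `Arc`. A rough sketch, assuming something is listening on the placeholder address:
```rust
use async_std::net::TcpStream;
use async_std::prelude::*;
use async_std::task;

fn main() -> std::io::Result<()> {
    task::block_on(async {
        let stream = TcpStream::connect("127.0.0.1:8080").await?;
        // One handle for reading, one for writing, with no `Arc` required.
        let mut reader = stream.clone();
        let mut writer = stream;

        writer.write_all(b"ping\n").await?;
        let mut buf = [0u8; 5];
        reader.read_exact(&mut buf).await?;
        Ok(())
    })
}
```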
## Added
- Added links to various ecosystem projects from the README ([#660](https://github.com/async-rs/async-std/pull/660))
- Added an example on `FromStream` for `Result<T, E>` ([#643](https://github.com/async-rs/async-std/pull/643))
- Added `stream::pending` as "unstable" ([#615](https://github.com/async-rs/async-std/pull/615))
- Added an example of `stream::timeout` to document the error flow ([#675](https://github.com/async-rs/async-std/pull/675))
- Implement `Clone` for `DirEntry` ([#682](https://github.com/async-rs/async-std/pull/682))
- Implement `Clone` for `TcpStream` ([#689](https://github.com/async-rs/async-std/pull/689))
## Changed
- Removed internal comment on `stream::Interval` ([#645](https://github.com/async-rs/async-std/pull/645))
- The "unstable" feature can now be used without requiring the "default" feature ([#647](https://github.com/async-rs/async-std/pull/647))
- Removed unnecessary trait bound on `stream::FlatMap` ([#651](https://github.com/async-rs/async-std/pull/651))
- Updated the "broadcaster" dependency used by "unstable" to `1.0.0` ([#681](https://github.com/async-rs/async-std/pull/681))
- Updated `async-task` to 1.2.1 ([#676](https://github.com/async-rs/async-std/pull/676))
- `task::block_on` now parks after a single poll, improving performance in many cases ([#684](https://github.com/async-rs/async-std/pull/684))
- Improved reading flow of the "client" part of the async-std tutorial ([#550](https://github.com/async-rs/async-std/pull/550))
- Use `take_while` instead of `scan` in `impl` of `Product`, `Sum` and `FromStream` ([#667](https://github.com/async-rs/async-std/pull/667))
- `TcpStream::connect` no longer uses a thread from the threadpool, improving performance ([#687](https://github.com/async-rs/async-std/pull/687))
## Fixed
- Fixed crate documentation typo ([#655](https://github.com/async-rs/async-std/pull/655))
- Fixed documentation for `UdpSocket::recv` ([#648](https://github.com/async-rs/async-std/pull/648))
- Fixed documentation for `UdpSocket::send` ([#671](https://github.com/async-rs/async-std/pull/671))
- Fixed typo in stream documentation ([#650](https://github.com/async-rs/async-std/pull/650))
- Fixed typo on `sync::JoinHandle` documentation ([#659](https://github.com/async-rs/async-std/pull/659))
- Removed use of `std::error::Error::description` which failed CI ([#661](https://github.com/async-rs/async-std/pull/662))
- Removed the use of rustfmt's unstable `format_code_in_doc_comments` option which failed CI ([#685](https://github.com/async-rs/async-std/pull/685))
- Fixed a code typo in the `task::sleep` example ([#688](https://github.com/async-rs/async-std/pull/688))
# [1.4.0] - 2019-12-20
[API Documentation](https://docs.rs/async-std/1.4.0/async-std)
This patch adds `Future::timeout`, providing a method counterpart to the
`future::timeout` free function, and includes several bug fixes around missing
APIs. Notably we're not shipping our new executor yet, first announced [on our
blog](https://async.rs/blog/stop-worrying-about-blocking-the-new-async-std-runtime/).
## Examples
```rust
use async_std::prelude::*;
use async_std::future;
use std::time::Duration;
let fut = future::pending::<()>(); // This future will never resolve.
let res = fut.timeout(Duration::from_millis(100)).await;
assert!(res.is_err()); // The future timed out, returning an err.
```
## Added
- Added `Future::timeout` as "unstable" [(#600)](https://github.com/async-rs/async-std/pull/600)
## Fixes
- Fixed a doc test and enabled it on CI [(#597)](https://github.com/async-rs/async-std/pull/597)
- Fixed a rendering issue with the `stream` submodule documentation [(#621)](https://github.com/async-rs/async-std/pull/621)
- `Write::write_fmt`'s future is now correctly marked as `#[must_use]` [(#628)](https://github.com/async-rs/async-std/pull/628)
- Fixed the missing `io::Bytes` export [(#633)](https://github.com/async-rs/async-std/pull/633)
- Fixed the missing `io::Chain` export [(#633)](https://github.com/async-rs/async-std/pull/633)
- Fixed the missing `io::Take` export [(#633)](https://github.com/async-rs/async-std/pull/633)
# [1.3.0] - 2019-12-12
[API Documentation](https://docs.rs/async-std/1.3.0/async-std)
This patch introduces `Stream::delay`, more methods on `DoubleEndedStream`,
and improves compile times. `Stream::delay` is a new API that's similar to
[`task::sleep`](https://docs.rs/async-std/1.2.0/async_std/task/fn.sleep.html),
but can be passed as part of a stream, rather than as a separate block. This is
useful for examples, or when manually debugging race conditions.
## Examples
```rust
let start = Instant::now();
let mut s = stream::from_iter(vec![0u8, 1]).delay(Duration::from_millis(200));
// The first time will take more than 200ms due to delay.
s.next().await;
assert!(start.elapsed().as_millis() >= 200);
// There will be no delay after the first time.
s.next().await;
assert!(start.elapsed().as_millis() <= 210);
```
## Added
- Added `Stream::delay` as "unstable" [(#309)](https://github.com/async-rs/async-std/pull/309)
- Added `DoubleEndedStream::next_back` as "unstable" [(#562)](https://github.com/async-rs/async-std/pull/562)
- Added `DoubleEndedStream::nth_back` as "unstable" [(#562)](https://github.com/async-rs/async-std/pull/562)
- Added `DoubleEndedStream::rfind` as "unstable" [(#562)](https://github.com/async-rs/async-std/pull/562)
- Added `DoubleEndedStream::rfold` as "unstable" [(#562)](https://github.com/async-rs/async-std/pull/562)
- Added `DoubleEndedStream::try_rfold` as "unstable" [(#562)](https://github.com/async-rs/async-std/pull/562)
- `stream::Once` now implements `DoubleEndedStream` [(#562)](https://github.com/async-rs/async-std/pull/562)
- `stream::FromIter` now implements `DoubleEndedStream` [(#562)](https://github.com/async-rs/async-std/pull/562)
## Changed
- Removed our dependency on `async-macros`, speeding up compilation [(#610)](https://github.com/async-rs/async-std/pull/610)
## Fixes
- Fixed a link in the task docs [(#598)](https://github.com/async-rs/async-std/pull/598)
- Fixed the `UdpSocket::recv` example [(#603)](https://github.com/async-rs/async-std/pull/603)
- Fixed a link to `task::block_on` [(#608)](https://github.com/async-rs/async-std/pull/608)
- Fixed an incorrect API mention in `task::Builder` [(#612)](https://github.com/async-rs/async-std/pull/612)
- Fixed leftover mentions of `futures-preview` [(#595)](https://github.com/async-rs/async-std/pull/595)
- Fixed a typo in the tutorial [(#614)](https://github.com/async-rs/async-std/pull/614)
- `<TcpStream as Write>::poll_close` now closes the write half of the stream [(#618)](https://github.com/async-rs/async-std/pull/618)
# [1.2.0] - 2019-11-27
[API Documentation](https://docs.rs/async-std/1.2.0/async-std)
This patch includes some minor quality-of-life improvements, introduces a
new `Stream::unzip` API, and adds verbose errors to our networking types.
This means that if you can't connect to a socket, the error now tells you *which*
address you couldn't connect to, so you no longer have to go through the motions
of debugging just to find out what the address was.
## Example
Unzip a stream of tuples into two collections:
```rust
use async_std::prelude::*;
use async_std::stream;
let s = stream::from_iter(vec![(1,2), (3,4)]);
let (left, right): (Vec<_>, Vec<_>) = s.unzip().await;
assert_eq!(left, [1, 3]);
assert_eq!(right, [2, 4]);
```
## Added
- Added `Stream::unzip` as "unstable".
- Added verbose errors to the networking types.
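A small sketch of what this looks like in practice; the address is a placeholder and the exact error wording is not pinned down here:
```rust
use async_std::net::TcpStream;
use async_std::task;

fn main() {
    task::block_on(async {
        // The connection error now names the address that was attempted.
        match TcpStream::connect("203.0.113.1:9999").await {
            Ok(_) => println!("connected"),
            Err(err) => eprintln!("could not connect: {}", err),
        }
    });
}
```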
## Changed
- Enabled CI on master branch.
- `Future::join` and `Future::try_join` can now join futures with different
output types.
## Fixed
- Fixed the docs and `Debug` output of `BufWriter`.
- Fixed a bug in `Stream::throttle` that made it consume too much CPU.
# [1.1.0] - 2019-11-21
[API Documentation](https://docs.rs/async-std/1.1.0/async-std)
This patch introduces a faster scheduler algorithm, `Stream::throttle`, and
stabilizes `task::yield_now`. Additionally we're introducing several more stream
APIs, bringing us to almost complete parity with the standard library.
Furthermore our `path` submodule now returns more context in errors. So if
opening a file fails, async-std will tell you *which* file failed to open,
making it easier to write and debug programs.
## Examples
```rust
let start = Instant::now();
let mut s = stream::interval(Duration::from_millis(5))
.throttle(Duration::from_millis(10))
.take(2);
s.next().await;
assert!(start.elapsed().as_millis() >= 5);
s.next().await;
assert!(start.elapsed().as_millis() >= 15);
s.next().await;
assert!(start.elapsed().as_millis() >= 25);
```
## Added
- Added `Stream::throttle` as "unstable".
- Added `Stream::count` as "unstable".
- Added `Stream::max` as "unstable".
- Added `Stream::successors` as "unstable".
- Added `Stream::by_ref` as "unstable".
- Added `Stream::partition` as "unstable".
- Added contextual errors to the `path` submodule.
- Added `os::windows::symlink_dir` as "unstable".
- Added `os::windows::symlink_file` as "unstable".
- Stabilized `task::yield_now`.
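A short sketch of the contextual errors; the path is a placeholder and the exact message wording may vary:
```rust
use async_std::fs;
use async_std::task;

fn main() {
    task::block_on(async {
        // The error now names the file that could not be opened.
        if let Err(err) = fs::read_to_string("/definitely/not/here.txt").await {
            eprintln!("read failed: {}", err);
        }
    });
}
```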
## Fixes
- We now ignore seek errors when rolling back failed `read` calls on `File`.
- Fixed a bug where `Stream::max_by_key` was returning the wrong result.
- Fixed a bug where `Stream::min_by_key` was returning the wrong result.
## Changed
- Applied various fixes to the tutorial.
- Fixed an issue with Clippy.
- Optimized an internal code generation macro, improving compilation speeds.
- Removed an `Unpin` bound from `stream::Once`.
- Removed various extra internal uses of `pin_mut!`.
- Simplified `Stream::any` and `Stream::all`'s internals.
- The `surf` example is now enabled again.
- Tweaked some streams internals.
- Updated `futures-timer` to 2.0.0, improving compilation speed.
- Upgraded `async-macros` to 2.0.0.
- `Stream::merge` now uses randomized ordering to reduce overall latency.
- The scheduler is now more efficient by keeping a slot for the next task to
run. This is similar to Go's scheduler, and Tokio's scheduler.
- Fixed the documentation of the `channel` types to link back to the `channel`
function.
# [1.0.1] - 2019-11-12
[API Documentation](https://docs.rs/async-std/1.0.1/async-std)
We were seeing a regression in our fs performance, caused by too many
long-running tasks. This patch fixes that regression by being more proactive
about closing down idle threads.
## Changes
- Improved thread startup/shutdown algorithm in `task::spawn_blocking`.
- Fixed a typo in the tutorial.
# [1.0.0] - 2019-11-11
[API Documentation](https://docs.rs/async-std/1.0.0/async-std)
This release marks the `1.0.0` release of async-std; a major milestone for our
development. This release itself mostly includes quality of life improvements
for all of our modules, including more consistent API bounds for a lot of our
submodules.
The biggest change is that we're now using the full semver range,
`major.minor.patch`, and any breaking changes to our "stable" APIs will require
an update of the `major` number.
We're excited we've hit this milestone together with you all. Thank you!
## Added
- Added `Future::join` as "unstable", replacing `future::join!`.
- Added `Future::try_join` as "unstable", replacing `future::try_join!`.
- Enabled `stable` and `beta` channel testing on CI.
- Implemented `FromIterator` and `Extend` for `PathBuf`.
- Implemented `FromStream` for `PathBuf`.
- Loosened the trait bounds of `io::copy` on "unstable".
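A rough sketch of `Future::join`, assuming the crate is built with the "unstable" feature these APIs live under:
```rust
use async_std::prelude::*;
use async_std::task;

fn main() {
    task::block_on(async {
        let a = async { 1u8 };
        let b = async { 2u8 };
        // Polls both futures concurrently and yields both results as a tuple.
        let (x, y) = a.join(b).await;
        assert_eq!(x + y, 3);
    });
}
```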
## Changed
- Added a `Sync` bound to `RwLock`, resolving a memory safety issue.
- Fixed a bug in `Stream::take_while` where it could continue after it should've
ended.
- Fixed a bug where our `attributes` Cargo feature wasn't working as intended.
- Improved documentation of `Stream::merge`, documenting ordering guarantees.
- Update doc imports in examples to prefer async-std's types.
- Various quality of life improvements to the `future` submodule.
- Various quality of life improvements to the `path` submodule.
- Various quality of life improvements to the `stream` submodule.
## Removed
- Removed `future::join!` in favor of `Future::join`.
- Removed `future::try_join!` in favor of `Future::try_join`.
# [0.99.12] - 2019-11-07
[API Documentation](https://docs.rs/async-std/0.99.12/async-std)
This patch upgrades us to `futures` 0.3 and brings support for `async/await` on Rust
Stable, performance improvements, and brand new module-level documentation.
## Added
- Added `Future::flatten` as "unstable".
- Added `Future::race` as "unstable" (replaces `future::select!`).
- Added `Future::try_race` as "unstable" (replaces `future::try_select!`).
- Added `Stderr::lock` as "unstable".
- Added `Stdin::lock` as "unstable".
- Added `Stdout::lock` as "unstable".
- Added `Stream::copied` as "unstable".
- Added `Stream::eq` as "unstable".
- Added `Stream::max_by_key` as "unstable".
- Added `Stream::min` as "unstable".
- Added `Stream::ne` as "unstable".
- Added `Stream::position` as "unstable".
- Added `StreamExt` and `FutureExt` as enumerable in the `prelude`.
- Added `TcpListener` and `TcpStream` integration tests.
- Added `stream::from_iter`.
- Added `sync::WakerSet` for internal use.
- Added an example to handle both `IP v4` and `IP v6` connections.
- Added the `default` Cargo feature.
- Added the `attributes` Cargo feature.
- Added the `std` Cargo feature.
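As a small illustration, `stream::from_iter` lifts any iterator into a stream, which can then be consumed with the usual combinators:
```rust
use async_std::prelude::*;
use async_std::stream;
use async_std::task;

fn main() {
    task::block_on(async {
        // Turn a vector into a stream and fold it like an iterator.
        let s = stream::from_iter(vec![1u32, 2, 3]);
        let sum = s.fold(0, |acc, x| acc + x).await;
        assert_eq!(sum, 6);
    });
}
```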
## Changed
- Fixed a bug in the blocking threadpool where it didn't spawn more than one thread.
- Fixed a bug with `Stream::merge` where sometimes it ended too soon.
- Fixed a bug with our GitHub actions setup.
- Fixed an issue where our channels could spuriously deadlock.
- Refactored the `task` module.
- Removed a deprecated GitHub action.
- Replaced `futures-preview` with `futures`.
- Replaced `lazy_static` with `once_cell`.
- Replaced all uses of `VecDequeue` in the examples with `stream::from_iter`.
- Simplified `sync::RwLock` using the internal `sync::WakerSet` type.
- Updated the `path` submodule documentation to match std.
- Updated the mod-level documentation to match std.
## Removed
- Removed `future::select!` (replaced by `Future::race`).
- Removed `future::try_select!` (replaced by `Future::try_race`).
# [0.99.11] - 2019-10-29
This patch introduces `async_std::sync::channel`, a novel asynchronous port of
the ultra-fast Crossbeam channels. This has been one of the most anticipated
features for async-std, and we're excited to be providing a first version of
this!
In addition to channels, this patch has the regular list of new methods, types,
and doc fixes.
## Examples
__Send and receive items from a channel__
```rust
// Create a bounded channel with a max-size of 1
let (s, r) = channel(1);
// This call returns immediately because there is enough space in the channel.
s.send(1).await;
task::spawn(async move {
// This call blocks the current task because the channel is full.
// It will be able to complete only after the first message is received.
s.send(2).await;
});
// Receive items from the channel
task::sleep(Duration::from_secs(1)).await;
assert_eq!(r.recv().await, Some(1));
assert_eq!(r.recv().await, Some(2));
```
## Added
- Added `Future::delay` as "unstable"
- Added `Stream::flat_map` as "unstable"
- Added `Stream::flatten` as "unstable"
- Added `Stream::product` as "unstable"
- Added `Stream::sum` as "unstable"
- Added `Stream::min_by_key`
- Added `Stream::max_by`
- Added `Stream::timeout` as "unstable"
- Added `sync::channel` as "unstable".
- Added doc links from instantiated structs to the methods that create them.
- Implemented `Extend` + `FromStream` for `PathBuf`.
## Changed
- Fixed an issue with `block_on` so it works even when nested.
- Fixed issues with our Clippy check on CI.
- Replaced our uses of `cfg_if` with our own macros, simplifying the codebase.
- Updated the homepage link in `Cargo.toml` to point to [async.rs](https://async.rs).
- Updated the module-level documentation for `stream` and `sync`.
- Various typos and grammar fixes.
- Removed redundant file flushes, improving the performance of `File` operations
## Removed
Nothing was removed in this release.
# [0.99.10] - 2019-10-16
This patch stabilizes several core concurrency macros, introduces async versions
of `Path` and `PathBuf`, and adds almost 100 other commits.
## Examples
__Asynchronously read directories from the filesystem__
```rust
use async_std::fs;
use async_std::path::Path;
use async_std::prelude::*;
let path = Path::new("/laputa");
let mut dir = fs::read_dir(&path).await.unwrap();
while let Some(entry) = dir.next().await {
if let Ok(entry) = entry {
println!("{:?}", entry.path());
}
}
```
__Cooperatively reschedule the current task on the executor__
```rust
use async_std::prelude::*;
use async_std::task;
task::spawn(async {
let x = fibonnacci(1000); // Do expensive work
task::yield_now().await; // Allow other tasks to run
x + fibonnacci(100) // Do more work
})
```
__Create an interval stream__
```rust
use async_std::prelude::*;
use async_std::stream;
use std::time::Duration;
let mut interval = stream::interval(Duration::from_secs(4));
while let Some(_) = interval.next().await {
println!("prints every four seconds");
}
```
## Added
- Added `FutureExt` to the `prelude`, allowing us to extend `Future`
- Added `Stream::cmp`
- Added `Stream::ge`
- Added `Stream::last`
- Added `Stream::le`
- Added `Stream::lt`
- Added `Stream::merge` as "unstable", replacing `stream::join!`
- Added `Stream::partial_cmp`
- Added `Stream::take_while`
- Added `Stream::try_fold`
- Added `future::IntoFuture` as "unstable"
- Added `io::BufRead::split`
- Added `io::Write::write_fmt`
- Added `print!`, `println!`, `eprint!`, `eprintln!` macros as "unstable"
- Added `process` as "unstable", re-exporting std types only for now
- Added `std::net` re-exports to the `net` submodule
- Added `std::path::PathBuf` with all associated methods
- Added `std::path::Path` with all associated methods
- Added `stream::ExactSizeStream` as "unstable"
- Added `stream::FusedStream` as "unstable"
- Added `stream::Product`
- Added `stream::Sum`
- Added `stream::from_fn`
- Added `stream::interval` as "unstable"
- Added `stream::repeat_with`
- Added `task::spawn_blocking` as "unstable", replacing `task::blocking`
- Added `task::yield_now`
- Added `write!` and `writeln!` macros as "unstable"
- Stabilized `future::join!` and `future::try_join!`
- Stabilized `future::timeout`
- Stabilized `path`
- Stabilized `task::ready!`
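A minimal sketch of `task::spawn_blocking`; since it was marked "unstable" here, it may require the corresponding feature flag:
```rust
use async_std::task;

fn main() {
    task::block_on(async {
        // Run blocking or CPU-bound work on the dedicated thread pool,
        // then await its result from async code.
        let sum = task::spawn_blocking(|| (1u64..=1_000).sum::<u64>()).await;
        assert_eq!(sum, 500_500);
    });
}
```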
## Changed
- Fixed `BufWriter::into_inner` so it calls `flush` before yielding
- Refactored `io::BufWriter` internals
- Refactored `net::ToSocketAddrs` internals
- Removed Travis CI entirely
- Rewrote the README.md
- Stabilized `io::Cursor`
- Switched bors over to use GitHub actions
- Updated the `io` documentation to match std's `io` docs
- Updated the `task` documentation to match std's `thread` docs
## Removed
- Removed the "unstable" `stream::join!` in favor of `Stream::merge`
- Removed the "unstable" `task::blocking` in favor of `task::spawn_blocking`
# [0.99.9] - 2019-10-08
This patch upgrades our `futures-rs` version, allowing us to build on the 1.39
beta. Additionally we've introduced `map` and `for_each` to `Stream`. And we've
added about a dozen new `FromStream` implementations for `std` types, bringing
us up to par with std's `FromIterator` implementations.
And finally we've added a new "unstable" `task::blocking` function which can be
used to convert blocking code into async code using a threadpool. We've been
using this internally in async-std for a while now to power our `fs` and
`net::SocketAddr` implementations. With this patch userland code now finally has
access to this too.
## Example
__Create a stream of tuples, and collect into a hashmap__
```rust
let a = stream::once(1u8);
let b = stream::once(0u8);
let s = a.zip(b);
let map: HashMap<u8, u8> = s.collect().await;
assert_eq!(map.get(&1), Some(&0u8));
```
__Spawn a blocking task on a dedicated threadpool__
```rust
task::blocking(async {
println!("long-running task here");
}).await;
```
## Added
- Added `stream::Stream::map`
- Added `stream::Stream::for_each`
- Added `stream::Stream::try_for_each`
- Added `task::blocking` as "unstable"
- Added `FromStream` for all `std::{option, collections, result, string, sync}` types.
- Added the `path` submodule as "unstable".
## Changed
- Updated `futures-preview` to `0.3.0-alpha.19`, allowing us to build on `rustc 1.39.0-beta`.
- As a consequence of this upgrade, all of our concrete stream implementations
now make use of `Stream::size_hint` to optimize internal allocations.
- We now use GitHub Actions through [actions-rs](https://github.com/actions-rs),
in addition to Travis CI. We intend to fully switch in the near future.
- Fixed a bug introduced in 0.99.6 where Unix Domain Listeners would sometimes become unresponsive.
- Updated our `sync::Barrier` docs to match std.
- Updated our `stream::FromStream` docs to match std's `FromIterator`.
# [0.99.8] - 2019-09-28
## Added
- Added README to examples directory.
- Added concurrency documentation to the futures submodule.
- Added `io::Read::take` method.
- Added `io::Read::by_ref` method.
- Added `io::Read::chain` method.
## Changed
- Pin futures-preview to `0.3.0-alpha.18`, to avoid rustc upgrade problems.
- Simplified extension traits using a macro.
- Use the `broadcast` module with `std::sync::Mutex`, reducing dependencies.
# [0.99.7] - 2019-09-26
## Added
- Added `future::join` macro as "unstable"
- Added `future::select` macro as "unstable"
- Added `future::try_join` macro as "unstable"
- Added `future::try_select` macro as "unstable"
- Added `io::BufWriter` struct
- Added `stream::Extend` trait
- Added `stream::Stream::chain` method
- Added `stream::Stream::filter` method
- Added `stream::Stream::inspect` method
- Added `stream::Stream::skip_while` method
- Added `stream::Stream::skip` method
- Added `stream::Stream::step_by` method
- Added `sync::Arc` struct from stdlib
- Added `sync::Barrier` struct as "unstable"
- Added `sync::Weak` struct from stdlib
- Added `task::ready` macro as "unstable"
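A short sketch of `io::BufWriter`; the file path is only an illustration:
```rust
use async_std::fs::File;
use async_std::io::BufWriter;
use async_std::prelude::*;
use async_std::task;

fn main() -> std::io::Result<()> {
    task::block_on(async {
        let file = File::create("/tmp/bufwriter-demo.txt").await?;
        // Buffer small writes and push them out with an explicit flush.
        let mut writer = BufWriter::new(file);
        writer.write_all(b"hello from async-std\n").await?;
        writer.flush().await?;
        Ok(())
    })
}
```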
## Changed
- Correctly marked the `pin` submodule as "unstable" in the docs
- Updated tutorial to have certain functions suffixed with `_loop`
- `io` traits are now re-exports of futures-rs types, allowing them to be
implemented
- `stream` traits are now re-exports of futures-rs types, allowing them to be
implemented
- `prelude::*` now needs to be in scope for the functions of the `io` and `stream`
traits to work
# [0.99.6] - 2019-09-19
## Added
- Added `stream::Stream::collect` as "unstable"
- Added `stream::Stream::enumerate`
- Added `stream::Stream::fuse`
- Added `stream::Stream::fold`
- Added `stream::Stream::scan`
- Added `stream::Stream::zip`
- Added `stream::join` macro as "unstable"
- Added `stream::DoubleEndedStream` as "unstable"
- Added `stream::FromStream` trait as "unstable"
- Added `stream::IntoStream` trait as "unstable"
- Added `io::Cursor` as "unstable"
- Added `io::BufRead::consume` method
- Added `io::repeat`
- Added `io::Slice` and `io::SliceMut`
- Added documentation for feature flags
- Added `pin` submodule as "unstable"
- Added the ability to `collect` a stream of `Result<T, E>`s into a
`Result<impl FromStream<T>, E>`
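A small sketch combining a few of the new stream methods (`zip`, `enumerate`, and `fold`), written against a current async-std purely for illustration:
```rust
use async_std::prelude::*;
use async_std::stream;
use async_std::task;

fn main() {
    task::block_on(async {
        let a = stream::from_iter(vec![1u32, 2, 3]);
        let b = stream::from_iter(vec![10u32, 20, 30]);
        // Pair the streams up, number the pairs, and fold everything into a sum.
        let total = a
            .zip(b)
            .enumerate()
            .fold(0u32, |acc, (i, (x, y))| acc + i as u32 + x + y)
            .await;
        // (0 + 1 + 10) + (1 + 2 + 20) + (2 + 3 + 30) = 69
        assert_eq!(total, 69);
    });
}
```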
## Changed
- Refactored the scheduling algorithm of our executor to use work stealing
- Refactored the network driver, removing 400 lines of code
- Removed the `Send` bound from `task::block_on`
- Removed `Unpin` bound from `impl<T: futures::stream::Stream> Stream for T`
# [0.99.5] - 2019-09-12
## Added
- Added tests for `io::timeout`
- Added `io::BufRead::fill_buf`, an `async fn` counterpart to `poll_fill_buf`
- Added `fs::create_dir_all`
- Added `future::timeout`, a free function to time out futures after a threshold
- Added `io::prelude`
- Added `net::ToSocketAddrs`, a non-blocking version of std's `ToSocketAddrs`
- Added `stream::Stream::all`
- Added `stream::Stream::filter_map`
- Added `stream::Stream::find_map`
- Added `stream::Stream::find`
- Added `stream::Stream::min_by`
- Added `stream::Stream::nth`
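A minimal sketch of `future::timeout` in use:
```rust
use async_std::future;
use async_std::task;
use std::time::Duration;

fn main() {
    task::block_on(async {
        // A future that never resolves, wrapped with a 50 ms deadline.
        let never = future::pending::<()>();
        let res = future::timeout(Duration::from_millis(50), never).await;
        assert!(res.is_err());
    });
}
```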
## Changed
- Polished the text and examples of the tutorial
- `cargo fmt` on all examples
- Simplified internals of `TcpStream::connect_to`
- Modularized our CI setup, enabled a rustfmt fallback, and improved caching
- Reduced our dependency on the `futures-rs` crate, improving compilation times
- Split `io::Read`, `io::Write`, `io::BufRead`, and `stream::Stream` into
multiple files
- `fs::File` now flushes more often to prevent flushes during `seek`
- Updated all dependencies
- Fixed a bug in the conversion of `File` into raw handle
- Fixed compilation errors on the latest nightly
## Removed
# [0.99.4] - 2019-08-21
## Changes
- Many small changes in the book, mostly typos
- Documentation fixes correcting examples
- Now works with recent nightly with stabilised async/await (> 2019-08-21)
# [0.99.3] - 2019-08-16
- Initial beta release
[Unreleased]: https://github.com/async-rs/async-std/compare/v1.6.2...HEAD
[1.6.2]: https://github.com/async-rs/async-std/compare/v1.6.1...v1.6.2
[1.6.1]: https://github.com/async-rs/async-std/compare/v1.6.0...v1.6.1
[1.6.0]: https://github.com/async-rs/async-std/compare/v1.5.0...v1.6.0
[1.6.0-beta.2]: https://github.com/async-rs/async-std/compare/v1.6.0-beta.1...v1.6.0-beta.2
[1.6.0-beta.1]: https://github.com/async-rs/async-std/compare/v1.5.0...v1.6.0-beta.1
[1.5.0]: https://github.com/async-rs/async-std/compare/v1.4.0...v1.5.0
[1.4.0]: https://github.com/async-rs/async-std/compare/v1.3.0...v1.4.0
[1.3.0]: https://github.com/async-rs/async-std/compare/v1.2.0...v1.3.0
[1.2.0]: https://github.com/async-rs/async-std/compare/v1.1.0...v1.2.0
[1.1.0]: https://github.com/async-rs/async-std/compare/v1.0.1...v1.1.0
[1.0.1]: https://github.com/async-rs/async-std/compare/v1.0.0...v1.0.1
[1.0.0]: https://github.com/async-rs/async-std/compare/v0.99.12...v1.0.0
[0.99.12]: https://github.com/async-rs/async-std/compare/v0.99.11...v0.99.12
[0.99.11]: https://github.com/async-rs/async-std/compare/v0.99.10...v0.99.11
[0.99.10]: https://github.com/async-rs/async-std/compare/v0.99.9...v0.99.10
[0.99.9]: https://github.com/async-rs/async-std/compare/v0.99.8...v0.99.9
[0.99.8]: https://github.com/async-rs/async-std/compare/v0.99.7...v0.99.8
[0.99.7]: https://github.com/async-rs/async-std/compare/v0.99.6...v0.99.7
[0.99.6]: https://github.com/async-rs/async-std/compare/v0.99.5...v0.99.6
[0.99.5]: https://github.com/async-rs/async-std/compare/v0.99.4...v0.99.5
[0.99.4]: https://github.com/async-rs/async-std/compare/v0.99.3...v0.99.4
[0.99.3]: https://github.com/async-rs/async-std/tree/v0.99.3

View file

@@ -1,107 +1,39 @@
[package]
name = "async-std"
version = "1.6.2"
authors = [
"Stjepan Glavina <stjepang@gmail.com>",
"Yoshua Wuyts <yoshuawuyts@gmail.com>",
"Friedel Ziegelmayer <me@dignifiedquire.com>",
"Contributors to async-std",
]
version = "0.1.0"
authors = ["Stjepan Glavina <stjepang@gmail.com>"]
edition = "2018"
license = "Apache-2.0/MIT"
repository = "https://github.com/async-rs/async-std"
homepage = "https://async.rs"
repository = "https://github.com/stjepang/async-std"
homepage = "https://github.com/stjepang/async-std"
documentation = "https://docs.rs/async-std"
description = "Async version of the Rust standard library"
keywords = ["async", "await", "future", "std", "task"]
categories = ["asynchronous", "concurrency", "network-programming"]
readme = "README.md"
[package.metadata.docs.rs]
features = ["docs"]
rustdoc-args = ["--cfg", "feature=\"docs\""]
rustdoc-args = ["--features docs"]
[features]
default = [
"std",
"async-task",
"kv-log-macro",
"log",
"num_cpus",
"pin-project-lite",
"smol",
]
docs = ["attributes", "unstable", "default"]
unstable = [
"std",
"futures-timer",
]
attributes = ["async-attributes"]
std = [
"alloc",
"crossbeam-utils",
"futures-core/std",
"futures-io",
"memchr",
"once_cell",
"pin-utils",
"slab",
"wasm-bindgen-futures",
"futures-channel",
"async-mutex",
]
alloc = [
"futures-core/alloc",
"pin-project-lite",
]
tokio02 = ["smol/tokio02"]
docs = []
[dependencies]
async-attributes = { version = "1.1.1", optional = true }
async-task = { version = "3.0.0", optional = true }
async-mutex = { version = "1.1.3", optional = true }
crossbeam-utils = { version = "0.7.2", optional = true }
futures-core = { version = "0.3.4", optional = true, default-features = false }
futures-io = { version = "0.3.4", optional = true }
kv-log-macro = { version = "1.0.6", optional = true }
log = { version = "0.4.8", features = ["kv_unstable"], optional = true }
memchr = { version = "2.3.3", optional = true }
num_cpus = { version = "1.12.0", optional = true }
once_cell = { version = "1.3.1", optional = true }
pin-project-lite = { version = "0.1.4", optional = true }
pin-utils = { version = "0.1.0-alpha.4", optional = true }
slab = { version = "0.4.2", optional = true }
futures-timer = { version = "3.0.2", optional = true }
# Dev-dependency, but they are not allowed to be optional :/
surf = { version = "1.0.3", optional = true }
[target.'cfg(not(target_os = "unknown"))'.dependencies]
smol = { version = "0.1.17", optional = true }
[target.'cfg(target_arch = "wasm32")'.dependencies]
futures-timer = { version = "3.0.2", optional = true, features = ["wasm-bindgen"] }
wasm-bindgen-futures = { version = "0.4.10", optional = true }
futures-channel = { version = "0.3.4", optional = true }
[target.'cfg(target_arch = "wasm32")'.dev-dependencies]
wasm-bindgen-test = "0.3.10"
async-task = { git = "ssh://git@github.com/async-rs/async-task.git" }
cfg-if = "0.1.9"
crossbeam-channel = "0.3.9"
futures-preview = "0.3.0-alpha.17"
futures-timer = "0.3.0"
lazy_static = "1.3.0"
log = { version = "0.4.8", features = ["kv_unstable"] }
memchr = "2.2.1"
mio = "0.6.19"
mio-uds = "0.6.7"
num_cpus = "1.10.0"
pin-utils = "0.1.0-alpha.4"
slab = "0.4.2"
[dev-dependencies]
femme = "1.3.0"
rand = "0.7.3"
femme = "1.1.0"
# surf = { git = "ssh://github.com/yoshuawuyts/surf" }
tempdir = "0.3.7"
futures = "0.3.4"
rand_xorshift = "0.2.0"
[[test]]
name = "stream"
required-features = ["unstable"]
[[example]]
name = "tcp-ipv4-and-6-echo"
required-features = ["unstable"]
[[example]]
name = "surf-web"
required-features = ["surf"]

README.md (181 lines changed)
View file

@@ -1,159 +1,64 @@
<h1 align="center">async-std</h1>
<div align="center">
<strong>
Async version of the Rust standard library
</strong>
</div>
# Async version of the Rust standard library
<br />
<!-- [![Build Status](https://travis-ci.org/stjepang/async-std.svg?branch=master)]( -->
<!-- https://travis-ci.org/stjepang/async-std) -->
<!-- [![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)]( -->
<!-- https://github.com/stjepang/async-std) -->
<!-- [![Cargo](https://img.shields.io/crates/v/async-std.svg)]( -->
<!-- https://crates.io/crates/async-std) -->
<!-- [![Documentation](https://docs.rs/async-std/badge.svg)]( -->
<!-- https://docs.rs/async-std) -->
[![chat](https://img.shields.io/discord/598880689856970762.svg?logo=discord)](https://discord.gg/JvZeVNe)
<div align="center">
<!-- CI status -->
<a href="https://github.com/async-rs/async-std/actions">
<img src="https://github.com/async-rs/async-std/workflows/CI/badge.svg"
alt="CI Status" />
</a>
<!-- Crates version -->
<a href="https://crates.io/crates/async-std">
<img src="https://img.shields.io/crates/v/async-std.svg?style=flat-square"
alt="Crates.io version" />
</a>
<!-- Downloads -->
<a href="https://crates.io/crates/async-std">
<img src="https://img.shields.io/crates/d/async-std.svg?style=flat-square"
alt="Download" />
</a>
<!-- docs.rs docs -->
<a href="https://docs.rs/async-std">
<img src="https://img.shields.io/badge/docs-latest-blue.svg?style=flat-square"
alt="docs.rs docs" />
</a>
<a href="https://discord.gg/JvZeVNe">
<img src="https://img.shields.io/discord/598880689856970762.svg?logo=discord&style=flat-square"
alt="chat" />
</a>
</div>
<div align="center">
<h3>
<a href="https://docs.rs/async-std">
API Docs
</a>
<span> | </span>
<a href="https://book.async.rs">
Book
</a>
<span> | </span>
<a href="https://github.com/async-rs/async-std/releases">
Releases
</a>
<span> | </span>
<a href="https://async.rs/contribute">
Contributing
</a>
</h3>
</div>
<br/>
This crate provides an async version of [`std`]. It provides all the interfaces
you are used to, but in an async version and ready for Rust's `async`/`await`
syntax.
This crate is an async version of [`std`].
[`std`]: https://doc.rust-lang.org/std/index.html
## Features
## Quickstart
- __Modern:__ Built from the ground up for `std::future` and `async/await` with
blazing fast compilation time.
- __Fast:__ Our robust allocator and threadpool designs provide ultra-high
throughput with predictably low latency.
- __Intuitive:__ Complete parity with the stdlib means you only need to learn
APIs once.
- __Clear:__ [Detailed documentation][docs] and [accessible guides][book] mean
using async Rust was never easier.
Clone the repo:
[docs]: https://docs.rs/async-std
[book]: https://book.async.rs
```
git clone git@github.com:stjepang/async-std.git && cd async-std
```
## Examples
Read the docs:
```
cargo doc --features docs --open
```
Check out the [examples](examples). To run an example:
```
cargo run --example hello-world
```
## Hello world
```rust
#![feature(async_await)]
use async_std::task;
async fn say_hello() {
println!("Hello, world!");
}
fn main() {
task::block_on(say_hello())
task::block_on(async {
println!("Hello, world!");
})
}
```
More examples, including networking and file access, can be found in our
[`examples`] directory and in our [documentation].
[`examples`]: https://github.com/async-rs/async-std/tree/master/examples
[documentation]: https://docs.rs/async-std#examples
[`task::block_on`]: https://docs.rs/async-std/*/async_std/task/fn.block_on.html
[`"attributes"` feature]: https://docs.rs/async-std/#features
## Philosophy
We believe Async Rust should be as easy to pick up as Sync Rust. We also believe
that the best API is the one you already know. And finally, we believe that
providing an asynchronous counterpart to the standard library is the best way for the
stdlib to provide a reliable basis for both performance and productivity.
Async-std is the embodiment of that vision. It combines single-allocation task
creation, with an adaptive lock-free executor, threadpool and network driver to
create a smooth system that processes work at a high pace with low latency,
using Rust's familiar stdlib API.
## Installation
With [cargo add][cargo-add] installed run:
```sh
$ cargo add async-std
```
We also provide a set of "unstable" features with async-std. See the [features
documentation] on how to enable them.
[cargo-add]: https://github.com/killercup/cargo-edit
[features documentation]: https://docs.rs/async-std/#features
## Ecosystem
* [async-tls](https://crates.io/crates/async-tls) — Async TLS/SSL streams using **Rustls**.
* [async-native-tls](https://crates.io/crates/async-native-tls) — **Native TLS** for Async. Native TLS for futures and async-std.
* [async-tungstenite](https://crates.io/crates/async-tungstenite) — Asynchronous **WebSockets** for async-std, tokio, gio and any std Futures runtime.
* [Tide](https://crates.io/crates/tide) — Serve the web. A modular **web framework** built around async/await.
* [SQLx](https://crates.io/crates/sqlx) — The Rust **SQL** Toolkit. SQLx is a 100% safe Rust library for Postgres and MySQL with compile-time checked queries.
* [Surf](https://crates.io/crates/surf) — Surf the web. Surf is a friendly **HTTP client** built for casual Rustaceans and veterans alike.
* [Xactor](https://crates.io/crates/xactor) — Xactor is a rust actors framework based on async-std.
* [async-graphql](https://crates.io/crates/async-graphql) — A GraphQL server library implemented in rust, with full support for async/await.
## License
<sup>
Licensed under either of <a href="LICENSE-APACHE">Apache License, Version
2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option.
</sup>
Licensed under either of
<br/>
* Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
#### Contribution
<sub>
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
be dual licensed as above, without any additional terms or conditions.
</sub>
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.

View file

@@ -1,40 +0,0 @@
#![feature(test)]
extern crate test;
use async_std::sync::{Arc, Mutex};
use async_std::task;
use test::Bencher;
#[bench]
fn create(b: &mut Bencher) {
b.iter(|| Mutex::new(()));
}
#[bench]
fn contention(b: &mut Bencher) {
b.iter(|| task::block_on(run(10, 1000)));
}
#[bench]
fn no_contention(b: &mut Bencher) {
b.iter(|| task::block_on(run(1, 10000)));
}
async fn run(task: usize, iter: usize) {
let m = Arc::new(Mutex::new(()));
let mut tasks = Vec::new();
for _ in 0..task {
let m = m.clone();
tasks.push(task::spawn(async move {
for _ in 0..iter {
let _ = m.lock().await;
}
}));
}
for t in tasks {
t.await;
}
}

View file

@@ -1,11 +0,0 @@
#![feature(test)]
extern crate test;
use async_std::task;
use test::Bencher;
#[bench]
fn block_on(b: &mut Bencher) {
b.iter(|| task::block_on(async {}));
}

View file

@@ -1,4 +1,4 @@
#![feature(test)]
#![feature(async_await, test)]
extern crate test;

View file

@@ -1,7 +0,0 @@
status = [
"Build and test (ubuntu-latest, nightly)",
"Build and test (windows-latest, nightly)",
"Build and test (macOS-latest, nightly)",
"Checking fmt and docs",
"Clippy check",
]

View file

@@ -1,19 +0,0 @@
set -euxo pipefail
# Based on the Rust-Embedded WG's book CI
# https://github.com/rust-embedded/book/blob/master/ci/install.sh
main() {
# Note - this will only accept releases tagged with v0.3.x
local tag=$(git ls-remote --tags --refs --exit-code \
https://github.com/rust-lang-nursery/mdbook \
| cut -d/ -f3 \
| grep -E '^v0\.3\.[0-9]+$' \
| sort --version-sort \
| tail -n1)
curl -LSfs https://japaric.github.io/trust/install.sh | \
sh -s -- --git rust-lang-nursery/mdbook --tag $tag
}
main

View file

@@ -4,10 +4,3 @@ language = "en"
multilingual = false
src = "src"
title = "Async programming in Rust with async-std"
[build]
create-missing = false
[output.html]
git-repository-url = "https://github.com/async-rs/async-std"
git-repository-icon = "fa-github"

View file

@@ -1,7 +1,7 @@
# Summary
- [Introduction](./introduction.md)
- [Welcome to `async-std`!](./overview/async-std.md)
- [Overview](./overview.md)
- [`async-std`](./overview/async-std.md)
- [`std::future` and `futures-rs`](./overview/std-and-library-futures.md)
- [Stability guarantees](./overview/stability-guarantees.md)
- [Async concepts using async-std](./concepts.md)
@@ -9,19 +9,15 @@
- [Tasks](./concepts/tasks.md)
- [Async read/write](./concepts/async-read-write.md)
- [Streams and Channels](./concepts/streams.md)
- [Tutorial: Implementing a chat](./tutorial/index.md)
- [Specification and Getting started](./tutorial/specification.md)
- [Writing an Accept Loop](./tutorial/accept_loop.md)
- [Receiving Messages](./tutorial/receiving_messages.md)
- [Sending Messages](./tutorial/sending_messages.md)
- [Connecting Readers and Writers](./tutorial/connecting_readers_and_writers.md)
- [All Together](./tutorial/all_together.md)
- [Clean Shutdown](./tutorial/clean_shutdown.md)
- [Handling Disconnection](./tutorial/handling_disconnection.md)
- [Implementing a Client](./tutorial/implementing_a_client.md)
- [Tutorials](./tutorials/index.md)
- [Integrating std::thread](./tutorials/integrating-std-thread.md)
- [Async Patterns](./patterns.md)
- [TODO: Collected Small Patterns](./patterns/small-patterns.md)
- [Production-Ready Accept Loop](./patterns/accept-loop.md)
- [Fork/Join](./patterns/fork-join.md)
- [Accepting requests](./patterns/accepting-concurrent-requests.md)
- [Proper Shutdown](./patterns/proper-shutdown.md)
- [Background Tasks](./patterns/background-tasks.md)
- [Testing](./patterns/testing.md)
- [Collected Small Patterns](./patterns/small-patterns.md)
- [Security practices](./security/index.md)
- [Security Disclosures and Policy](./security/policy.md)
- [Security disclosures and policy](./security/policy.md)
- [Glossary](./glossary.md)

View file

@@ -4,11 +4,11 @@
However, there are good reasons for that perception. Futures have three concepts at their base that seem to be a constant source of confusion: deferred computation, asynchronicity and independence of execution strategy.
These concepts are not hard, but something many people are not used to. This base confusion is amplified by many implementations oriented on details. Most explanations of these implementations also target advanced users, and can be hard for beginners. We try to provide both easy-to-understand primitives and approachable overviews of the concepts.
These concepts are not hard, but something many people are not used to. This base confusion is amplified by many implementations oriented on details and hard to understand. Most explanations of these implementations also target advanced users. We both try to provide easy to understand primitives and approachable overviews of the concepts.
Futures are a concept that abstracts over how code is run. By themselves, they do nothing. This is a weird concept in an imperative language, where usually one thing happens after the other - right now.
So how do Futures run? You decide! Futures do nothing without the piece of code _executing_ them. This part is called an _executor_. An _executor_ decides _when_ and _how_ to execute your futures. The `async-std::task` module provides you with an interface to such an executor.
So how do Futures run? You decide! Futures do nothing without the piece of code _executing_ them. This part is called an _executor_. An _executor_ decides _when_ and _how_ to execute your futures. The `async-std::task` module provides you with and interface to such an executor.
Let's start with a little bit of motivation, though.

View file

@@ -1 +1 @@
# TODO: Async read/write
# Async read/write

View file

@@ -6,138 +6,113 @@ Futures abstract over *computation*. They describe the "what", independent of th
## Send and Sync
Luckily, concurrent Rust already has two well-known and effective concepts abstracting over sharing between concurrent parts of a program: `Send` and `Sync`. Notably, both the `Send` and `Sync` traits abstract over *strategies* of concurrent work, compose neatly, and don't prescribe an implementation.
Luckily, concurrent Rust already has two well-known and effective concepts abstracting over sharing between Rust concurrent parts of a program: Send and Sync. Notably, both the Send and Sync traits abstract over *strategies* of concurrent work, compose neatly, and don't prescribe an implementation.
As a quick summary:
As a quick summary, `Send` abstracts over passing data in a computation over to another concurrent computation (let's call it the receiver), losing access to it on the sender side. In many programming languages, this strategy is commonly implemented, but missing support from the language side expects you to keep up this behaviour yourself. This is a regular source of bugs: senders keeping handles to sent things around and maybe even working with them after sending. Rust mitigates this problem by making this behaviour known. Types can be `Send` or not (by implementing the appropriate marker trait), allowing or disallowing sending them around.
- `Send` abstracts over *passing data* in a computation to another concurrent computation (let's call it the receiver), losing access to it on the sender side. In many programming languages, this strategy is commonly implemented, but missing support from the language side, and expects you to enforce the "losing access" behaviour yourself. This is a regular source of bugs: senders keeping handles to sent things around and maybe even working with them after sending. Rust mitigates this problem by making this behaviour known. Types can be `Send` or not (by implementing the appropriate marker trait), allowing or disallowing sending them around, and the ownership and borrowing rules prevent subsequent access.
- `Sync` is about *sharing data* between two concurrent parts of a program. This is another common pattern: as writing to a memory location or reading while another party is writing is inherently unsafe, this access needs to be moderated through synchronisation.[^1] There are many common ways for two parties to agree on not using the same part in memory at the same time, for example mutexes and spinlocks. Again, Rust gives you the option of (safely!) not caring. Rust gives you the ability to express that something *needs* synchronisation while not being specific about the *how*.
Note how we avoided any word like *"thread"*, but instead opted for "computation". The full power of `Send` and `Sync` is that they relieve you of the burden of knowing *what* shares. At the point of implementation, you only need to know which method of sharing is appropriate for the type at hand. This keeps reasoning local and is not influenced by whatever implementation the user of that type later uses.
Note how we avoided any word like *"thread"*, but instead opted for "computation". The full power of `Send` (and subsequently also `Sync`) is that they relieve you of the burden of knowing *what* shares. At the point of implementation, you only need to know which method of sharing is appropriate for the type at hand. This keeps reasoning local and is not influenced by whatever implementation the user of that type later uses.
`Sync` is about sharing data between two concurrent parts of a program. This is another common pattern: as writing to a memory location or reading while another party is writing is inherently unsafe, this access needs to be moderated through synchronisation.[^1] There are many common ways of two parties to agree on not using the same part in memory at the same time, for example mutexes and spinlocks. Again, Rust gives you the option of (safely!) not caring. Rust gives you the ability to express that something *needs* synchronisation while not being specific about the *how*.
`Send` and `Sync` can be composed in interesting fashions, but that's beyond the scope here. You can find examples in the [Rust Book][rust-book-sync].
[rust-book-sync]: https://doc.rust-lang.org/stable/book/ch16-04-extensible-concurrency-sync-and-send.html
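As a minimal illustration of `Send` (using a plain thread here only to keep the sketch self-contained), ownership moves to the receiving computation and the sender can no longer touch the data:
```rust
use std::thread;

fn main() {
    let data = vec![1u8, 2, 3];
    // `Vec<u8>` is `Send`, so its ownership may move into another thread.
    let handle = thread::spawn(move || data.len());
    // `data` is no longer accessible here; the compiler enforces the hand-off.
    assert_eq!(handle.join().unwrap(), 3);
}
```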
To sum up: Rust gives us the ability to safely abstract over important properties of concurrent programs, their data sharing. It does so in a very lightweight fashion; the language itself only knows about the two markers `Send` and `Sync` and helps us a little by deriving them itself, when possible. The rest is a library concern.
To sum up: Rust gives us the ability to safely abstract over important properties of concurrent programs: their data sharing. It does so in a very lightweight fashion: the language itself only knows about the two markers `Send` and `Sync` and helps us a little by deriving them itself, when possible. The rest is a library concern.
## An easy view of computation
While computation is a subject to write a whole [book](https://computationbook.com/) about, a very simplified view suffices for us: A sequence of composable operations which can branch based on a decision, run to succession and yield a result or yield an error
While computation is a subject to write a whole [book](https://computationbook.com/) about, a very simplified view of them suffices for us:
- computation is a sequence of composable operations
- they can branch based on a decision
- they either run to succession and yield a result or they can yield an error
## Deferring computation
As mentioned above, `Send` and `Sync` are about data. But programs are not only about data, they also talk about *computing* the data. And that's what [`Futures`][futures] do. We are going to have a close look at how that works in the next chapter. Let's look at what Futures allow us to express, in English. Futures go from this plan:
As mentioned above `Send` and `Sync` are about data. But programs are not only about data, they also talk about *computing* the data. And that's what [Futures][futures] do. We are going to have a close look at how that works in the next chapter. Let's look at what Futures allow us to express, in English. Futures go from this plan:
- Do X
- If X succeeded, do Y
- If X succeeds, do Y
towards:
towards
- Start doing X
- Once X succeeds, start doing Y
Remember the talk about "deferred computation" in the intro? That's all it is. Instead of telling the computer what to execute and decide upon *now*, you tell it what to start doing and how to react on potential events in the... well... `Future`.
[futures]: https://doc.rust-lang.org/std/future/trait.Future.html
Remember the talk about "deferred computation" in the intro? That's all it is. Instead of telling the computer what to execute and decide upon *now*, you tell it what to start doing and how to react on potential events the... well... `Future`.
## Orienting towards the beginning
Let's have a look at a simple function, specifically the return value:
```rust,edition2018
# use std::{fs::File, io, io::prelude::*};
#
fn read_file(path: &str) -> io::Result<String> {
    let mut file = File::open(path)?;
    let mut contents = String::new();
    file.read_to_string(&mut contents)?;
    Ok(contents)
}
```
You can call that at any time, so you are in full control over when you call it. But here's the problem: the moment you call it, you transfer control to the called function until it returns a value, eventually.
Note that this return value talks about the past. The past has a drawback: all decisions have been made. It has an advantage: the outcome is visible. We can unwrap the results of the program's past computation, and then decide what to do with it.
But we wanted to abstract over *computation* and let someone else choose how to run it. That's fundamentally incompatible with looking at the results of previous computation all the time. So, let's find a type that *describes* a computation without running it. Let's look at the function again:
```rust,edition2018
# use std::{fs::File, io, io::prelude::*};
#
fn read_file(path: &str) -> io::Result<String> {
    let mut file = File::open(path)?;
    let mut contents = String::new();
    file.read_to_string(&mut contents)?;
    Ok(contents)
}
```
Speaking in terms of time, we can only take action *before* calling the function or *after* the function has returned. This is not desirable, as it takes from us the ability to do something *while* it runs. When working with parallel code, it also takes from us the ability to start a parallel task while the first one runs (because we gave away control).
This is the moment where we could reach for [threads](https://en.wikipedia.org/wiki/Thread_(computing)). But threads are a very specific concurrency primitive and we said that we are searching for an abstraction.
What we are searching for is something that represents ongoing work towards a result in the future. Whenever we say "something" in Rust, we almost always mean a trait. Let's start with an incomplete definition of the `Future` trait:
```rust,edition2018
# use std::{pin::Pin, task::{Context, Poll}};
#
trait Future {
    type Output;
    fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output>;
}
```
Looking at it closely, we see the following:
- It is generic over the `Output`.
- It provides a function called `poll`, which allows us to check on the state of the current computation.
- (Ignore `Pin` and `Context` for now, you don't need them for high-level understanding.)
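To get a feeling for the trait before looking at `poll` in detail, here is a toy future written by hand; `CountDown` and its counter are invented for this sketch and are not part of any library:
```rust,edition2018
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

/// A future that becomes ready after it has been polled `remaining` more times.
struct CountDown {
    remaining: u32,
}

impl Future for CountDown {
    type Output = &'static str;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
        if self.remaining == 0 {
            Poll::Ready("done!")
        } else {
            self.remaining -= 1;
            // Ask to be polled again. A real future would only do this once the
            // event it is waiting for (a file, a socket, a timer) has happened.
            cx.waker().wake_by_ref();
            Poll::Pending
        }
    }
}
```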
Every call to `poll()` can result in one of these two cases:
1. The computation is done, `poll` will return [`Poll::Ready`](https://doc.rust-lang.org/std/task/enum.Poll.html#variant.Ready)
2. The computation has not finished executing, it will return [`Poll::Pending`](https://doc.rust-lang.org/std/task/enum.Poll.html#variant.Pending)
This allows us to externally check if a `Future` still has unfinished work, or is finally done and can give us the value. The most simple (but not efficient) way would be to just constantly poll futures in a loop. There are optimisations possible, and this is what a good runtime does for you.
Note that calling `poll` again after case 1 happened may result in confusing behaviour. See the [futures-docs](https://doc.rust-lang.org/std/future/trait.Future.html) for details.
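To make the "poll it in a loop" idea from above concrete, here is a sketch of a deliberately naive driver. The hand-rolled no-op `Waker` exists only so that we can construct a `Context`; a real runtime would put the task to sleep and wake it on an event instead of spinning.
```rust,edition2018
use std::future::Future;
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};

// A waker that does nothing when woken: good enough for a busy loop.
unsafe fn vt_clone(_: *const ()) -> RawWaker { noop_raw_waker() }
unsafe fn vt_wake(_: *const ()) {}
unsafe fn vt_wake_by_ref(_: *const ()) {}
unsafe fn vt_drop(_: *const ()) {}
static VTABLE: RawWakerVTable = RawWakerVTable::new(vt_clone, vt_wake, vt_wake_by_ref, vt_drop);

fn noop_raw_waker() -> RawWaker {
    RawWaker::new(std::ptr::null(), &VTABLE)
}

/// Drive a future to completion by polling it over and over.
fn naive_block_on<F: Future>(future: F) -> F::Output {
    let waker = unsafe { Waker::from_raw(noop_raw_waker()) };
    let mut cx = Context::from_waker(&waker);
    let mut future = Box::pin(future); // gives us something we can call `poll` on
    loop {
        match future.as_mut().poll(&mut cx) {
            Poll::Ready(value) => return value,
            Poll::Pending => std::thread::yield_now(), // a real runtime would sleep here
        }
    }
}

fn main() {
    assert_eq!(naive_block_on(async { 1 + 1 }), 2);
}
```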
## Async
While the `Future` trait has existed in Rust for a while, it was inconvenient to build and describe them. For this, Rust now has a special syntax: `async`. The example from above, implemented with `async-std`, would look like this:
```rust,edition2018
# extern crate async_std;
# use async_std::{fs::File, io, io::prelude::*};
#
async fn read_file(path: &str) -> io::Result<String> {
    let mut file = File::open(path).await?;
    let mut contents = String::new();
    file.read_to_string(&mut contents).await?;
    Ok(contents)
}
```
Amazingly little difference, right? All we did is label the function `async` and insert 2 special commands: `.await`.
This `async` function sets up a deferred computation. When this function is called, it will produce a `Future<Output = io::Result<String>>` instead of immediately returning a `io::Result<String>`. (Or, more precisely, generate a type for you that implements `Future<Output = io::Result<String>>`.)
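A small sketch of that difference (the `task::block_on` call is borrowed from the Tasks chapter further down, and `data.csv` is just a placeholder path):
```rust,edition2018
# extern crate async_std;
# use async_std::{fs::File, io, io::prelude::*, task};
#
# async fn read_file(path: &str) -> io::Result<String> {
#     let mut file = File::open(path).await?;
#     let mut contents = String::new();
#     file.read_to_string(&mut contents).await?;
#     Ok(contents)
# }
#
fn main() {
    // Calling the async fn does *not* open or read anything yet; it only creates a future.
    let future = read_file("data.csv");
    // Only when the future is driven, here with `block_on`, does the work happen.
    let contents: io::Result<String> = task::block_on(future);
    println!("{:?}", contents.map(|s| s.len()));
}
```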
## What does `.await` do?
The `.await` postfix does exactly what it says on the tin: the moment you use it, the code will wait until the requested action (e.g. opening a file or reading all data in it) is finished. The `.await?` is not special, it's just the application of the `?` operator to the result of `.await`. So, what is gained over the initial code example? We're getting futures and then immediately waiting for them?
The `.await` points act as a marker. Here, the code will wait for a `Future` to produce its value. How will a future finish? You don't need to care! The marker allows the component (usually called the “runtime”) in charge of *executing* this piece of code to take care of all the other things it has to do while the computation finishes. It will come back to this point when the operation you are doing in the background is done. This is why this style of programming is also called *evented programming*. We are waiting for *things to happen* (e.g. a file to be opened) and then react (by starting to read).
When executing 2 or more of these functions at the same time, our runtime system is then able to fill the wait time with handling *all the other events* currently going on.
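As a sketch of what that buys us (the file names are placeholders, and `task::spawn` is introduced in the next chapter), two reads can be started so that their waiting time overlaps:
```rust,edition2018
# extern crate async_std;
# use async_std::{fs::File, io, io::prelude::*, task};
#
# async fn read_file(path: &str) -> io::Result<String> {
#     let mut file = File::open(path).await?;
#     let mut contents = String::new();
#     file.read_to_string(&mut contents).await?;
#     Ok(contents)
# }
#
fn main() {
    task::block_on(async {
        // Both reads are started as tasks; while one waits for the disk,
        // the runtime can make progress on the other.
        let first = task::spawn(read_file("a.csv"));
        let second = task::spawn(read_file("b.csv"));
        let (a, b) = (first.await, second.await);
        println!("{:?} / {:?}", a.map(|s| s.len()), b.map(|s| s.len()));
    });
}
```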
## Conclusion
Working from values, we searched for something that expresses *working towards a value available later*. From there, we talked about the concept of polling.
A `Future` is any data type that does not represent a value, but the ability to *produce a value at some point in the future*. Implementations of this are very varied and detailed depending on use-case, but the interface is simple.
Next, we will introduce you to `tasks`, which we will use to actually *run* Futures.
[^1]: Two parties reading while it is guaranteed that no one is writing is always safe.

View file

@ -1 +1 @@
# TODO: Streams
# Streams

View file

@ -1,150 +1,83 @@
# Tasks
Now that we know what Futures are, we want to run them!
In `async-std`, the [`tasks`][tasks] module is responsible for this. The simplest way is using the `block_on` function:
```rust,edition2018
# extern crate async_std;
use async_std::{fs::File, io, prelude::*, task};

async fn read_file(path: &str) -> io::Result<String> {
    let mut file = File::open(path).await?;
    let mut contents = String::new();
    file.read_to_string(&mut contents).await?;
    Ok(contents)
}

fn main() {
    let reader_task = task::spawn(async {
        let result = read_file("data.csv").await;
        match result {
            Ok(s) => println!("{}", s),
            Err(e) => println!("Error reading file: {:?}", e)
        }
    });
    println!("Started task!");
    task::block_on(reader_task);
    println!("Stopped task!");
}
```
This asks the runtime baked into `async_std` to execute the code that reads a file. Let's go one by one, though, inside to outside.
```rust,edition2018
# extern crate async_std;
# use async_std::{fs::File, io, prelude::*, task};
#
# async fn read_file(path: &str) -> io::Result<String> {
# let mut file = File::open(path).await?;
# let mut contents = String::new();
# file.read_to_string(&mut contents).await?;
# Ok(contents)
# }
#
async {
    let result = read_file("data.csv").await;
    match result {
        Ok(s) => println!("{}", s),
        Err(e) => println!("Error reading file: {:?}", e)
    }
};
```
This is an `async` *block*. Async blocks are necessary to call `async` functions, and will instruct the compiler to include all the relevant instructions to do so. In Rust, all blocks return a value and `async` blocks happen to return a value of the kind `Future`.
But let's get to the interesting part:
```rust,edition2018
# extern crate async_std;
# use async_std::task;
task::spawn(async { });
```
`spawn` takes a `Future` and starts running it on a `Task`. It returns a `JoinHandle`. Futures in Rust are sometimes called *cold* Futures. You need something that starts running them. To run a Future, there may be some additional bookkeeping required, e.g. whether it's running or finished, where it is being placed in memory and what the current state is. This bookkeeping part is abstracted away in a `Task`.
A `Task` is similar to a `Thread`, with some minor differences: it will be scheduled by the program instead of the operating system kernel, and if it encounters a point where it needs to wait, the program itself is responsible for waking it up again. We'll talk a little bit about that later. An `async_std` task can also have a name and an ID, just like a thread.
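As a small sketch of that metadata (the task name "file-printer" is arbitrary), `async_std` exposes a task builder and a handle to the currently running task:
```rust,edition2018
# extern crate async_std;
# use async_std::task;
fn main() {
    let handle = task::Builder::new()
        .name("file-printer".into())
        .spawn(async {
            // the running task can inspect its own metadata
            println!("running in task: {:?}", task::current().name());
        })
        .unwrap();
    task::block_on(handle);
}
```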
For now, it is enough to know that once you have `spawn`ed a task, it will continue running in the background. The `JoinHandle` is itself a future that will finish once the `Task` has run to conclusion. Much like with `threads` and the `join` function, we can now call `block_on` on the handle to *block* the program (or the calling thread, to be specific) and wait for it to finish.
## Tasks in `async_std`
Tasks in `async_std` are one of the core abstractions. Much like Rust's `thread`s, they provide some practical functionality over the raw concept. `Tasks` have a relationship to the runtime, but they are in themselves separate. `async_std` tasks have a number of desirable properties:
- They are allocated in one single allocation
- All tasks have a *backchannel*, which allows them to propagate results and errors to the spawning task through the `JoinHandle`
- They carry useful metadata for debugging
- They support task local storage
`async_std`'s task API handles setup and teardown of a backing runtime for you and doesn't rely on a runtime being explicitly started.
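A quick sketch of the *backchannel* mentioned above: whatever value (or error) the task produces travels back through its `JoinHandle`; the numbers here are arbitrary.
```rust,edition2018
# extern crate async_std;
# use async_std::task;
fn main() {
    let handle = task::spawn(async {
        // the task's return value, including errors, comes back through the handle
        Ok::<u32, String>(42)
    });
    let result = task::block_on(handle);
    assert_eq!(result, Ok(42));
}
```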
## Blocking
`Task`s are assumed to run _concurrently_, potentially by sharing a thread of execution. This means that operations blocking an _operating system thread_, such as `std::thread::sleep` or an I/O function from Rust's `std` library, will _stop execution of all tasks sharing this thread_. Other libraries (such as database drivers) have similar behaviour. Note that _blocking the current thread_ is not in and of itself bad behaviour, just something that does not mix well with the concurrent execution model of `async-std`. Essentially, never do this:
```rust,edition2018
# extern crate async_std;
# use async_std::task;
fn main() {
    task::block_on(async {
        // this is std::fs, which blocks
        std::fs::read_to_string("test_file");
    })
}
```
If you want to mix operation kinds, consider putting such blocking operations on a separate `thread`.
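One way to do that, sketched here under the assumption that `async-std`'s `spawn_blocking` helper (gated behind the `unstable` feature at the time of writing) is available, is to ship the blocking call off to a thread reserved for blocking work:
```rust,edition2018
# extern crate async_std;
# use async_std::task;
fn main() {
    task::block_on(async {
        // assumption: `spawn_blocking` runs the closure on a thread pool
        // dedicated to blocking work, so the executor threads stay free
        let contents = task::spawn_blocking(|| {
            std::fs::read_to_string("test_file") // blocks an OS thread, not the executor
        })
        .await;
        println!("{:?}", contents.map(|s| s.len()));
    });
}
```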
## Errors and panics
Tasks report errors through normal patterns: If they are fallible, their `Output` should be of kind `Result<T,E>`.
In case of `panic`, behaviour differs depending on whether there is a reasonable part of the program that can address the `panic`. If there is not, the program _aborts_.
In practice, that means that `block_on` propagates panics to the blocking component:
```rust,edition2018,should_panic
# extern crate async_std;
# use async_std::task;
fn main() {
    task::block_on(async {
        panic!("test");
    });
}
```
```text
thread 'async-task-driver' panicked at 'test', examples/panic.rs:8:9
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace.
```
While a panic in a spawned task will abort the whole program:
```rust,edition2018,should_panic
# extern crate async_std;
# use async_std::task;
# use std::time::Duration;
task::spawn(async {
    panic!("test");
});
task::block_on(async {
    task::sleep(Duration::from_millis(10000)).await;
})
```
```text
thread 'async-task-driver' panicked at 'test', examples/panic.rs:8:9
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace.
Aborted (core dumped)
```
That might seem odd at first, but the other option would be to silently ignore panics in spawned tasks. The current behaviour can be changed by catching panics in the spawned task and reacting with custom behaviour. This gives users the choice of panic handling strategy.
## Conclusion
`async_std` comes with a useful `Task` type that works with an API similar to `std::thread`. It covers error and panic behaviour in a structured and defined way.
Tasks are separate concurrent units and sometimes they need to communicate. That's where `Stream`s come in.
[tasks]: https://docs.rs/async-std/latest/async_std/task/index.html

View file

@ -2,6 +2,6 @@
### blocking
"blocked" generally refers to conditions that keep a task from doing its work. For example, it might need data to be sent by a client before continuing. When tasks become blocked, usually, other tasks are scheduled.
"blocked" generally refers to conditions that keep a task from doing its work. For example, it might need data to be sent by a client before continuing. When tasks becomes blocked, usually, other tasks are scheduled.
Sometimes you hear that you should never call "blocking functions" in an async context. What this refers to is functions that block the current thread and do not yield control back. This keeps the executor from using this thread to schedule another task.

View file

@ -1,165 +0,0 @@
<svg width="1374px" height="1285px" viewBox="0 0 1374 1285" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<path d="M706,852 C716.938667,852 727.68106,851.159654 738.166143,849.539998 C838.322249,834.068644 915,747.488846 915,643 C915,527.572487 821.427513,434 706,434 C590.572487,434 497,527.572487 497,643 C497,652.323817 497.610544,661.505035 498.793833,670.505853 C508.320616,742.972354 554.186072,805.526844 619.024399,833.098604 C661.497141,851.159654 661.497141,852 706,852 Z" id="path-1"></path>
<filter x="-4.7%" y="-4.2%" width="109.3%" height="109.3%" filterUnits="objectBoundingBox" id="filter-2">
<feMorphology radius="12.5" operator="dilate" in="SourceAlpha" result="shadowSpreadOuter1"></feMorphology>
<feOffset dx="0" dy="2" in="shadowSpreadOuter1" result="shadowOffsetOuter1"></feOffset>
<feGaussianBlur stdDeviation="2" in="shadowOffsetOuter1" result="shadowBlurOuter1"></feGaussianBlur>
<feComposite in="shadowBlurOuter1" in2="SourceAlpha" operator="out" result="shadowBlurOuter1"></feComposite>
<feColorMatrix values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.5 0" type="matrix" in="shadowBlurOuter1"></feColorMatrix>
</filter>
<linearGradient x1="28.4109829%" y1="24.2192512%" x2="92.1248069%" y2="81.7846075%" id="linearGradient-3">
<stop stop-color="#EEEEEE" stop-opacity="0" offset="0.0950168919%"></stop>
<stop stop-color="#D8D8D8" offset="100%"></stop>
</linearGradient>
<linearGradient x1="50%" y1="0%" x2="50%" y2="100%" id="linearGradient-4">
<stop stop-color="#A40F41" offset="0%"></stop>
<stop stop-color="#C5309A" offset="100%"></stop>
</linearGradient>
<linearGradient x1="50%" y1="0%" x2="50%" y2="100%" id="linearGradient-5">
<stop stop-color="#950404" offset="0%"></stop>
<stop stop-color="#C5309A" offset="100%"></stop>
</linearGradient>
<linearGradient x1="50%" y1="0%" x2="50%" y2="100%" id="linearGradient-6">
<stop stop-color="#891D5C" offset="0%"></stop>
<stop stop-color="#C5309A" offset="100%"></stop>
</linearGradient>
<linearGradient x1="116.46259%" y1="60.7046394%" x2="10.0323017%" y2="7.76958074%" id="linearGradient-7">
<stop stop-color="#2AAD82" offset="0%"></stop>
<stop stop-color="#22736C" offset="100%"></stop>
</linearGradient>
<linearGradient x1="116.46259%" y1="62.4127457%" x2="10.0323017%" y2="1.03100256%" id="linearGradient-8">
<stop stop-color="#2AAD82" offset="0%"></stop>
<stop stop-color="#22736C" offset="100%"></stop>
</linearGradient>
<linearGradient x1="50%" y1="0%" x2="50%" y2="100%" id="linearGradient-9">
<stop stop-color="#C5304D" offset="0%"></stop>
<stop stop-color="#C5309A" offset="100%"></stop>
</linearGradient>
<linearGradient x1="50%" y1="0.0950168919%" x2="50%" y2="95.2897013%" id="linearGradient-10">
<stop stop-color="#D8D8D8" offset="0%"></stop>
<stop stop-color="#EEEEEE" stop-opacity="0" offset="99.9049831%"></stop>
</linearGradient>
<linearGradient x1="50%" y1="0%" x2="50%" y2="100%" id="linearGradient-11">
<stop stop-color="#C55430" offset="0%"></stop>
<stop stop-color="#C5309A" offset="100%"></stop>
</linearGradient>
<linearGradient x1="68.0933006%" y1="7.03456501%" x2="32.4391635%" y2="89.9179583%" id="linearGradient-12">
<stop stop-color="#D8D8D8" offset="0%"></stop>
<stop stop-color="#EEEEEE" stop-opacity="0" offset="99.9049831%"></stop>
</linearGradient>
<linearGradient x1="25.1838215%" y1="31.7409739%" x2="82.2374523%" y2="65.7834917%" id="linearGradient-13">
<stop stop-color="#7C110B" offset="0%"></stop>
<stop stop-color="#C5309A" offset="100%"></stop>
</linearGradient>
<linearGradient x1="50%" y1="0%" x2="50%" y2="100%" id="linearGradient-14">
<stop stop-color="#831662" offset="0%"></stop>
<stop stop-color="#C5309A" offset="100%"></stop>
</linearGradient>
<linearGradient x1="95.960242%" y1="50.5583424%" x2="20.3195277%" y2="51.5758201%" id="linearGradient-15">
<stop stop-color="#EEEEEE" stop-opacity="0" offset="0.0950168919%"></stop>
<stop stop-color="#D8D8D8" offset="100%"></stop>
</linearGradient>
</defs>
<g id="Page-1" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<rect id="Rectangle" stroke="#979797" stroke-width="0.9" fill="#D8D8D8" x="73.45" y="64.45" width="1226.1" height="1156.1"></rect>
<g id="Oval">
<use fill="black" fill-opacity="1" filter="url(#filter-2)" xlink:href="#path-1"></use>
<use stroke="#4754BA" stroke-width="25" fill="#D8D8D8" fill-rule="evenodd" xlink:href="#path-1"></use>
</g>
<path d="M704.5,909 C723.935147,909 742.880708,906.903834 761.122935,902.925248 C879.993749,876.999815 969,771.144169 969,644.5 C969,498.420684 850.579316,380 704.5,380 C558.420684,380 440,498.420684 440,644.5 C440,790.579316 558.420684,909 704.5,909 Z" id="Oval" stroke="#A40F41" stroke-width="25"></path>
<path d="M1018.74219,776.146484 C994.962875,739.938054 974.06244,714.007065 956.040884,698.353516 C938.019327,682.699966 906.672366,663.248794 862,640" id="Path-9" stroke="url(#linearGradient-3)" stroke-width="40" transform="translate(940.371094, 708.073242) rotate(65.000000) translate(-940.371094, -708.073242) "></path>
<path d="M884.958903,727.874599 C859.197627,817.028156 922.142746,808.316545 931.623958,803.100093" id="Path" stroke="url(#linearGradient-4)" stroke-width="25" transform="translate(905.217997, 766.985957) rotate(201.000000) translate(-905.217997, -766.985957) "></path>
<path d="M895.811647,408.795066 C854.364478,470.148546 887.964613,543.019134 954.612408,543.379666" id="Path" stroke="url(#linearGradient-5)" stroke-width="25" transform="translate(916.124955, 476.087366) rotate(115.000000) translate(-916.124955, -476.087366) "></path>
<path d="M619.570862,1085.1537 C796.854266,1085.1537 940.570862,941.437104 940.570862,764.153699 C940.570862,586.870295 796.854266,443.153699 619.570862,443.153699 C594.140006,443.153699 569.399861,446.110981 545.67556,451.70041" id="Oval" stroke="#950404" stroke-width="25" transform="translate(743.123211, 764.153699) rotate(73.000000) translate(-743.123211, -764.153699) "></path>
<path d="M502.869788,690.410552 C638.771845,653.065191 738.613203,528.619973 738.613203,380.855556 C738.613203,317.165187 720.064298,257.807147 688.073896,207.888845" id="Oval" stroke="#891D5C" stroke-width="25" transform="translate(620.741495, 449.149699) rotate(228.000000) translate(-620.741495, -449.149699) "></path>
<path d="M421.520294,540 C380.073125,601.353481 417.22664,673.13139 480.044509,676.688247" id="Path" stroke="url(#linearGradient-6)" stroke-width="25" transform="translate(442.022255, 608.344123) rotate(-21.000000) translate(-442.022255, -608.344123) "></path>
<g id="Group-5" transform="translate(813.000000, 459.000000)">
<circle id="Oval" fill="#C5309A" cx="27" cy="27" r="27"></circle>
<text id="T" font-family="Helvetica-Bold, Helvetica" font-size="36" font-weight="bold" fill="#000000">
<tspan x="16" y="42">T</tspan>
</text>
</g>
<g id="Group-5" transform="translate(871.000000, 694.000000)">
<circle id="Oval" fill="#C5309A" cx="27" cy="27" r="27"></circle>
<text id="T" font-family="Helvetica-Bold, Helvetica" font-size="36" font-weight="bold" fill="#000000">
<tspan x="16" y="42">T</tspan>
</text>
</g>
<path d="M856.833213,230.918522 C811.064617,214.483463 761.730372,205.528841 710.304663,205.528841 C470.889224,205.528841 276.804663,399.613402 276.804663,639.028841" id="Oval" stroke="#C55430" stroke-width="25" transform="translate(566.818938, 422.278841) rotate(1.000000) translate(-566.818938, -422.278841) "></path>
<path d="M181.140977,873.010134 C311.563924,873.010134 428.534531,815.413818 508.005111,724.268874" id="Oval" stroke="#C55430" stroke-width="25" transform="translate(344.573044, 798.639504) rotate(91.000000) translate(-344.573044, -798.639504) "></path>
<path d="M572.45778,289.560329 C526.61156,307.031684 485.058992,333.203921 449.818409,366.058709 C376.098321,434.78793 330,532.760545 330,641.5 C330,808.206124 438.346257,949.606624 588.463312,999.12604" id="Oval" stroke="#7C110B" stroke-width="25"></path>
<path d="M590,1000.5 C641.396689,1014.38673 718,1040.22266 753,1071 C788,1101.77734 809.269977,1138.25544 846,1204.14218" id="Line-3" stroke="url(#linearGradient-7)" stroke-width="25" stroke-linecap="square"></path>
<path d="M395,1017 C446.396689,1030.88673 505.230023,1056.71513 530.5,1079.58984 C555.769977,1102.46456 586.769977,1146.84529 623.5,1212.73202" id="Line-3" stroke="url(#linearGradient-8)" stroke-width="25" stroke-linecap="square" transform="translate(509.250000, 1114.866011) rotate(21.000000) translate(-509.250000, -1114.866011) "></path>
<path d="M1197.1056,905.82221 C1128.21718,829.771195 1028.67983,782 917.984925,782 C838.497818,782 764.763882,806.632256 704,848.679886" id="Oval" stroke="#831662" stroke-width="25" transform="translate(950.552799, 843.911105) rotate(124.000000) translate(-950.552799, -843.911105) "></path>
<path d="M1200.1056,502.82221 C1131.21718,426.771195 1031.67983,379 920.984925,379 C841.497818,379 767.763882,403.632256 707,445.679886" id="Oval" stroke="#831662" stroke-width="25" transform="translate(953.552799, 440.911105) rotate(45.000000) translate(-953.552799, -440.911105) "></path>
<path d="M1270.52459,759.801126 C1191.86296,650.129853 1063.28842,578.681563 918.026229,578.681563 C682.124696,578.681563 490.233162,767.110765 484.651185,1001.66961" id="Oval" stroke="#C5304D" stroke-width="25" transform="translate(877.587887, 790.175586) rotate(142.000000) translate(-877.587887, -790.175586) "></path>
<path d="M745.790706,295.972305 C735.140505,401.521944 829.226956,580.27142 1027.69658,467.022711" id="Path" stroke="url(#linearGradient-9)" stroke-width="25" transform="translate(886.337507, 399.625456) rotate(139.000000) translate(-886.337507, -399.625456) "></path>
<path d="M0.5,0.5 L0.5,1284.5 L1373.5,1284.5 L1373.5,0.5 L0.5,0.5 Z M710,1149.5 C429.163205,1149.5 201.5,921.836795 201.5,641 C201.5,360.163205 429.163205,132.5 710,132.5 C990.836795,132.5 1218.5,360.163205 1218.5,641 C1218.5,921.836795 990.836795,1149.5 710,1149.5 Z" id="Combined-Shape" stroke="#979797" fill="#5F5F5F"></path>
<path d="M761.827377,990 L768.172623,1037" id="Line" stroke="#831662" stroke-width="11" stroke-linecap="square" transform="translate(765.172623, 1013.500000) rotate(-1.000000) translate(-765.172623, -1013.500000) "></path>
<path d="M876.827377,354 L883.172623,401" id="Line" stroke="#891D5C" stroke-width="11" stroke-linecap="square" transform="translate(880.172623, 377.500000) rotate(40.000000) translate(-880.172623, -377.500000) "></path>
<path d="M578.827377,1031 L585.172623,1078" id="Line" stroke="#C5304D" stroke-width="11" stroke-linecap="square" transform="translate(582.172623, 1054.500000) rotate(24.000000) translate(-582.172623, -1054.500000) "></path>
<circle id="Oval" fill="#22736C" cx="567" cy="994" r="27"></circle>
<g id="Group-7" transform="translate(368.000000, 690.000000)">
<polygon id="Triangle" stroke="#000000" stroke-width="7" fill="#FF0000" stroke-linejoin="round" points="36 0 72 61 0 61"></polygon>
<text id="!" font-family="Helvetica-Bold, Helvetica" font-size="36" font-weight="bold" fill="#000000">
<tspan x="30" y="49">!</tspan>
</text>
</g>
<g id="Group-4" transform="translate(826.000000, 207.000000)">
<circle id="Oval" fill="#22736C" cx="27" cy="27" r="27"></circle>
<path d="M20.5,15 C20.5,26.1350598 20.5,26.1947111 20.5,32.3896484 C20.5,36.4265139 20.5,38.8274781 20.5,38.8274781" id="Path-10" stroke="#0E0B0B" stroke-width="6"></path>
<path d="M32.5,15 C32.5,26.2156828 32.5,26.275766 32.5,32.5155575 C32.5,36.5816517 32.5,39 32.5,39" id="Path-10" stroke="#0E0B0B" stroke-width="6"></path>
</g>
<g id="Group-4" transform="translate(550.000000, 263.000000)">
<circle id="Oval" fill="#22736C" cx="27" cy="27" r="27"></circle>
<path d="M20.5,15 C20.5,26.1350598 20.5,26.1947111 20.5,32.3896484 C20.5,36.4265139 20.5,38.8274781 20.5,38.8274781" id="Path-10" stroke="#0E0B0B" stroke-width="6"></path>
<path d="M32.5,15 C32.5,26.2156828 32.5,26.275766 32.5,32.5155575 C32.5,36.5816517 32.5,39 32.5,39" id="Path-10" stroke="#0E0B0B" stroke-width="6"></path>
</g>
<path d="M273,643 C277.374075,701.818436 284.163789,745.656327 293.369141,774.513672 C302.574493,803.371017 322.527618,843.367762 353.228516,894.503906" id="Path-7" stroke="url(#linearGradient-10)" stroke-width="40"></path>
<path d="M273.065918,629.915039 C273.065918,818.866168 420,921.421875 561.5,789" id="Path" stroke="url(#linearGradient-11)" stroke-width="25"></path>
<path d="M453.154297,357.556641 C430.653051,382.313075 412.750056,404.231044 399.445312,423.310547 C386.140569,442.39005 370.73497,472.350987 353.228516,513.193359" id="Path-8" stroke="url(#linearGradient-12)" stroke-width="40"></path>
<path d="M382.236018,400.025232 C385.073857,470.664321 466.83221,557.311482 559.722346,498.725051" id="Path" stroke="url(#linearGradient-13)" stroke-width="25" transform="translate(470.979182, 459.044419) rotate(53.000000) translate(-470.979182, -459.044419) "></path>
<path d="M577.380389,294.176757 C611.83811,406.069448 704.823594,424.481528 795.900453,373.184668" id="Path" stroke="url(#linearGradient-14)" stroke-width="25" transform="translate(686.640421, 347.293755) rotate(115.000000) translate(-686.640421, -347.293755) "></path>
<polygon id="Triangle" fill="#000000" transform="translate(563.500000, 994.500000) rotate(-89.000000) translate(-563.500000, -994.500000) " points="563.5 982 580 1007 547 1007"></polygon>
<g id="Group-8" transform="translate(393.000000, 936.000000)">
<g id="Group-3">
<circle id="Oval" fill="#22736C" cx="27" cy="27" r="27"></circle>
<polygon id="Triangle" fill="#000000" transform="translate(22.500000, 27.500000) rotate(-89.000000) translate(-22.500000, -27.500000) " points="22.5 15 39 40 6 40"></polygon>
</g>
</g>
<g id="Group-5" transform="translate(594.000000, 427.000000)">
<circle id="Oval" fill="#C5309A" cx="27" cy="27" r="27"></circle>
<text id="T" font-family="Helvetica-Bold, Helvetica" font-size="36" font-weight="bold" fill="#000000">
<tspan x="16" y="42">T</tspan>
</text>
</g>
<g id="Group-5" transform="translate(715.000000, 413.000000)">
<circle id="Oval" fill="#C5309A" cx="27" cy="27" r="27"></circle>
<text id="T" font-family="Helvetica-Bold, Helvetica" font-size="36" font-weight="bold" fill="#000000">
<tspan x="16" y="42">T</tspan>
</text>
</g>
<g id="Group-5" transform="translate(484.000000, 531.000000)">
<circle id="Oval" fill="#C5309A" cx="27" cy="27" r="27"></circle>
<text id="T" font-family="Helvetica-Bold, Helvetica" font-size="36" font-weight="bold" fill="#000000">
<tspan x="16" y="42">T</tspan>
</text>
</g>
<path d="M647,814.549371 C699.060495,833.490862 735.177666,831.528281 748.827069,829.456675 C763.728946,827.194978 784.258563,819.70942 810.415922,807" id="Path-2" stroke="url(#linearGradient-15)" stroke-width="117" transform="translate(728.707961, 818.866168) rotate(-3.000000) translate(-728.707961, -818.866168) "></path>
<g id="Group-5" transform="translate(536.000000, 764.000000)">
<circle id="Oval" fill="#C5309A" cx="27" cy="27" r="27"></circle>
<text id="T" font-family="Helvetica-Bold, Helvetica" font-size="36" font-weight="bold" fill="#000000">
<tspan x="16" y="42">T</tspan>
</text>
</g>
<g id="Group-5" transform="translate(475.000000, 634.000000)">
<circle id="Oval" fill="#C5309A" cx="27" cy="27" r="27"></circle>
<text id="T" font-family="Helvetica-Bold, Helvetica" font-size="36" font-weight="bold" fill="#000000">
<tspan x="16" y="42">T</tspan>
</text>
</g>
<path d="" id="Path-3" stroke="#979797"></path>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 17 KiB

View file

@ -1,66 +0,0 @@
<svg width="1395px" height="448px" viewBox="0 0 1395 448" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient x1="55.0356976%" y1="73.5772375%" x2="4.06132959%" y2="75.1295482%" id="linearGradient-1">
<stop stop-color="#831662" offset="0%"></stop>
<stop stop-color="#C5309A" offset="100%"></stop>
</linearGradient>
<linearGradient x1="55.0356976%" y1="69.1628915%" x2="4.06132959%" y2="70.4245643%" id="linearGradient-2">
<stop stop-color="#831662" offset="0%"></stop>
<stop stop-color="#C5309A" offset="100%"></stop>
</linearGradient>
<linearGradient x1="32.2245124%" y1="88.5052261%" x2="9.22591829%" y2="88.5052261%" id="linearGradient-3">
<stop stop-color="#C5304D" offset="0%"></stop>
<stop stop-color="#C5309A" offset="100%"></stop>
</linearGradient>
<linearGradient x1="57.8152523%" y1="90.4434915%" x2="4.06132959%" y2="90.4434915%" id="linearGradient-4">
<stop stop-color="#2AAD82" offset="0%"></stop>
<stop stop-color="#1A2265" offset="100%"></stop>
</linearGradient>
</defs>
<g id="Page-1" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<rect id="Rectangle" stroke="#979797" fill="#D8D8D8" x="0.5" y="322.5" width="1394" height="125"></rect>
<path d="M854.827377,358 L861.172623,405" id="Line" stroke="#C5304D" stroke-width="11" stroke-linecap="square" transform="translate(858.172623, 381.500000) rotate(8.000000) translate(-858.172623, -381.500000) "></path>
<g id="Group" transform="translate(80.000000, 354.000000)">
<circle id="Oval" fill="#C5309A" cx="27" cy="27" r="27"></circle>
<text id="S" font-family="Helvetica" font-size="36" font-weight="normal" fill="#000000">
<tspan x="15" y="41">S</tspan>
</text>
</g>
<rect id="Rectangle" fill="#4754BA" x="0" y="161" width="1395" height="161"></rect>
<rect id="Rectangle" fill="#2A867E" x="0" y="0" width="1395" height="161"></rect>
<polyline id="Line-2" stroke="url(#linearGradient-1)" stroke-width="25" stroke-linecap="square" points="478.175781 380.76092 414.5625 380.76092 370.058594 88.4179687 315.320313 380.76092 154.283195 381 107 381"></polyline>
<polyline id="Line-2" stroke="url(#linearGradient-2)" stroke-width="25" stroke-linecap="square" points="1319.71394 380.342952 1303.27644 380.342952 1258.77253 88 1204.03425 380.342952 1042.99713 380.582031 908 380.582031"></polyline>
<polyline id="Line-2" stroke="url(#linearGradient-3)" stroke-width="25" stroke-linecap="square" points="844.175781 380.342952 780.5625 380.342952 736.058594 88 681.320313 380.342952 562.011719 381"></polyline>
<g id="Group-4" transform="translate(470.000000, 355.000000)">
<circle id="Oval" fill="#22736C" cx="27" cy="27" r="27"></circle>
<path d="M20.5,15 C20.5,26.1350598 20.5,26.1947111 20.5,32.3896484 C20.5,36.4265139 20.5,38.8274781 20.5,38.8274781" id="Path-10" stroke="#0E0B0B" stroke-width="6"></path>
<path d="M32.5,15 C32.5,26.2156828 32.5,26.275766 32.5,32.5155575 C32.5,36.5816517 32.5,39 32.5,39" id="Path-10" stroke="#0E0B0B" stroke-width="6"></path>
</g>
<g id="Group-7" transform="translate(1290.000000, 338.000000)">
<polygon id="Triangle" stroke="#000000" stroke-width="7" fill="#FF0000" stroke-linejoin="round" points="36 0 72 61 0 61"></polygon>
<text id="!" font-family="Helvetica-Bold, Helvetica" font-size="36" font-weight="bold" fill="#000000">
<tspan x="30" y="49">!</tspan>
</text>
</g>
<polyline id="Line-2" stroke="url(#linearGradient-4)" stroke-width="25" stroke-linecap="square" points="569.088363 380.342952 524.584456 88 492 262.025123"></polyline>
<polyline id="Line-2" stroke="url(#linearGradient-4)" stroke-width="25" stroke-linecap="square" points="908.088363 380.342952 863.584456 88 831 262.025123"></polyline>
<polyline id="Line-2" stroke="url(#linearGradient-4)" stroke-width="25" stroke-linecap="square" points="107.088363 380.342952 62.5844564 88 30 262.025123"></polyline>
<g id="Group-6" transform="translate(908.000000, 381.000000) rotate(180.000000) translate(-908.000000, -381.000000) translate(881.000000, 354.000000)">
<circle id="Oval" fill="#22736C" cx="27" cy="27" r="27"></circle>
<polygon id="Triangle" fill="#000000" transform="translate(25.500000, 26.500000) rotate(-89.000000) translate(-25.500000, -26.500000) " points="25.5 14 42 39 9 39"></polygon>
</g>
<g id="Group-5" transform="translate(80.000000, 354.000000)">
<circle id="Oval" fill="#C5309A" cx="27" cy="27" r="27"></circle>
<text id="T" font-family="Helvetica-Bold, Helvetica" font-size="36" font-weight="bold" fill="#000000">
<tspan x="16" y="42">T</tspan>
</text>
</g>
<g id="Group-5" transform="translate(542.000000, 355.000000)">
<circle id="Oval" fill="#C5309A" cx="27" cy="27" r="27"></circle>
<text id="T" font-family="Helvetica-Bold, Helvetica" font-size="36" font-weight="bold" fill="#000000">
<tspan x="16" y="42">T</tspan>
</text>
</g>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 5.3 KiB

View file

@ -1,9 +0,0 @@
# Introduction
![async-std logo](./images/horizontal_color.svg)
This book serves as high-level documentation for `async-std` and a way of learning async programming in Rust through it. As such, it focuses on the `async-std` API and the task model it gives you.
Please note that the Rust project provides its own book on asynchronous programming, called ["Asynchronous Programming in Rust"][async-book], which we highly recommend reading along with this book, as it provides a different, wider view on the topic.
[async-book]: https://rust-lang.github.io/async-book/

10
docs/src/overview.md Normal file
View file

@ -0,0 +1,10 @@
# Overview
![async-std logo](./images/horizontal_color.svg)
`async-std`, along with its [supporting libraries][organization], is a library making your life in async programming easier. It provides fundamental implementations for downstream libraries and applications alike. The name reflects the approach of this library: it is modeled as closely to the Rust main standard library as possible, replacing all components with async counterparts.
`async-std` provides an interface to all important primitives: filesystem operations, network operations and concurrency basics like timers. It also exposes a `task` module in a model similar to the `thread` module found in the Rust standard library. It does not only include I/O primitives, but also `async/await` compatible versions of primitives like `Mutex`. You can read more about `async-std` in [the overview chapter][overview-std].
[organization]: https://github.com/async-std/async-std
[overview-std]: overview/async-std/

View file

@ -1,7 +1 @@
# Welcome to `async-std`
`async-std`, along with its [supporting libraries][organization], is a library making your life in async programming easier. It provides fundamental implementations for downstream libraries and applications alike. The name reflects the approach of this library: it is as closely modeled to the Rust main standard library as possible, replacing all components by async counterparts.
`async-std` provides an interface to all important primitives: filesystem operations, network operations and concurrency basics like timers. It also exposes a `task` in a model similar to the `thread` module found in the Rust standard lib. But it does not only include I/O primitives, but also `async/await` compatible versions of primitives like `Mutex`.
[organization]: https://github.com/async-rs
# async-std

View file

@ -5,7 +5,7 @@
In short: we are versioning our software as `MAJOR.MINOR.PATCH`. We increase the:
* MAJOR version when there are incompatible API changes,
* MINOR version when we introduce functionality in a backwards-compatible manner
* PATCH version when we make backwards-compatible bug fixes
We will provide migration documentation between major versions.
@ -31,10 +31,10 @@ In general, this crate will be conservative with respect to the minimum supporte
## Security fixes
Security fixes will be applied to _all_ minor branches of this library in all _supported_ major revisions. This policy might change in the future, in which case we give a notice at least _3 months_ ahead.
## Credits
This policy is based on [BurntSushi's regex crate][regex-policy].
[regex-policy]: https://github.com/rust-lang/regex#minimum-rust-version-policy

View file

@ -4,19 +4,19 @@ Rust has two kinds of types commonly referred to as `Future`:
- the first is `std::future::Future` from Rusts [standard library](https://doc.rust-lang.org/std/future/trait.Future.html).
- the second is `futures::future::Future` from the [futures-rs crate](https://docs.rs/futures/0.3/futures/prelude/trait.Future.html).
The future defined in the [futures-rs](https://docs.rs/futures/0.3/futures/prelude/trait.Future.html) crate was the original implementation of the type. To enable the `async/await` syntax, the core Future trait was moved into Rusts standard library and became `std::future::Future`. In some sense, the `std::future::Future` can be seen as a minimal subset of `futures::future::Future`.
It is critical to understand the difference between `std::future::Future` and `futures::future::Future`, and the approach that `async-std` takes towards them. In itself, `std::future::Future` is not something you want to interact with as a user—except by calling `.await` on it. The inner workings of `std::future::Future` are mostly of interest to people implementing `Future`. Make no mistake—this is very useful! Most of the functionality that used to be defined on `Future` itself has been moved to an extension trait called [`FuturesExt`](https://docs.rs/futures/0.3/futures/future/trait.FutureExt.html). From this information, you might be able to infer that the `futures` library serves as an extension to the core Rust async features.
In the same tradition as `futures`, `async-std` re-exports the core `std::future::Future` type. You can actively opt into the extensions provided by the `futures` crate by adding it to your `Cargo.toml` and importing `FuturesExt`.
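For example, a minimal sketch of opting in (assuming `futures = "0.3"` has been added to `Cargo.toml`):
```rust,edition2018
# extern crate async_std;
# extern crate futures;
use async_std::task;
use futures::future::FutureExt; // extension methods such as `map` live here

fn main() {
    // `map` is not on `std::future::Future`; it comes from the `FutureExt` extension trait.
    let answer = task::block_on(async { 20 }.map(|n| n * 2));
    assert_eq!(answer, 40);
}
```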
## Interfaces and Stability
`async-std` aims to be a stable and reliable library, at the level of the Rust standard library. This also means that we don't rely on the `futures` library for our interface. Yet, we appreciate that many users have come to like the conveniences that `futures-rs` brings. For that reason, `async-std` implements all `futures` traits for its types.
Luckily, the approach from above gives you full flexibility. If you care about stability a lot, you can just use `async-std` as is. If you prefer the `futures` library interfaces, you link those in. Both uses are first class.
## `async_std::future`
@ -24,4 +24,4 @@ Theres some support functions that we see as important for working with futur
## Streams and Read/Write/Seek/BufRead traits
Due to limitations of the Rust compiler, those are currently implemented in `async_std`, but cannot be implemented by users themselves.

View file

@ -1,266 +0,0 @@
# Production-Ready Accept Loop
A production-ready accept loop needs the following things:
1. Handling errors
2. Limiting the number of simultaneous connections to avoid denial-of-service
(DoS) attacks
## Handling errors
There are two kinds of errors in an accept loop:
1. Per-connection errors. The system uses them to notify that there was a
connection in the queue and it was dropped by the peer. Subsequent connections
may already be queued, so the next connection must be accepted immediately.
2. Resource shortages. When these are encountered it doesn't make sense to
accept the next socket immediately. But the listener stays active, so your server
should try to accept a socket later.
Here is an example of a per-connection error (printed in normal and debug mode):
```
Error: Connection reset by peer (os error 104)
Error: Os { code: 104, kind: ConnectionReset, message: "Connection reset by peer" }
```
And the following is the most common example of a resource shortage error:
```
Error: Too many open files (os error 24)
Error: Os { code: 24, kind: Other, message: "Too many open files" }
```
### Testing Application
To test your application for these errors try the following (this works
on Unix-like systems only).
Lower the open file limit and start the application:
```
$ ulimit -n 100
$ cargo run --example your_app
Compiling your_app v0.1.0 (/work)
Finished dev [unoptimized + debuginfo] target(s) in 5.47s
Running `target/debug/examples/your_app`
Server is listening on: http://127.0.0.1:1234
```
Then in another console run the [`wrk`] benchmark tool:
```
$ wrk -c 1000 http://127.0.0.1:1234
Running 10s test @ http://localhost:8080/
2 threads and 1000 connections
$ telnet localhost 1234
Trying ::1...
Connected to localhost.
```
It is important to check the following things:
1. The application doesn't crash on error (but may log errors, see below)
2. It's possible to connect to the application again once the load is stopped
(a few seconds after `wrk` finishes). This is what `telnet` does in the example above;
make sure it prints `Connected to <hostname>`.
3. The `Too many open files` error is logged in the appropriate log. This
requires setting the "maximum number of simultaneous connections" parameter (see
below) of your application to a value greater than `100` for this example.
4. Check CPU usage of the app while doing a test. It should not occupy 100%
of a single CPU core (it's unlikely that you can exhaust the CPU with 1000
connections in Rust, so this means error handling is not right).
#### Testing non-HTTP applications
If it's possible, use the appropriate benchmark tool and set the appropriate
number of connections. For example `redis-benchmark` has a `-c` parameter for
that, if you implement the redis protocol.
Alternatively, you can still use `wrk`; just make sure that the connection is not
immediately closed. If it is, put a temporary timeout before handing
the connection to the protocol handler, like this:
```rust,edition2018
# extern crate async_std;
# use std::time::Duration;
# use async_std::{
#     net::{TcpListener, TcpStream, ToSocketAddrs},
#     prelude::*,
#     task,
# };
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
#
# async fn connection_loop(_stream: std::io::Result<TcpStream>) {} // stub so the snippet compiles
#
# async fn accept_loop(addr: impl ToSocketAddrs) -> Result<()> {
#     let listener = TcpListener::bind(addr).await?;
#     let mut incoming = listener.incoming();
while let Some(stream) = incoming.next().await {
    task::spawn(async {
        task::sleep(Duration::from_secs(10)).await; // 1
        connection_loop(stream).await;
    });
}
# Ok(())
# }
```
1. Make sure the sleep coroutine is inside the spawned task, not in the loop.
[`wrk`]: https://github.com/wg/wrk
### Handling Errors Manually
Here is how a basic accept loop could look:
```rust,edition2018
# extern crate async_std;
# use std::time::Duration;
# use async_std::{
#     net::{TcpListener, ToSocketAddrs},
#     prelude::*,
#     task,
# };
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
#
# fn is_connection_error(_e: &std::io::Error) -> bool { false } // stub so the snippet compiles
#
async fn accept_loop(addr: impl ToSocketAddrs) -> Result<()> {
    let listener = TcpListener::bind(addr).await?;
    let mut incoming = listener.incoming();
    while let Some(result) = incoming.next().await {
        let stream = match result {
            Err(ref e) if is_connection_error(e) => continue, // 1
            Err(e) => {
                eprintln!("Error: {}. Pausing for 500ms.", e); // 3
                task::sleep(Duration::from_millis(500)).await; // 2
                continue;
            }
            Ok(s) => s,
        };
        // body
    }
    Ok(())
}
```
1. Ignore per-connection errors.
2. Sleep and continue on resource shortage.
3. It's important to log the message, because these errors commonly indicate a
misconfiguration of the system and are helpful for the operations people running
the application.
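The snippet above only stubs out `is_connection_error`; one plausible definition (exactly which error kinds count as per-connection errors is a judgement call) might be:
```rust,edition2018
use std::io;

fn is_connection_error(e: &io::Error) -> bool {
    match e.kind() {
        io::ErrorKind::ConnectionRefused
        | io::ErrorKind::ConnectionAborted
        | io::ErrorKind::ConnectionReset => true,
        _ => false,
    }
}
```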
Be sure to [test your application](#testing-application).
### External Crates
The crate [`async-listen`] has a helper to achieve this task:
```rust,edition2018
# extern crate async_std;
# extern crate async_listen;
# use std::time::Duration;
# use async_std::{
#     io,
#     net::{TcpListener, ToSocketAddrs},
#     prelude::*,
# };
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
#
use async_listen::{ListenExt, error_hint};

async fn accept_loop(addr: impl ToSocketAddrs) -> Result<()> {
    let listener = TcpListener::bind(addr).await?;
    let mut incoming = listener
        .incoming()
        .log_warnings(log_accept_error) // 1
        .handle_errors(Duration::from_millis(500));
    while let Some(socket) = incoming.next().await { // 2
        // body
    }
    Ok(())
}

fn log_accept_error(e: &io::Error) {
    eprintln!("Error: {}. Listener paused for 0.5s. {}", e, error_hint(e)) // 3
}
```
1. Logs resource shortages (`async-listen` calls them warnings). If you use the
`log` crate or any other logging facility in your app, this should go to the log.
2. The stream yields sockets without the `Result` wrapper after `handle_errors` because
all errors are already handled.
3. Together with the error we print a hint, which explains some errors for end
users. For example, it recommends increasing the open file limit and gives
a link.
[`async-listen`]: https://crates.io/crates/async-listen/
Be sure to [test your application](#testing-application).
## Connections Limit
Even if you've applied everything described in the
[Handling Errors](#handling-errors) section, there is still a problem.
Let's imagine you have a server that needs to open a file to process a
client request. At some point, you might encounter the following situation:
1. There are as many client connections as the maximum number of file descriptors
allowed for the application.
2. The listener gets the `Too many open files` error, so it sleeps.
3. Some client sends a request via a previously opened connection.
4. Opening a file to serve the request fails because of the same
`Too many open files` error, until some other client drops a connection.
There are many more possible situations; this is just a small illustration showing
that limiting the number of connections is very useful. Generally, it's one of the
ways to control the resources used by a server and to avoid some kinds of
denial-of-service (DoS) attacks.
### `async-listen` crate
Limiting the maximum number of simultaneous connections with [`async-listen`]
looks like the following:
```rust,edition2018
# extern crate async_std;
# extern crate async_listen;
# use std::time::Duration;
# use async_std::{
#     io,
#     net::{TcpListener, TcpStream, ToSocketAddrs},
#     prelude::*,
#     task,
# };
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
#
use async_listen::{ListenExt, Token, error_hint};

async fn accept_loop(addr: impl ToSocketAddrs) -> Result<()> {
    let listener = TcpListener::bind(addr).await?;
    let mut incoming = listener
        .incoming()
        .log_warnings(log_accept_error)
        .handle_errors(Duration::from_millis(500)) // 1
        .backpressure(100);
    while let Some((token, socket)) = incoming.next().await { // 2
        task::spawn(async move {
            connection_loop(&token, socket).await; // 3
        });
    }
    Ok(())
}

async fn connection_loop(_token: &Token, stream: TcpStream) { // 4
    // ...
}

# fn log_accept_error(e: &io::Error) {
#     eprintln!("Error: {}. Listener paused for 0.5s. {}", e, error_hint(e));
# }
```
1. We need to handle errors first, because the [`backpressure`] helper expects
a stream of `TcpStream` rather than of `Result`.
2. The token yielded alongside each new stream is what the backpressure helper
counts. I.e. as soon as a token is dropped, a new connection can be established.
3. We give the connection loop a reference to the token to bind the token's
lifetime to the lifetime of the connection.
4. The token itself can be ignored inside the function, hence `_token`.
[`backpressure`]: https://docs.rs/async-listen/0.1.2/async_listen/trait.ListenExt.html#method.backpressure
Be sure to [test this behavior](#testing-application).

View file

@ -6,11 +6,11 @@ A collection of small, useful patterns.
`async-std` doesn't provide a `split()` method on `io` handles. Instead, splitting a stream into a read and write half can be done like this:
```rust,edition2018
# extern crate async_std;
use async_std::{io, net::TcpStream};
async fn echo(stream: TcpStream) {
```rust
use async_std::io;
async fn echo(stream: io::TcpStream) {
let (reader, writer) = &mut (&stream, &stream);
io::copy(reader, writer).await;
io::copy(reader, writer).await?;
}
```
```

View file

@ -8,5 +8,5 @@ In the case that you find a security-related bug in our library, please get in t
Patches improving the resilience of the library or the testing setup are happily accepted on our [github org][github].
[security-policy]: /security/policy
[github]: https://github.com/async-rs
[security-policies]: /security/policy
[github]: https://github.com/async-std/

View file

@ -2,12 +2,14 @@
Safety is one of the core principles of what we do, and to that end, we would like to ensure that async-std has a secure implementation. Thank you for taking the time to responsibly disclose any issues you find.
All security bugs in async-std distribution should be reported by email to florian.gilcher@ferrous-systems.com. This list is delivered to a small security team. Your email will be acknowledged within 24 hours, and youll receive a more detailed response to your email within 48 hours indicating the next steps in handling your report. If you would like, you can encrypt your report using our public key. This key is also On MITs keyserver and reproduced below.
All security bugs in async-std distribution should be reported by email to security@ferrous-systems.com. This list is delivered to a small security team. Your email will be acknowledged within 24 hours, and youll receive a more detailed response to your email within 48 hours indicating the next steps in handling your report. If you would like, you can encrypt your report using our public key. This key is also On MITs keyserver and reproduced below.
Be sure to use a descriptive subject line to avoid having your report be missed. After the initial reply to your report, the security team will endeavor to keep you informed of the progress being made towards a fix and full announcement. As recommended by [RFPolicy][rf-policy], these updates will be sent at least every five days. In reality, this is more likely to be every 24-48 hours.
If you have not received a reply to your email within 48 hours, or have not heard from the security team for the past five days, there are a few steps you can take (in order):
* Contact the current security coordinator TODO directly.
* Contact the back-up contact TODO directly.
* Post on our Community forums
Please note that the discussion forums are public areas. When escalating in these venues, please do not discuss your issue. Simply say that youre trying to get a hold of someone from the security team.
@ -24,7 +26,7 @@ The async-std project has a 5 step disclosure process.
* Fixes are prepared for all releases which are still under maintenance. These fixes are not committed to the public repository but rather held locally pending the announcement.
* On the embargo date, the changes are pushed to the public repository and new builds are deployed to crates.io. Within 6 hours, a copy of the advisory will be published on the the async.rs blog.
This process can take some time, especially when coordination is required with maintainers of other projects. Every effort will be made to handle the bug in as timely a manner as possible, however it's important that we follow the release process above to ensure that the disclosure is handled in a consistent manner.
This process can take some time, especially when coordination is required with maintainers of other projects. Every effort will be made to handle the bug in as timely a manner as possible, however its important that we follow the release process above to ensure that the disclosure is handled in a consistent manner.
## Credits
@ -32,35 +34,4 @@ This policy is adapted from the [Rust project](https://www.rust-lang.org/policie
## PGP Key
```text
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQENBF1Wu/ABCADJaGt4HwSlqKB9BGHWYKZj/6mTMbmc29vsEOcCSQKo6myCf9zc
sasWAttep4FAUDX+MJhVbBTSq9M1YVxp33Qh5AF0t9SnJZnbI+BZuGawcHDL01xE
bE+8bcA2+szeTTUZCeWwsaoTd/2qmQKvpUCBQp7uBs/ITO/I2q7+xCGXaOHZwUKc
H8SUBLd35nYFtjXAeejoZVkqG2qEjrc9bkZAwxFXi7Fw94QdkNLaCjNfKxZON/qP
A3WOpyWPr3ERk5C5prjEAvrW8kdqpTRjdmzQjsr8UEXb5GGEOo93N4OLZVQ2mXt9
dfn++GOnOk7sTxvfiDH8Ru5o4zCtKgO+r5/LABEBAAG0UkZsb3JpYW4gR2lsY2hl
ciAoU2VjdXJpdHkgY29udGFjdCBhc3luYy1zdGQpIDxmbG9yaWFuLmdpbGNoZXJA
ZmVycm91cy1zeXN0ZW1zLmNvbT6JATgEEwECACIFAl1Wu/ACGwMGCwkIBwMCBhUI
AgkKCwQWAgMBAh4BAheAAAoJEACXY97PwLtSc0AH/18yvrElVOkG0ADWX7l+JKHH
nMQtYj0Auop8d6TuKBbpwtYhwELrQoITDMV7f2XEnchNsvYxAyBZhIISmXeJboE1
KzZD1O+4QPXRcXhj+QNsKQ680mrgZXgAI2Y4ptIW9Vyw3jiHu/ZVopvDAt4li+up
3fRJGPAvGu+tclpJmA+Xam23cDj89M7/wHHgKIyT59WgFwyCgibL+NHKwg2Unzou
9uyZQnq6hf62sQTWEZIAr9BQpKmluplNIJHDeECWzZoE9ucE2ZXsq5pq9qojsAMK
yRdaFdpBcD/AxtrTKFeXGS7X7LqaljY/IFBEdJOqVNWpqSLjGWqjSLIEsc1AB0K5
AQ0EXVa78AEIAJMxBOEEW+2c3CcjFuUfcRsoBsFH3Vk+GwCbjIpNHq/eAvS1yy2L
u10U5CcT5Xb6be3AeCYv00ZHVbEi6VwoauVCSX8qDjhVzQxvNLgQ1SduobjyF6t8
3M/wTija6NvMKszyw1l2oHepxSMLej1m49DyCDFNiZm5rjQcYnFT4J71syxViqHF
v2fWCheTrHP3wfBAt5zyDet7IZd/EhYAK6xXEwr9nBPjfbaVexm2B8K6hOPNj0Bp
OKm4rcOj7JYlcxrwhMvNnwEue7MqH1oXAsoaC1BW+qs4acp/hHpesweL6Rcg1pED
OJUQd3UvRsqRK0EsorDu0oj5wt6Qp3ZEbPMAEQEAAYkBHwQYAQIACQUCXVa78AIb
DAAKCRAAl2Pez8C7Uv8bB/9scRm2wvzHLbFtcEHaHvlKO1yYfSVqKqJzIKHc7pM2
+szM8JVRTxAbzK5Xih9SB5xlekixxO2UCJI5DkJ/ir/RCcg+/CAQ8iLm2UcYAgJD
TocKiR5gjNAvUDI4tMrDLLdF+7+RCQGc7HBSxFiNBJVGAztGVh1+cQ0zaCX6Tt33
1EQtyRcPID0m6+ip5tCJN0dILC0YcwzXGrSgjB03JqItIyJEucdQz6UB84TIAGku
JJl4tktgD9T7Rb5uzRhHCSbLy89DQVvCcKD4B94ffuDW3HO8n8utDusOiZuG4BUf
WdFy6/gTLNiFbTzkq1BBJQMN1nBwGs1sn63RRgjumZ1N
=dIcF
-----END PGP PUBLIC KEY BLOCK-----
```
TODO

View file

@ -1,96 +0,0 @@
## Writing an Accept Loop
Let's implement the scaffold of the server: a loop that binds a TCP socket to an address and starts accepting connections.
First of all, let's add required import boilerplate:
```rust,edition2018
# extern crate async_std;
use async_std::{
prelude::*, // 1
task, // 2
net::{TcpListener, ToSocketAddrs}, // 3
};
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>; // 4
```
1. `prelude` re-exports some traits required to work with futures and streams.
2. The `task` module roughly corresponds to the `std::thread` module, but tasks are much lighter weight.
A single thread can run many tasks.
3. For the socket type, we use `TcpListener` from `async_std`, which is just like `std::net::TcpListener`, but is non-blocking and uses `async` API.
4. We will skip implementing comprehensive error handling in this example.
To propagate the errors, we will use a boxed error trait object.
Did you know that the standard library provides a `From<&'_ str> for Box<dyn Error>` implementation, which allows you to use plain strings with the `?` operator (see the small sketch below)?
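Here is a tiny, self-contained illustration of that conversion; `first_word` is
made up for this example and is not part of the chat server:

```rust,edition2018
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;

fn first_word(line: &str) -> Result<&str> {
    match line.split_whitespace().next() {
        Some(word) => Ok(word),
        // `?` turns the `&str` into a boxed error via the `From` impl and returns it.
        None => Err("the line is empty")?,
    }
}

fn main() -> Result<()> {
    assert_eq!(first_word("hello world")?, "hello");
    assert!(first_word("   ").is_err());
    Ok(())
}
```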
Now we can write the server's accept loop:
```rust,edition2018
# extern crate async_std;
# use async_std::{
# net::{TcpListener, ToSocketAddrs},
# prelude::*,
# };
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
#
async fn accept_loop(addr: impl ToSocketAddrs) -> Result<()> { // 1
let listener = TcpListener::bind(addr).await?; // 2
let mut incoming = listener.incoming();
while let Some(stream) = incoming.next().await { // 3
// TODO
}
Ok(())
}
```
1. We mark the `accept_loop` function as `async`, which allows us to use `.await` syntax inside.
2. `TcpListener::bind` call returns a future, which we `.await` to extract the `Result`, and then `?` to get a `TcpListener`.
Note how `.await` and `?` work nicely together.
This is exactly how `std::net::TcpListener` works, but with `.await` added.
Mirroring API of `std` is an explicit design goal of `async_std`.
3. Here, we would like to iterate over the incoming sockets, just as one would do in `std`:
```rust,edition2018,should_panic
let listener: std::net::TcpListener = unimplemented!();
for stream in listener.incoming() {
}
```
Unfortunately this doesn't quite work with `async` yet, because the language has no support for `async` for-loops.
For this reason we have to implement the loop manually, by using `while let Some(item) = iter.next().await` pattern.
Finally, let's add main:
```rust,edition2018
# extern crate async_std;
# use async_std::{
# net::{TcpListener, ToSocketAddrs},
# prelude::*,
# task,
# };
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
#
# async fn accept_loop(addr: impl ToSocketAddrs) -> Result<()> { // 1
# let listener = TcpListener::bind(addr).await?; // 2
# let mut incoming = listener.incoming();
# while let Some(stream) = incoming.next().await { // 3
# // TODO
# }
# Ok(())
# }
#
// main
fn run() -> Result<()> {
let fut = accept_loop("127.0.0.1:8080");
task::block_on(fut)
}
```
The crucial thing to realise is that in Rust, unlike in many other languages, calling an async function does **not** run any code.
Async functions only construct futures, which are inert state machines.
To start stepping through the future state-machine in an async function, you should use `.await`.
In a non-async function, a way to execute a future is to hand it to the executor.
In this case, we use `task::block_on` to execute a future on the current thread and block until it's done.
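Here is a tiny, self-contained demonstration of this; `say_hello` is made up for
this example:

```rust,edition2018
# extern crate async_std;
use async_std::task;

async fn say_hello() {
    println!("hello");
}

fn main() {
    let fut = say_hello(); // nothing is printed yet: the future is inert
    println!("future created");
    task::block_on(fut); // only now does "hello" appear
}
```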

View file

@ -1,142 +0,0 @@
## All Together
At this point, we only need to start the broker to get a fully-functioning (in the happy case!) chat:
```rust,edition2018
# extern crate async_std;
# extern crate futures;
use async_std::{
io::BufReader,
net::{TcpListener, TcpStream, ToSocketAddrs},
prelude::*,
task,
};
use futures::channel::mpsc;
use futures::sink::SinkExt;
use std::{
collections::hash_map::{HashMap, Entry},
sync::Arc,
};
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
type Sender<T> = mpsc::UnboundedSender<T>;
type Receiver<T> = mpsc::UnboundedReceiver<T>;
// main
fn run() -> Result<()> {
task::block_on(accept_loop("127.0.0.1:8080"))
}
fn spawn_and_log_error<F>(fut: F) -> task::JoinHandle<()>
where
F: Future<Output = Result<()>> + Send + 'static,
{
task::spawn(async move {
if let Err(e) = fut.await {
eprintln!("{}", e)
}
})
}
async fn accept_loop(addr: impl ToSocketAddrs) -> Result<()> {
let listener = TcpListener::bind(addr).await?;
let (broker_sender, broker_receiver) = mpsc::unbounded(); // 1
let _broker_handle = task::spawn(broker_loop(broker_receiver));
let mut incoming = listener.incoming();
while let Some(stream) = incoming.next().await {
let stream = stream?;
println!("Accepting from: {}", stream.peer_addr()?);
spawn_and_log_error(connection_loop(broker_sender.clone(), stream));
}
Ok(())
}
async fn connection_loop(mut broker: Sender<Event>, stream: TcpStream) -> Result<()> {
let stream = Arc::new(stream); // 2
let reader = BufReader::new(&*stream);
let mut lines = reader.lines();
let name = match lines.next().await {
None => Err("peer disconnected immediately")?,
Some(line) => line?,
};
broker.send(Event::NewPeer { name: name.clone(), stream: Arc::clone(&stream) }).await // 3
.unwrap();
while let Some(line) = lines.next().await {
let line = line?;
let (dest, msg) = match line.find(':') {
None => continue,
Some(idx) => (&line[..idx], line[idx + 1 ..].trim()),
};
let dest: Vec<String> = dest.split(',').map(|name| name.trim().to_string()).collect();
let msg: String = msg.to_string();
broker.send(Event::Message { // 4
from: name.clone(),
to: dest,
msg,
}).await.unwrap();
}
Ok(())
}
async fn connection_writer_loop(
mut messages: Receiver<String>,
stream: Arc<TcpStream>,
) -> Result<()> {
let mut stream = &*stream;
while let Some(msg) = messages.next().await {
stream.write_all(msg.as_bytes()).await?;
}
Ok(())
}
#[derive(Debug)]
enum Event {
NewPeer {
name: String,
stream: Arc<TcpStream>,
},
Message {
from: String,
to: Vec<String>,
msg: String,
},
}
async fn broker_loop(mut events: Receiver<Event>) -> Result<()> {
let mut peers: HashMap<String, Sender<String>> = HashMap::new();
while let Some(event) = events.next().await {
match event {
Event::Message { from, to, msg } => {
for addr in to {
if let Some(peer) = peers.get_mut(&addr) {
let msg = format!("from {}: {}\n", from, msg);
peer.send(msg).await?
}
}
}
Event::NewPeer { name, stream} => {
match peers.entry(name) {
Entry::Occupied(..) => (),
Entry::Vacant(entry) => {
let (client_sender, client_receiver) = mpsc::unbounded();
entry.insert(client_sender); // 4
spawn_and_log_error(connection_writer_loop(client_receiver, stream)); // 5
}
}
}
}
}
Ok(())
}
```
1. Inside the `accept_loop`, we create the broker's channel and `task`.
2. Inside `connection_loop`, we need to wrap `TcpStream` into an `Arc`, to be able to share it with the `connection_writer_loop`.
3. On login, we notify the broker.
Note that we `.unwrap` on send: the broker should outlive all the clients, and if that's not the case the broker probably panicked, so we can escalate the panic as well.
4. Similarly, we forward parsed messages to the broker, assuming that it is alive.

View file

@ -1,253 +0,0 @@
## Clean Shutdown
One of the problems of the current implementation is that it doesn't handle graceful shutdown.
If we break from the accept loop for some reason, all in-flight tasks are just dropped on the floor.
A more correct shutdown sequence would be:
1. Stop accepting new clients
2. Deliver all pending messages
3. Exit the process
A clean shutdown in a channel-based architecture is easy, although it can appear to be a magic trick at first.
In Rust, the receiver side of a channel is closed as soon as all senders are dropped.
That is, as soon as producers exit and drop their senders, the rest of the system shuts down naturally.
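This property can be observed in isolation with a tiny program, using the same
`futures` channels as the rest of the tutorial (purely illustrative):

```rust,edition2018
# extern crate async_std;
# extern crate futures;
use async_std::task;
use futures::channel::mpsc;
use futures::StreamExt;

fn main() {
    task::block_on(async {
        let (sender, mut receiver) = mpsc::unbounded::<i32>();
        sender.unbounded_send(1).unwrap();
        drop(sender); // the last sender is gone, so the channel is now closed
        assert_eq!(receiver.next().await, Some(1)); // buffered messages are still delivered
        assert_eq!(receiver.next().await, None); // ...and then the stream ends
    });
}
```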
In `async_std` this translates to two rules:
1. Make sure that channels form an acyclic graph.
2. Take care to wait, in the correct order, until intermediate layers of the system process pending messages.
In `a-chat`, we already have a unidirectional flow of messages: `reader -> broker -> writer`.
However, we never wait for the broker and the writers, which might cause some messages to get dropped.
Let's add waiting to the server:
```rust,edition2018
# extern crate async_std;
# extern crate futures;
# use async_std::{
# io::{self, BufReader},
# net::{TcpListener, TcpStream, ToSocketAddrs},
# prelude::*,
# task,
# };
# use futures::channel::mpsc;
# use futures::sink::SinkExt;
# use std::{
# collections::hash_map::{HashMap, Entry},
# sync::Arc,
# };
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
# type Sender<T> = mpsc::UnboundedSender<T>;
# type Receiver<T> = mpsc::UnboundedReceiver<T>;
#
# fn spawn_and_log_error<F>(fut: F) -> task::JoinHandle<()>
# where
# F: Future<Output = Result<()>> + Send + 'static,
# {
# task::spawn(async move {
# if let Err(e) = fut.await {
# eprintln!("{}", e)
# }
# })
# }
#
#
# async fn connection_loop(mut broker: Sender<Event>, stream: TcpStream) -> Result<()> {
# let stream = Arc::new(stream); // 2
# let reader = BufReader::new(&*stream);
# let mut lines = reader.lines();
#
# let name = match lines.next().await {
# None => Err("peer disconnected immediately")?,
# Some(line) => line?,
# };
# broker.send(Event::NewPeer { name: name.clone(), stream: Arc::clone(&stream) }).await // 3
# .unwrap();
#
# while let Some(line) = lines.next().await {
# let line = line?;
# let (dest, msg) = match line.find(':') {
# None => continue,
# Some(idx) => (&line[..idx], line[idx + 1 ..].trim()),
# };
# let dest: Vec<String> = dest.split(',').map(|name| name.trim().to_string()).collect();
# let msg: String = msg.trim().to_string();
#
# broker.send(Event::Message { // 4
# from: name.clone(),
# to: dest,
# msg,
# }).await.unwrap();
# }
# Ok(())
# }
#
# async fn connection_writer_loop(
# mut messages: Receiver<String>,
# stream: Arc<TcpStream>,
# ) -> Result<()> {
# let mut stream = &*stream;
# while let Some(msg) = messages.next().await {
# stream.write_all(msg.as_bytes()).await?;
# }
# Ok(())
# }
#
# #[derive(Debug)]
# enum Event {
# NewPeer {
# name: String,
# stream: Arc<TcpStream>,
# },
# Message {
# from: String,
# to: Vec<String>,
# msg: String,
# },
# }
#
# async fn broker_loop(mut events: Receiver<Event>) -> Result<()> {
# let mut peers: HashMap<String, Sender<String>> = HashMap::new();
#
# while let Some(event) = events.next().await {
# match event {
# Event::Message { from, to, msg } => {
# for addr in to {
# if let Some(peer) = peers.get_mut(&addr) {
# let msg = format!("from {}: {}\n", from, msg);
# peer.send(msg).await?
# }
# }
# }
# Event::NewPeer { name, stream} => {
# match peers.entry(name) {
# Entry::Occupied(..) => (),
# Entry::Vacant(entry) => {
# let (client_sender, client_receiver) = mpsc::unbounded();
# entry.insert(client_sender); // 4
# spawn_and_log_error(connection_writer_loop(client_receiver, stream)); // 5
# }
# }
# }
# }
# }
# Ok(())
# }
#
async fn accept_loop(addr: impl ToSocketAddrs) -> Result<()> {
let listener = TcpListener::bind(addr).await?;
let (broker_sender, broker_receiver) = mpsc::unbounded();
let broker_handle = task::spawn(broker_loop(broker_receiver));
let mut incoming = listener.incoming();
while let Some(stream) = incoming.next().await {
let stream = stream?;
println!("Accepting from: {}", stream.peer_addr()?);
spawn_and_log_error(connection_loop(broker_sender.clone(), stream));
}
drop(broker_sender); // 1
broker_handle.await?; // 5
Ok(())
}
```
And to the broker:
```rust,edition2018
# extern crate async_std;
# extern crate futures;
# use async_std::{
# io::{self, BufReader},
# net::{TcpListener, TcpStream, ToSocketAddrs},
# prelude::*,
# task,
# };
# use futures::channel::mpsc;
# use futures::sink::SinkExt;
# use std::{
# collections::hash_map::{HashMap, Entry},
# sync::Arc,
# };
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
# type Sender<T> = mpsc::UnboundedSender<T>;
# type Receiver<T> = mpsc::UnboundedReceiver<T>;
#
# async fn connection_writer_loop(
# mut messages: Receiver<String>,
# stream: Arc<TcpStream>,
# ) -> Result<()> {
# let mut stream = &*stream;
# while let Some(msg) = messages.next().await {
# stream.write_all(msg.as_bytes()).await?;
# }
# Ok(())
# }
#
# fn spawn_and_log_error<F>(fut: F) -> task::JoinHandle<()>
# where
# F: Future<Output = Result<()>> + Send + 'static,
# {
# task::spawn(async move {
# if let Err(e) = fut.await {
# eprintln!("{}", e)
# }
# })
# }
#
# #[derive(Debug)]
# enum Event {
# NewPeer {
# name: String,
# stream: Arc<TcpStream>,
# },
# Message {
# from: String,
# to: Vec<String>,
# msg: String,
# },
# }
#
async fn broker_loop(mut events: Receiver<Event>) -> Result<()> {
let mut writers = Vec::new();
let mut peers: HashMap<String, Sender<String>> = HashMap::new();
while let Some(event) = events.next().await { // 2
match event {
Event::Message { from, to, msg } => {
for addr in to {
if let Some(peer) = peers.get_mut(&addr) {
let msg = format!("from {}: {}\n", from, msg);
peer.send(msg).await?
}
}
}
Event::NewPeer { name, stream} => {
match peers.entry(name) {
Entry::Occupied(..) => (),
Entry::Vacant(entry) => {
let (client_sender, client_receiver) = mpsc::unbounded();
entry.insert(client_sender);
let handle = spawn_and_log_error(connection_writer_loop(client_receiver, stream));
writers.push(handle); // 4
}
}
}
}
}
drop(peers); // 3
for writer in writers { // 4
writer.await;
}
Ok(())
}
```
Notice what happens with all of the channels once we exit the accept loop:
1. First, we drop the main broker's sender.
That way when the readers are done, there's no sender for the broker's channel, and the channel closes.
2. Next, the broker exits the `while let Some(event) = events.next().await` loop.
3. It's crucial that, at this stage, we drop the `peers` map.
This drops the writers' senders.
4. Now we can join all of the writers.
5. Finally, we join the broker, which also guarantees that all the writes have terminated.

View file

@ -1,100 +0,0 @@
## Connecting Readers and Writers
So how do we make sure that messages read in `connection_loop` flow into the relevant `connection_writer_loop`?
We should somehow maintain a `peers: HashMap<String, Sender<String>>` map which allows a client to find destination channels.
However, this map would be a bit of shared mutable state, so we'd have to wrap it in an `RwLock` and answer tough questions about what should happen if a client joins at the same moment as it receives a message.
One trick to make reasoning about state simpler comes from the actor model.
We can create a dedicated broker task which owns the `peers` map and communicates with other tasks using channels.
By hiding `peers` inside such an "actor" task, we remove the need for mutexes and also make the serialization point explicit.
The order of events "Bob sends message to Alice" and "Alice joins" is determined by the order of the corresponding events in the broker's event queue.
```rust,edition2018
# extern crate async_std;
# extern crate futures;
# use async_std::{
# net::TcpStream,
# prelude::*,
# task,
# };
# use futures::channel::mpsc;
# use futures::sink::SinkExt;
# use std::sync::Arc;
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
# type Sender<T> = mpsc::UnboundedSender<T>;
# type Receiver<T> = mpsc::UnboundedReceiver<T>;
#
# async fn connection_writer_loop(
# mut messages: Receiver<String>,
# stream: Arc<TcpStream>,
# ) -> Result<()> {
# let mut stream = &*stream;
# while let Some(msg) = messages.next().await {
# stream.write_all(msg.as_bytes()).await?;
# }
# Ok(())
# }
#
# fn spawn_and_log_error<F>(fut: F) -> task::JoinHandle<()>
# where
# F: Future<Output = Result<()>> + Send + 'static,
# {
# task::spawn(async move {
# if let Err(e) = fut.await {
# eprintln!("{}", e)
# }
# })
# }
#
use std::collections::hash_map::{Entry, HashMap};
#[derive(Debug)]
enum Event { // 1
NewPeer {
name: String,
stream: Arc<TcpStream>,
},
Message {
from: String,
to: Vec<String>,
msg: String,
},
}
async fn broker_loop(mut events: Receiver<Event>) -> Result<()> {
let mut peers: HashMap<String, Sender<String>> = HashMap::new(); // 2
while let Some(event) = events.next().await {
match event {
Event::Message { from, to, msg } => { // 3
for addr in to {
if let Some(peer) = peers.get_mut(&addr) {
let msg = format!("from {}: {}\n", from, msg);
peer.send(msg).await?
}
}
}
Event::NewPeer { name, stream } => {
match peers.entry(name) {
Entry::Occupied(..) => (),
Entry::Vacant(entry) => {
let (client_sender, client_receiver) = mpsc::unbounded();
entry.insert(client_sender); // 4
spawn_and_log_error(connection_writer_loop(client_receiver, stream)); // 5
}
}
}
}
}
Ok(())
}
```
1. The broker task should handle two types of events: a message or an arrival of a new peer.
2. The internal state of the broker is a `HashMap`.
Note how we don't need a `Mutex` here and can confidently say, at each iteration of the broker's loop, what the current set of peers is.
3. To handle a message, we send it over a channel to each destination.
4. To handle a new peer, we first register it in the peer's map ...
5. ... and then spawn a dedicated task to actually write the messages to the socket.

View file

@ -1,307 +0,0 @@
## Handling Disconnections
Currently, we only ever _add_ new peers to the map.
This is clearly wrong: if a peer closes its connection to the chat, we should not try to send any more messages to it.
One subtlety with handling disconnection is that we can detect it either in the reader's task, or in the writer's task.
The most obvious solution here is to just remove the peer from the `peers` map in both cases, but this would be wrong.
If _both_ read and write fail, we'll remove the peer twice, but it can be the case that the peer reconnected between the two failures!
To fix this, we will only remove the peer when the write side finishes.
If the read side finishes we will notify the write side that it should stop as well.
That is, we need to add an ability to signal shutdown for the writer task.
One way to approach this is a `shutdown: Receiver<()>` channel.
There's a more minimal solution, however, which makes clever use of RAII.
Closing a channel is a synchronization event, so we don't need to send a shutdown message; we can just drop the sender.
This way, we statically guarantee that we issue shutdown exactly once, even if we early return via `?` or panic.
First, let's add a shutdown channel to the `connection_loop`:
```rust,edition2018
# extern crate async_std;
# extern crate futures;
# use async_std::net::TcpStream;
# use futures::channel::mpsc;
# use futures::sink::SinkExt;
# use std::sync::Arc;
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
# type Sender<T> = mpsc::UnboundedSender<T>;
# type Receiver<T> = mpsc::UnboundedReceiver<T>;
#
#[derive(Debug)]
enum Void {} // 1
#[derive(Debug)]
enum Event {
NewPeer {
name: String,
stream: Arc<TcpStream>,
shutdown: Receiver<Void>, // 2
},
Message {
from: String,
to: Vec<String>,
msg: String,
},
}
async fn connection_loop(mut broker: Sender<Event>, stream: Arc<TcpStream>) -> Result<()> {
// ...
# let name: String = unimplemented!();
let (_shutdown_sender, shutdown_receiver) = mpsc::unbounded::<Void>(); // 3
broker.send(Event::NewPeer {
name: name.clone(),
stream: Arc::clone(&stream),
shutdown: shutdown_receiver,
}).await.unwrap();
// ...
# unimplemented!()
}
```
1. To enforce that no messages are sent along the shutdown channel, we use an uninhabited type.
2. We pass the shutdown channel to the writer task.
3. In the reader, we create a `_shutdown_sender` whose only purpose is to get dropped.
In the `connection_writer_loop`, we now need to choose between shutdown and message channels.
We use the `select` macro for this purpose:
```rust,edition2018
# extern crate async_std;
# extern crate futures;
# use async_std::{net::TcpStream, prelude::*};
# use futures::channel::mpsc;
use futures::{select, FutureExt};
# use std::sync::Arc;
# type Receiver<T> = mpsc::UnboundedReceiver<T>;
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
# type Sender<T> = mpsc::UnboundedSender<T>;
# #[derive(Debug)]
# enum Void {} // 1
async fn connection_writer_loop(
messages: &mut Receiver<String>,
stream: Arc<TcpStream>,
shutdown: Receiver<Void>, // 1
) -> Result<()> {
let mut stream = &*stream;
let mut messages = messages.fuse();
let mut shutdown = shutdown.fuse();
loop { // 2
select! {
msg = messages.next().fuse() => match msg {
Some(msg) => stream.write_all(msg.as_bytes()).await?,
None => break,
},
void = shutdown.next().fuse() => match void {
Some(void) => match void {}, // 3
None => break,
}
}
}
Ok(())
}
```
1. We add shutdown channel as an argument.
2. Because of `select`, we can't use a `while let` loop, so we desugar it further into a `loop`.
3. In the shutdown case we use `match void {}` as a statically-checked `unreachable!()`.
Another problem is that between the moment we detect disconnection in `connection_writer_loop` and the moment when we actually remove the peer from the `peers` map, new messages might be pushed into the peer's channel.
To not lose these messages completely, we'll return the messages channel back to the broker.
This also allows us to establish a useful invariant that the message channel strictly outlives the peer in the `peers` map, and makes the broker itself infallible.
## Final Code
The final code looks like this:
```rust,edition2018
# extern crate async_std;
# extern crate futures;
use async_std::{
io::BufReader,
net::{TcpListener, TcpStream, ToSocketAddrs},
prelude::*,
task,
};
use futures::channel::mpsc;
use futures::sink::SinkExt;
use futures::{select, FutureExt};
use std::{
collections::hash_map::{Entry, HashMap},
future::Future,
sync::Arc,
};
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
type Sender<T> = mpsc::UnboundedSender<T>;
type Receiver<T> = mpsc::UnboundedReceiver<T>;
#[derive(Debug)]
enum Void {}
// main
fn run() -> Result<()> {
task::block_on(accept_loop("127.0.0.1:8080"))
}
async fn accept_loop(addr: impl ToSocketAddrs) -> Result<()> {
let listener = TcpListener::bind(addr).await?;
let (broker_sender, broker_receiver) = mpsc::unbounded();
let broker_handle = task::spawn(broker_loop(broker_receiver));
let mut incoming = listener.incoming();
while let Some(stream) = incoming.next().await {
let stream = stream?;
println!("Accepting from: {}", stream.peer_addr()?);
spawn_and_log_error(connection_loop(broker_sender.clone(), stream));
}
drop(broker_sender);
broker_handle.await;
Ok(())
}
async fn connection_loop(mut broker: Sender<Event>, stream: TcpStream) -> Result<()> {
let stream = Arc::new(stream);
let reader = BufReader::new(&*stream);
let mut lines = reader.lines();
let name = match lines.next().await {
None => Err("peer disconnected immediately")?,
Some(line) => line?,
};
let (_shutdown_sender, shutdown_receiver) = mpsc::unbounded::<Void>();
broker.send(Event::NewPeer {
name: name.clone(),
stream: Arc::clone(&stream),
shutdown: shutdown_receiver,
}).await.unwrap();
while let Some(line) = lines.next().await {
let line = line?;
let (dest, msg) = match line.find(':') {
None => continue,
Some(idx) => (&line[..idx], line[idx + 1 ..].trim()),
};
let dest: Vec<String> = dest.split(',').map(|name| name.trim().to_string()).collect();
let msg: String = msg.trim().to_string();
broker.send(Event::Message {
from: name.clone(),
to: dest,
msg,
}).await.unwrap();
}
Ok(())
}
async fn connection_writer_loop(
messages: &mut Receiver<String>,
stream: Arc<TcpStream>,
shutdown: Receiver<Void>,
) -> Result<()> {
let mut stream = &*stream;
let mut messages = messages.fuse();
let mut shutdown = shutdown.fuse();
loop {
select! {
msg = messages.next().fuse() => match msg {
Some(msg) => stream.write_all(msg.as_bytes()).await?,
None => break,
},
void = shutdown.next().fuse() => match void {
Some(void) => match void {},
None => break,
}
}
}
Ok(())
}
#[derive(Debug)]
enum Event {
NewPeer {
name: String,
stream: Arc<TcpStream>,
shutdown: Receiver<Void>,
},
Message {
from: String,
to: Vec<String>,
msg: String,
},
}
async fn broker_loop(events: Receiver<Event>) {
let (disconnect_sender, mut disconnect_receiver) = // 1
mpsc::unbounded::<(String, Receiver<String>)>();
let mut peers: HashMap<String, Sender<String>> = HashMap::new();
let mut events = events.fuse();
loop {
let event = select! {
event = events.next().fuse() => match event {
None => break, // 2
Some(event) => event,
},
disconnect = disconnect_receiver.next().fuse() => {
let (name, _pending_messages) = disconnect.unwrap(); // 3
assert!(peers.remove(&name).is_some());
continue;
},
};
match event {
Event::Message { from, to, msg } => {
for addr in to {
if let Some(peer) = peers.get_mut(&addr) {
let msg = format!("from {}: {}\n", from, msg);
peer.send(msg).await
.unwrap() // 6
}
}
}
Event::NewPeer { name, stream, shutdown } => {
match peers.entry(name.clone()) {
Entry::Occupied(..) => (),
Entry::Vacant(entry) => {
let (client_sender, mut client_receiver) = mpsc::unbounded();
entry.insert(client_sender);
let mut disconnect_sender = disconnect_sender.clone();
spawn_and_log_error(async move {
let res = connection_writer_loop(&mut client_receiver, stream, shutdown).await;
disconnect_sender.send((name, client_receiver)).await // 4
.unwrap();
res
});
}
}
}
}
}
drop(peers); // 5
drop(disconnect_sender); // 6
while let Some((_name, _pending_messages)) = disconnect_receiver.next().await {
}
}
fn spawn_and_log_error<F>(fut: F) -> task::JoinHandle<()>
where
F: Future<Output = Result<()>> + Send + 'static,
{
task::spawn(async move {
if let Err(e) = fut.await {
eprintln!("{}", e)
}
})
}
```
1. In the broker, we create a channel to reap disconnected peers and their undelivered messages.
2. The broker's main loop exits when the input events channel is exhausted (that is, when all readers exit).
3. Because the broker itself holds a `disconnect_sender`, we know that the disconnections channel can't be fully drained in the main loop.
4. We send the peer's name and pending messages to the disconnections channel in both the happy and the not-so-happy path.
Again, we can safely unwrap because the broker outlives the writers.
5. We drop the `peers` map to close the writers' messages channels and shut down the writers for sure.
It is not strictly necessary in the current setup, where the broker waits for readers' shutdown anyway.
However, if we add a server-initiated shutdown (for example, `ctrl+c` handling), this will be a way for the broker to shut down the writers.
6. Finally, we close and drain the disconnections channel.

View file

@ -1,63 +0,0 @@
## Implementing a client
Since the protocol is line-based, implementing a client for the chat is straightforward:
* Lines read from stdin should be sent over the socket.
* Lines read from the socket should be echoed to stdout.
Although async does not significantly affect client performance (since, unlike the server, the client interacts with only a single user and needs only limited concurrency), async is still useful for managing concurrency!
The client has to read from stdin and the socket *simultaneously*.
Programming this with threads is cumbersome, especially when implementing a clean shutdown.
With async, the `select!` macro is all that is needed.
```rust,edition2018
# extern crate async_std;
# extern crate futures;
use async_std::{
io::{stdin, BufReader},
net::{TcpStream, ToSocketAddrs},
prelude::*,
task,
};
use futures::{select, FutureExt};
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
// main
fn run() -> Result<()> {
task::block_on(try_run("127.0.0.1:8080"))
}
async fn try_run(addr: impl ToSocketAddrs) -> Result<()> {
let stream = TcpStream::connect(addr).await?;
let (reader, mut writer) = (&stream, &stream); // 1
let mut lines_from_server = BufReader::new(reader).lines().fuse(); // 2
let mut lines_from_stdin = BufReader::new(stdin()).lines().fuse(); // 2
loop {
select! { // 3
line = lines_from_server.next().fuse() => match line {
Some(line) => {
let line = line?;
println!("{}", line);
},
None => break,
},
line = lines_from_stdin.next().fuse() => match line {
Some(line) => {
let line = line?;
writer.write_all(line.as_bytes()).await?;
writer.write_all(b"\n").await?;
}
None => break,
}
}
}
Ok(())
}
```
1. Here we split `TcpStream` into read and write halves: there's `impl AsyncRead for &'_ TcpStream`, just like the one in std.
2. We create a stream of lines for both the socket and stdin.
3. In the main select loop, we print the lines we receive from the server and send the lines we read from the console.

View file

@ -1,14 +0,0 @@
# Tutorial: Writing a chat
Nothing is simpler than creating a chat server, right?
Not quite! Chat servers expose you to all the fun of asynchronous programming:
How will the server handle clients connecting concurrently?
How will it handle them disconnecting?
How will it distribute the messages?
This tutorial explains how to write a chat server in `async-std`.
You can also find the tutorial in [our repository](https://github.com/async-rs/async-std/blob/master/examples/a-chat).

View file

@ -1,145 +0,0 @@
## Receiving messages
Let's implement the receiving part of the protocol.
We need to:
1. split incoming `TcpStream` on `\n` and decode bytes as utf-8
2. interpret the first line as a login
3. parse the rest of the lines as a `login: message`
```rust,edition2018
# extern crate async_std;
# use async_std::{
# net::{TcpListener, ToSocketAddrs},
# prelude::*,
# task,
# };
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
#
use async_std::{
io::BufReader,
net::TcpStream,
};
async fn accept_loop(addr: impl ToSocketAddrs) -> Result<()> {
let listener = TcpListener::bind(addr).await?;
let mut incoming = listener.incoming();
while let Some(stream) = incoming.next().await {
let stream = stream?;
println!("Accepting from: {}", stream.peer_addr()?);
let _handle = task::spawn(connection_loop(stream)); // 1
}
Ok(())
}
async fn connection_loop(stream: TcpStream) -> Result<()> {
let reader = BufReader::new(&stream); // 2
let mut lines = reader.lines();
let name = match lines.next().await { // 3
None => Err("peer disconnected immediately")?,
Some(line) => line?,
};
println!("name = {}", name);
while let Some(line) = lines.next().await { // 4
let line = line?;
let (dest, msg) = match line.find(':') { // 5
None => continue,
Some(idx) => (&line[..idx], line[idx + 1 ..].trim()),
};
let dest: Vec<String> = dest.split(',').map(|name| name.trim().to_string()).collect();
let msg: String = msg.to_string();
}
Ok(())
}
```
1. We use `task::spawn` function to spawn an independent task for working with each client.
That is, after accepting the client the `accept_loop` immediately starts waiting for the next one.
This is the core benefit of event-driven architecture: we serve many clients concurrently, without spending many hardware threads.
2. Luckily, the "split byte stream into lines" functionality is already implemented.
`.lines()` call returns a stream of `String`'s.
3. We get the first line -- login
4. And, once again, we implement a manual async for loop.
5. Finally, we parse each line into a list of destination logins and the message itself.
## Managing Errors
One serious problem in the above solution is that, while we correctly propagate errors in the `connection_loop`, we just drop the error on the floor afterwards!
That is, `task::spawn` does not return an error immediately (it can't, it needs to run the future to completion first), only after it is joined.
We can "fix" it by waiting for the task to be joined, like this:
```rust,edition2018
# #![feature(async_closure)]
# extern crate async_std;
# use async_std::{
# io::BufReader,
# net::{TcpListener, TcpStream, ToSocketAddrs},
# prelude::*,
# task,
# };
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
#
# async fn connection_loop(stream: TcpStream) -> Result<()> {
# let reader = BufReader::new(&stream); // 2
# let mut lines = reader.lines();
#
# let name = match lines.next().await { // 3
# None => Err("peer disconnected immediately")?,
# Some(line) => line?,
# };
# println!("name = {}", name);
#
# while let Some(line) = lines.next().await { // 4
# let line = line?;
# let (dest, msg) = match line.find(':') { // 5
# None => continue,
# Some(idx) => (&line[..idx], line[idx + 1 ..].trim()),
# };
# let dest: Vec<String> = dest.split(',').map(|name| name.trim().to_string()).collect();
# let msg: String = msg.trim().to_string();
# }
# Ok(())
# }
#
# async move |stream| {
let handle = task::spawn(connection_loop(stream));
handle.await?
# };
```
The `.await` waits until the client finishes, and `?` propagates the result.
There are two problems with this solution, however!
*First*, because we immediately await the client, we can only handle one client at a time, and that completely defeats the purpose of async!
*Second*, if a client encounters an IO error, the whole server immediately exits.
That is, a flaky internet connection of one peer brings down the whole chat room!
A correct way to handle client errors in this case is to log them and continue serving other clients.
So let's use a helper function for this:
```rust,edition2018
# extern crate async_std;
# use async_std::{
# io,
# prelude::*,
# task,
# };
#
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
fn spawn_and_log_error<F>(fut: F) -> task::JoinHandle<()>
where
F: Future<Output = Result<()>> + Send + 'static,
{
task::spawn(async move {
if let Err(e) = fut.await {
eprintln!("{}", e)
}
})
}
```

View file

@ -1,44 +0,0 @@
## Sending Messages
Now it's time to implement the other half -- sending messages.
The most obvious way to implement sending is to give each `connection_loop` access to the write half of the `TcpStream` of each of the other clients.
That way, a client can directly `.write_all` a message to recipients.
However, this would be wrong: if Alice sends `bob: foo`, and Charley sends `bob: bar`, Bob might actually receive `fobaor`.
Sending a message over a socket might require several syscalls, so two concurrent `.write_all`'s might interfere with each other!
As a rule of thumb, only a single task should write to each `TcpStream`.
So let's create a `connection_writer_loop` task which receives messages over a channel and writes them to the socket.
This task would be the point of serialization of messages.
If Alice and Charley send two messages to Bob at the same time, Bob will see the messages in the same order as they arrive in the channel.
```rust,edition2018
# extern crate async_std;
# extern crate futures;
# use async_std::{
# net::TcpStream,
# prelude::*,
# };
use futures::channel::mpsc; // 1
use futures::sink::SinkExt;
use std::sync::Arc;
# type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
type Sender<T> = mpsc::UnboundedSender<T>; // 2
type Receiver<T> = mpsc::UnboundedReceiver<T>;
async fn connection_writer_loop(
mut messages: Receiver<String>,
stream: Arc<TcpStream>, // 3
) -> Result<()> {
let mut stream = &*stream;
while let Some(msg) = messages.next().await {
stream.write_all(msg.as_bytes()).await?;
}
Ok(())
}
```
1. We will use channels from the `futures` crate.
2. For simplicity, we will use `unbounded` channels, and won't be discussing backpressure in this tutorial.
3. As `connection_loop` and `connection_writer_loop` share the same `TcpStream`, we need to put it into an `Arc`.
Note that because `client` only reads from the stream and `connection_writer_loop` only writes to the stream, we don't get a race here.

View file

@ -1,47 +0,0 @@
# Specification and Getting Started
## Specification
The chat uses a simple text protocol over TCP.
The protocol consists of utf-8 messages, separated by `\n`.
The client connects to the server and sends a login as the first line.
After that, the client can send messages to other clients using the following syntax:
```text
login1, login2, ... loginN: message
```
Each of the specified clients then receives a `from login: message` message.
A possible session might look like this:
```text
On Alice's computer:   |  On Bob's computer:
> alice                |  > bob
> bob: hello              < from alice: hello
                       |  > alice, bob: hi!
                          < from bob: hi!
< from bob: hi!        |
```
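To make the message syntax concrete, here is a small parsing sketch; the
`parse_line` helper is made up for illustration (the real server parses lines
inline, as shown later in the tutorial):

```rust,edition2018
fn parse_line(line: &str) -> Option<(Vec<String>, String)> {
    // Split at the first ':' into a recipient list and the message body.
    let idx = line.find(':')?;
    let (dest, msg) = (&line[..idx], line[idx + 1..].trim());
    let dest = dest.split(',').map(|name| name.trim().to_string()).collect();
    Some((dest, msg.to_string()))
}

fn main() {
    let (to, msg) = parse_line("alice, bob: hi!").unwrap();
    assert_eq!(to, vec!["alice".to_string(), "bob".to_string()]);
    assert_eq!(msg, "hi!");
}
```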
The main challenge for the chat server is keeping track of many concurrent connections.
The main challenge for the chat client is managing concurrent outgoing messages, incoming messages, and the user's typing.
## Getting Started
Let's create a new Cargo project:
```bash
$ cargo new a-chat
$ cd a-chat
```
Add the following lines to `Cargo.toml`:
```toml
[dependencies]
futures = "0.3.0"
async-std = "1"
```

View file

@ -0,0 +1 @@
# Tutorials

View file

@ -0,0 +1,43 @@
# Exercise: Waiting for `std::thread`
Parallel processing is usually done via [threads].
In `async-std`, we have a similar concept, called a [`task`].
These two worlds seem different - and in some regards, they are - though they
are easy to connect.
In this exercise, you will learn how to connect these two concurrent/parallel worlds easily, by connecting a thread to a task.
## Understanding the problem
The standard thread API in Rust is `std::thread`. Specifically, it contains the [`spawn`] function, which allows us to start a thread:
```rust
std::thread::spawn(|| {
    println!("in child thread");
});

println!("in parent thread");
```
This creates a thread, _immediately_ [schedules] it to run, and continues. This is crucial: once the thread is spawned, it is independent of its _parent thread_. If you want to wait for the thread to end, you need to capture its [`JoinHandle`] and join it with your current thread:
```rust
let thread = std::thread::spawn(|| {
    println!("in child thread");
});

thread.join().unwrap(); // blocks until the child thread has finished
println!("in parent thread");
```
This comes at a cost though: the waiting thread will [block] until the child is done. Wouldn't it be nice if we could just use the `.await` syntax here and leave the opportunity for another task to be scheduled while waiting?
## Backchannels
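One way to get there is a "backchannel": spawn the thread together with a oneshot
channel from the `futures` crate, have the thread send on that channel right
before it finishes, and `.await` the receiver before joining. Below is a minimal
sketch of this idea; the names (`AsyncHandle`, `spawn`) are illustrative and the
error handling is kept deliberately simple:

```rust,edition2018
# extern crate async_std;
# extern crate futures;
use std::thread;

use async_std::task;
use futures::channel::oneshot;

/// A thread handle that can be awaited: the thread signals completion over a
/// oneshot channel, so waiting does not block the executor.
struct AsyncHandle<T> {
    handle: thread::JoinHandle<T>,
    notifier: oneshot::Receiver<()>,
}

impl<T> AsyncHandle<T> {
    async fn join(self) -> thread::Result<T> {
        // Ignore the result: a cancelled channel just means the thread is already gone.
        let _ = self.notifier.await;
        // The thread has finished (or panicked), so this join returns quickly.
        self.handle.join()
    }
}

fn spawn<F, T>(f: F) -> AsyncHandle<T>
where
    F: FnOnce() -> T + Send + 'static,
    T: Send + 'static,
{
    let (sender, receiver) = oneshot::channel::<()>();
    let handle = thread::spawn(move || {
        let res = f();
        let _ = sender.send(()); // notify the async side that we are done
        res
    });
    AsyncHandle { handle, notifier: receiver }
}

fn main() {
    task::block_on(async {
        let answer = spawn(|| 40 + 2);
        assert_eq!(answer.join().await.unwrap(), 42);
    });
}
```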
[threads]: TODO: wikipedia
[`task`]: TODO: docs link
[`spawn`]: TODO: docs link
[`JoinHandle`]: TODO: docs link
[schedules]: TODO: Glossary link
[block]: TODO: Link to blocking

View file

@ -1,171 +0,0 @@
# Examples
This directory contains example code that makes use of `async-std`, each of which can be run from the command line.
##### [Hello World][hello-world]
Spawns a task that says hello.
```
cargo run --example hello-world
```
##### [Line Count][line-count]
Counts the number of lines in a file given as an argument.
```shell
cargo run --example line-count -- ./Cargo.toml
```
##### [List Dir][list-dir]
Lists files in a directory given as an argument.
```shell
cargo run --example list-dir -- .
```
##### [Logging][logging]
Prints the runtime's execution log on the standard output.
```shell
cargo run --example logging
```
##### [Print File][print-file]
Prints a file given as an argument to stdout.
```shell
cargo run --example print-file ./Cargo.toml
```
##### [Socket Timeouts][socket-timeouts]
Prints response of GET request made to TCP server with 5 second socket timeout
```shell
cargo run --example socket-timeouts
```
##### [Stdin Echo][stdin-echo]
Echoes lines read on stdin to stdout.
```shell
cargo run --example stdin-echo
```
##### [Stdin Timeout][stdin-timeout]
Reads a line from stdin, or exits with an error if nothing is read in 5 seconds.
```shell
cargo run --example stdin-timeout
```
##### [Surf Web][surf-web]
Sends an HTTP request to the Rust website.
```shell
cargo run --example surf-web
```
##### [Task Local][task-local]
Creates a task-local value.
```shell
cargo run --example task-local
```
##### [Task Name][task-name]
Spawns a named task that prints its name.
```shell
cargo run --example task-name
```
##### [TCP Client][tcp-client]
Connects to Localhost over TCP.
First, start the echo server:
```shell
cargo run --example tcp-echo
```
Then run the client:
```shell
cargo run --example tcp-client
```
##### [TCP Echo][tcp-echo]
TCP echo server.
Start the echo server:
```shell
cargo run --example tcp-echo
```
Make requests by running the client example:
```shell
cargo run --example tcp-client
```
##### [UDP Client][udp-client]
Connects to Localhost over UDP.
First, start the echo server:
```shell
cargo run --example udp-echo
```
Then run the client:
```shell
cargo run --example udp-client
```
##### [UDP Echo][udp-echo]
UDP echo server.
Start the echo server:
```shell
cargo run --example udp-echo
```
Make requests by running the client example:
```shell
cargo run --example udp-client
```
[hello-world]: https://github.com/async-rs/async-std/blob/master/examples/hello-world.rs
[line-count]: https://github.com/async-rs/async-std/blob/master/examples/line-count.rs
[list-dir]: https://github.com/async-rs/async-std/blob/master/examples/list-dir.rs
[logging]: https://github.com/async-rs/async-std/blob/master/examples/logging.rs
[print-file]: https://github.com/async-rs/async-std/blob/master/examples/print-file.rs
[socket-timeouts]: https://github.com/async-rs/async-std/blob/master/examples/socket-timeouts.rs
[stdin-echo]: https://github.com/async-rs/async-std/blob/master/examples/stdin-echo.rs
[stdin-timeout]: https://github.com/async-rs/async-std/blob/master/examples/stdin-timeout.rs
[surf-web]: https://github.com/async-rs/async-std/blob/master/examples/surf-web.rs
[task-local]: https://github.com/async-rs/async-std/blob/master/examples/task-local.rs
[task-name]: https://github.com/async-rs/async-std/blob/master/examples/task-name.rs
[tcp-client]: https://github.com/async-rs/async-std/blob/master/examples/tcp-client.rs
[tcp-echo]: https://github.com/async-rs/async-std/blob/master/examples/tcp-echo.rs
[udp-client]: https://github.com/async-rs/async-std/blob/master/examples/udp-client.rs
[udp-echo]: https://github.com/async-rs/async-std/blob/master/examples/udp-echo.rs

View file

@ -1,45 +0,0 @@
use futures::select;
use futures::FutureExt;
use async_std::{
io::{stdin, BufReader},
net::{TcpStream, ToSocketAddrs},
prelude::*,
task,
};
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
pub(crate) fn main() -> Result<()> {
task::block_on(try_main("127.0.0.1:8080"))
}
async fn try_main(addr: impl ToSocketAddrs) -> Result<()> {
let stream = TcpStream::connect(addr).await?;
let (reader, mut writer) = (&stream, &stream);
let reader = BufReader::new(reader);
let mut lines_from_server = futures::StreamExt::fuse(reader.lines());
let stdin = BufReader::new(stdin());
let mut lines_from_stdin = futures::StreamExt::fuse(stdin.lines());
loop {
select! {
line = lines_from_server.next().fuse() => match line {
Some(line) => {
let line = line?;
println!("{}", line);
},
None => break,
},
line = lines_from_stdin.next().fuse() => match line {
Some(line) => {
let line = line?;
writer.write_all(line.as_bytes()).await?;
writer.write_all(b"\n").await?;
}
None => break,
}
}
}
Ok(())
}

View file

@ -1,13 +0,0 @@
mod client;
mod server;
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
fn main() -> Result<()> {
let mut args = std::env::args();
match (args.nth(1).as_ref().map(String::as_str), args.next()) {
(Some("client"), None) => client::main(),
(Some("server"), None) => server::main(),
_ => Err("Usage: a-chat [client|server]".into()),
}
}

View file

@ -1,184 +0,0 @@
use std::{
collections::hash_map::{Entry, HashMap},
sync::Arc,
};
use futures::{channel::mpsc, select, FutureExt, SinkExt};
use async_std::{
io::BufReader,
net::{TcpListener, TcpStream, ToSocketAddrs},
prelude::*,
task,
};
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
type Sender<T> = mpsc::UnboundedSender<T>;
type Receiver<T> = mpsc::UnboundedReceiver<T>;
#[derive(Debug)]
enum Void {}
pub(crate) fn main() -> Result<()> {
task::block_on(accept_loop("127.0.0.1:8080"))
}
async fn accept_loop(addr: impl ToSocketAddrs) -> Result<()> {
let listener = TcpListener::bind(addr).await?;
let (broker_sender, broker_receiver) = mpsc::unbounded();
let broker = task::spawn(broker_loop(broker_receiver));
let mut incoming = listener.incoming();
while let Some(stream) = incoming.next().await {
let stream = stream?;
println!("Accepting from: {}", stream.peer_addr()?);
spawn_and_log_error(connection_loop(broker_sender.clone(), stream));
}
drop(broker_sender);
broker.await;
Ok(())
}
async fn connection_loop(mut broker: Sender<Event>, stream: TcpStream) -> Result<()> {
let stream = Arc::new(stream);
let reader = BufReader::new(&*stream);
let mut lines = reader.lines();
let name = match lines.next().await {
None => return Err("peer disconnected immediately".into()),
Some(line) => line?,
};
let (_shutdown_sender, shutdown_receiver) = mpsc::unbounded::<Void>();
broker
.send(Event::NewPeer {
name: name.clone(),
stream: Arc::clone(&stream),
shutdown: shutdown_receiver,
})
.await
.unwrap();
while let Some(line) = lines.next().await {
let line = line?;
let (dest, msg) = match line.find(':') {
None => continue,
Some(idx) => (&line[..idx], line[idx + 1..].trim()),
};
let dest: Vec<String> = dest
.split(',')
.map(|name| name.trim().to_string())
.collect();
let msg: String = msg.trim().to_string();
broker
.send(Event::Message {
from: name.clone(),
to: dest,
msg,
})
.await
.unwrap();
}
Ok(())
}
async fn connection_writer_loop(
messages: &mut Receiver<String>,
stream: Arc<TcpStream>,
mut shutdown: Receiver<Void>,
) -> Result<()> {
let mut stream = &*stream;
loop {
select! {
msg = messages.next().fuse() => match msg {
Some(msg) => stream.write_all(msg.as_bytes()).await?,
None => break,
},
void = shutdown.next().fuse() => match void {
Some(void) => match void {},
None => break,
}
}
}
Ok(())
}
#[derive(Debug)]
enum Event {
NewPeer {
name: String,
stream: Arc<TcpStream>,
shutdown: Receiver<Void>,
},
Message {
from: String,
to: Vec<String>,
msg: String,
},
}
async fn broker_loop(mut events: Receiver<Event>) {
let (disconnect_sender, mut disconnect_receiver) =
mpsc::unbounded::<(String, Receiver<String>)>();
let mut peers: HashMap<String, Sender<String>> = HashMap::new();
loop {
let event = select! {
event = events.next().fuse() => match event {
None => break,
Some(event) => event,
},
disconnect = disconnect_receiver.next().fuse() => {
let (name, _pending_messages) = disconnect.unwrap();
assert!(peers.remove(&name).is_some());
continue;
},
};
match event {
Event::Message { from, to, msg } => {
for addr in to {
if let Some(peer) = peers.get_mut(&addr) {
let msg = format!("from {}: {}\n", from, msg);
peer.send(msg).await.unwrap();
}
}
}
Event::NewPeer {
name,
stream,
shutdown,
} => match peers.entry(name.clone()) {
Entry::Occupied(..) => (),
Entry::Vacant(entry) => {
let (client_sender, mut client_receiver) = mpsc::unbounded();
entry.insert(client_sender);
let mut disconnect_sender = disconnect_sender.clone();
spawn_and_log_error(async move {
let res =
connection_writer_loop(&mut client_receiver, stream, shutdown).await;
disconnect_sender
.send((name, client_receiver))
.await
.unwrap();
res
});
}
},
}
}
drop(peers);
drop(disconnect_sender);
while let Some((_name, _pending_messages)) = disconnect_receiver.next().await {}
}
fn spawn_and_log_error<F>(fut: F) -> task::JoinHandle<()>
where
F: Future<Output = Result<()>> + Send + 'static,
{
task::spawn(async move {
if let Err(e) = fut.await {
eprintln!("{}", e)
}
})
}

15
examples/fetch-html.rs Normal file
View file

@ -0,0 +1,15 @@
//! Fetches the HTML contents of the Rust website.
#![feature(async_await)]
use std::error::Error;
use async_std::task;
fn main() -> Result<(), Box<dyn Error + Send + Sync>> {
task::block_on(async {
// let contents = surf::get("https://www.rust-lang.org").recv_string().await?;
// println!("{}", contents);
Ok(())
})
}

View file

@ -1,5 +1,7 @@
//! Spawns a task that says hello.
#![feature(async_await)]
use async_std::task;
async fn say_hi() {

View file

@ -0,0 +1,73 @@
#![feature(async_await)]
use async_std::task;
use futures::channel::oneshot;
use std::{thread, time};
struct AsyncHandle<T> {
handle: thread::JoinHandle<T>,
notifier: oneshot::Receiver<()>,
}
impl<T> AsyncHandle<T> {
fn thread(&self) -> &std::thread::Thread {
self.handle.thread()
}
async fn join(self) -> std::thread::Result<T> {
// ignore results, the join handle will propagate panics
let _ = self.notifier.await;
self.handle.join()
}
}
fn spawn<F, T>(f: F) -> AsyncHandle<T>
where
F: FnOnce() -> T,
F: Send + 'static,
T: Send + 'static,
{
let (sender, receiver) = oneshot::channel::<()>();
let thread_handle = thread::spawn(move || {
let res = f();
sender.send(()).unwrap();
res
});
AsyncHandle {
handle: thread_handle,
notifier: receiver,
}
}
fn main() {
let sleepy_thread = spawn(move || {
thread::sleep(time::Duration::from_millis(1000));
String::from("Finished")
});
task::block_on(async move {
println!("waiting for sleepy thread");
let thread_result = sleepy_thread.join().await;
match thread_result {
Ok(s) => println!("Result: {}", s),
Err(e) => println!("Error: {:?}", e),
}
});
let panicking_thread = spawn(move || {
panic!("aaah!");
String::from("Finished!")
});
task::block_on(async move {
println!("waiting for panicking thread");
let thread_result = panicking_thread.join().await;
match thread_result {
Ok(s) => println!("Result: {}", s),
Err(e) => println!("Error: {:?}", e),
}
});
}

View file

@ -1,5 +1,7 @@
//! Counts the number of lines in a file given as an argument.
#![feature(async_await)]
use std::env::args;
use async_std::fs::File;

View file

@ -1,5 +1,7 @@
//! Lists files in a directory given as an argument.
#![feature(async_await)]
use std::env::args;
use async_std::fs;
@ -13,9 +15,8 @@ fn main() -> io::Result<()> {
task::block_on(async {
let mut dir = fs::read_dir(&path).await?;
while let Some(res) = dir.next().await {
let entry = res?;
println!("{}", entry.file_name().to_string_lossy());
while let Some(entry) = dir.next().await {
println!("{}", entry?.file_name().to_string_lossy());
}
Ok(())

View file

@ -1,5 +1,7 @@
//! Prints the runtime's execution log on the standard output.
#![feature(async_await)]
use async_std::task;
fn main() {

View file

@ -1,5 +1,7 @@
//! Prints a file given as an argument to stdout.
#![feature(async_await)]
use std::env::args;
use async_std::fs::File;
@ -7,7 +9,7 @@ use async_std::io;
use async_std::prelude::*;
use async_std::task;
const LEN: usize = 16 * 1024; // 16 KiB
const LEN: usize = 4 * 1024 * 1024; // 4 MiB
fn main() -> io::Result<()> {
let path = args().nth(1).expect("missing path argument");


@ -1,28 +0,0 @@
//! Prints response of GET request made to TCP server with 5 second socket timeout
use std::time::Duration;
use async_std::{io, net::TcpStream, prelude::*, task};
async fn get() -> io::Result<Vec<u8>> {
let mut stream = TcpStream::connect("example.com:80").await?;
stream
.write_all(b"GET /index.html HTTP/1.0\r\n\r\n")
.await?;
let mut buf = vec![];
io::timeout(Duration::from_secs(5), async move {
stream.read_to_end(&mut buf).await?;
Ok(buf)
})
.await
}
fn main() {
task::block_on(async {
let raw_response = get().await.expect("request");
let response = String::from_utf8(raw_response).expect("utf8 conversion");
println!("received: {}", response);
});
}


@ -1,5 +1,7 @@
//! Echoes lines read on stdin to stdout.
#![feature(async_await)]
use async_std::io;
use async_std::prelude::*;
use async_std::task;


@ -1,20 +1,30 @@
//! Reads a line from stdin, or exits with an error if nothing is read in 5 seconds.
#![feature(async_await)]
use std::time::Duration;
use async_std::io;
use async_std::prelude::*;
use async_std::task;
fn main() -> io::Result<()> {
// This async scope times out after 5 seconds.
task::block_on(io::timeout(Duration::from_secs(5), async {
task::block_on(async {
let stdin = io::stdin();
// Read a line from the standard input and display it.
let mut line = String::new();
stdin.read_line(&mut line).await?;
dbg!(line);
match stdin
.read_line(&mut line)
.timeout(Duration::from_secs(5))
.await
{
Ok(res) => {
res?;
print!("Got line: {}", line);
}
Err(_) => println!("You have only 5 seconds to enter a line. Try again :)"),
}
Ok(())
}))
})
}


@ -1,17 +0,0 @@
use async_std::task;
fn main() -> Result<(), surf::Exception> {
task::block_on(async {
let url = "https://www.rust-lang.org";
let mut response = surf::get(url).await?;
let body = response.body_string().await?;
dbg!(url);
dbg!(response.status());
dbg!(response.version());
dbg!(response.headers());
dbg!(body.len());
Ok(())
})
}


@ -1,5 +1,7 @@
//! Creates a task-local value.
#![feature(async_await)]
use std::cell::Cell;
use async_std::prelude::*;


@ -1,5 +1,7 @@
//! Spawns a named task that prints its name.
#![feature(async_await)]
use async_std::task;
async fn print_name() {


@ -12,6 +12,8 @@
//! $ cargo run --example tcp-client
//! ```
#![feature(async_await)]
use async_std::io;
use async_std::net::TcpStream;
use async_std::prelude::*;


@ -6,6 +6,8 @@
//! $ nc localhost 8080
//! ```
#![feature(async_await)]
use async_std::io;
use async_std::net::{TcpListener, TcpStream};
use async_std::prelude::*;
@ -14,9 +16,8 @@ use async_std::task;
async fn process(stream: TcpStream) -> io::Result<()> {
println!("Accepted from: {}", stream.peer_addr()?);
let mut reader = stream.clone();
let mut writer = stream;
io::copy(&mut reader, &mut writer).await?;
let (reader, writer) = &mut (&stream, &stream);
io::copy(reader, writer).await?;
Ok(())
}
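The rewritten `process` above relies on `Read` and `Write` being implemented for `&TcpStream` as well as for `TcpStream`, so two shared borrows are enough to copy the socket into itself without `clone`. A minimal sketch of that borrow trick in isolation (the function name is hypothetical):

```rust
use async_std::io;
use async_std::net::TcpStream;

// Echo everything read from the socket back to the same socket.
async fn echo(stream: TcpStream) -> io::Result<u64> {
    // `&TcpStream` implements both `Read` and `Write`, so a single stream
    // can be split into a reader half and a writer half without cloning.
    let (reader, writer) = &mut (&stream, &stream);
    io::copy(reader, writer).await
}
```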


@ -1,45 +0,0 @@
//! TCP echo server, accepting connections on both ipv4 and ipv6 sockets.
//!
//! To send messages, do:
//!
//! ```sh
//! $ nc 127.0.0.1 8080
//! $ nc ::1 8080
//! ```
use async_std::io;
use async_std::net::{TcpListener, TcpStream};
use async_std::prelude::*;
use async_std::task;
async fn process(stream: TcpStream) -> io::Result<()> {
println!("Accepted from: {}", stream.peer_addr()?);
let mut reader = stream.clone();
let mut writer = stream;
io::copy(&mut reader, &mut writer).await?;
Ok(())
}
fn main() -> io::Result<()> {
task::block_on(async {
let ipv4_listener = TcpListener::bind("127.0.0.1:8080").await?;
println!("Listening on {}", ipv4_listener.local_addr()?);
let ipv6_listener = TcpListener::bind("[::1]:8080").await?;
println!("Listening on {}", ipv6_listener.local_addr()?);
let ipv4_incoming = ipv4_listener.incoming();
let ipv6_incoming = ipv6_listener.incoming();
let mut incoming = ipv4_incoming.merge(ipv6_incoming);
while let Some(stream) = incoming.next().await {
let stream = stream?;
task::spawn(async {
process(stream).await.unwrap();
});
}
Ok(())
})
}


@ -12,6 +12,8 @@
//! $ cargo run --example udp-client
//! ```
#![feature(async_await)]
use async_std::io;
use async_std::net::UdpSocket;
use async_std::task;


@ -6,6 +6,8 @@
//! $ nc -u localhost 8080
//! ```
#![feature(async_await)]
use async_std::io;
use async_std::net::UdpSocket;
use async_std::task;


@ -1,21 +0,0 @@
use std::collections::BinaryHeap;
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, IntoStream};
impl<T: Ord + Send> stream::Extend<T> for BinaryHeap<T> {
fn extend<'a, S: IntoStream<Item = T> + 'a>(
&'a mut self,
stream: S,
) -> Pin<Box<dyn Future<Output = ()> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
let stream = stream.into_stream();
self.reserve(stream.size_hint().0);
Box::pin(stream.for_each(move |item| self.push(item)))
}
}


@ -1,23 +0,0 @@
use std::collections::BinaryHeap;
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, FromStream, IntoStream};
impl<T: Ord + Send> FromStream<T> for BinaryHeap<T> {
#[inline]
fn from_stream<'a, S: IntoStream<Item = T> + 'a>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
let stream = stream.into_stream();
Box::pin(async move {
let mut out = BinaryHeap::new();
stream::extend(&mut out, stream).await;
out
})
}
}


@ -1,7 +0,0 @@
//! The Rust priority queue implemented with a binary heap
mod extend;
mod from_stream;
#[doc(inline)]
pub use std::collections::BinaryHeap;


@ -1,19 +0,0 @@
use std::collections::BTreeMap;
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, IntoStream};
impl<K: Ord + Send, V: Send> stream::Extend<(K, V)> for BTreeMap<K, V> {
fn extend<'a, S: IntoStream<Item = (K, V)> + 'a>(
&'a mut self,
stream: S,
) -> Pin<Box<dyn Future<Output = ()> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
Box::pin(stream.into_stream().for_each(move |(k, v)| {
self.insert(k, v);
}))
}
}


@ -1,23 +0,0 @@
use std::collections::BTreeMap;
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, FromStream, IntoStream};
impl<K: Ord + Send, V: Send> FromStream<(K, V)> for BTreeMap<K, V> {
#[inline]
fn from_stream<'a, S: IntoStream<Item = (K, V)> + 'a>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
let stream = stream.into_stream();
Box::pin(async move {
let mut out = BTreeMap::new();
stream::extend(&mut out, stream).await;
out
})
}
}


@ -1,7 +0,0 @@
//! The Rust B-Tree Map
mod extend;
mod from_stream;
#[doc(inline)]
pub use std::collections::BTreeMap;


@ -1,19 +0,0 @@
use std::collections::BTreeSet;
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, IntoStream};
impl<T: Ord + Send> stream::Extend<T> for BTreeSet<T> {
fn extend<'a, S: IntoStream<Item = T> + 'a>(
&'a mut self,
stream: S,
) -> Pin<Box<dyn Future<Output = ()> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
Box::pin(stream.into_stream().for_each(move |item| {
self.insert(item);
}))
}
}


@ -1,23 +0,0 @@
use std::collections::BTreeSet;
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, FromStream, IntoStream};
impl<T: Ord + Send> FromStream<T> for BTreeSet<T> {
#[inline]
fn from_stream<'a, S: IntoStream<Item = T> + 'a>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
let stream = stream.into_stream();
Box::pin(async move {
let mut out = BTreeSet::new();
stream::extend(&mut out, stream).await;
out
})
}
}


@ -1,7 +0,0 @@
//! The Rust B-Tree Set
mod extend;
mod from_stream;
#[doc(inline)]
pub use std::collections::BTreeSet;


@ -1,41 +0,0 @@
use std::collections::HashMap;
use std::hash::{BuildHasher, Hash};
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, IntoStream};
impl<K, V, H> stream::Extend<(K, V)> for HashMap<K, V, H>
where
K: Eq + Hash + Send,
V: Send,
H: BuildHasher + Default + Send,
{
fn extend<'a, S: IntoStream<Item = (K, V)> + 'a>(
&'a mut self,
stream: S,
) -> Pin<Box<dyn Future<Output = ()> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
let stream = stream.into_stream();
// The following is adapted from the hashbrown source code:
// https://github.com/rust-lang/hashbrown/blob/d1ad4fc3aae2ade446738eea512e50b9e863dd0c/src/map.rs#L2470-L2491
//
// Keys may be already present or show multiple times in the stream. Reserve the entire
// hint lower bound if the map is empty. Otherwise reserve half the hint (rounded up), so
// the map will only resize twice in the worst case.
let additional = if self.is_empty() {
stream.size_hint().0
} else {
(stream.size_hint().0 + 1) / 2
};
self.reserve(additional);
Box::pin(stream.for_each(move |(k, v)| {
self.insert(k, v);
}))
}
}
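A small worked example of the reserve heuristic described in the comment above (the numbers are hypothetical): an empty map reserves the full lower size hint, while a non-empty map reserves half of it rounded up, which bounds the number of reallocations at two in the worst case.

```rust
// Same policy as the `Extend` impl above, extracted for illustration.
fn additional_to_reserve(map_is_empty: bool, size_hint_lower: usize) -> usize {
    if map_is_empty {
        size_hint_lower
    } else {
        (size_hint_lower + 1) / 2
    }
}

fn main() {
    assert_eq!(additional_to_reserve(true, 10), 10);
    // Non-empty map: half the hint, rounded up.
    assert_eq!(additional_to_reserve(false, 9), 5);
    assert_eq!(additional_to_reserve(false, 10), 5);
}
```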


@ -1,29 +0,0 @@
use std::collections::HashMap;
use std::hash::{BuildHasher, Hash};
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, FromStream, IntoStream};
impl<K, V, H> FromStream<(K, V)> for HashMap<K, V, H>
where
K: Eq + Hash + Send,
H: BuildHasher + Default + Send,
V: Send,
{
#[inline]
fn from_stream<'a, S: IntoStream<Item = (K, V)> + 'a>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
let stream = stream.into_stream();
Box::pin(async move {
let mut out = HashMap::with_hasher(Default::default());
stream::extend(&mut out, stream).await;
out
})
}
}


@ -1,7 +0,0 @@
//! The Rust hash map, implemented with quadratic probing and SIMD lookup.
mod extend;
mod from_stream;
#[doc(inline)]
pub use std::collections::HashMap;


@ -1,43 +0,0 @@
use std::collections::HashSet;
use std::hash::{BuildHasher, Hash};
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, IntoStream};
impl<T, H> stream::Extend<T> for HashSet<T, H>
where
T: Eq + Hash + Send,
H: BuildHasher + Default + Send,
{
fn extend<'a, S: IntoStream<Item = T> + 'a>(
&'a mut self,
stream: S,
) -> Pin<Box<dyn Future<Output = ()> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
// The Extend impl for HashSet in the standard library delegates to the internal HashMap.
// Thus, this impl is just a copy of the async Extend impl for HashMap in this crate.
let stream = stream.into_stream();
// The following is adapted from the hashbrown source code:
// https://github.com/rust-lang/hashbrown/blob/d1ad4fc3aae2ade446738eea512e50b9e863dd0c/src/map.rs#L2470-L2491
//
// Keys may be already present or show multiple times in the stream. Reserve the entire
// hint lower bound if the map is empty. Otherwise reserve half the hint (rounded up), so
// the map will only resize twice in the worst case.
let additional = if self.is_empty() {
stream.size_hint().0
} else {
(stream.size_hint().0 + 1) / 2
};
self.reserve(additional);
Box::pin(stream.for_each(move |item| {
self.insert(item);
}))
}
}


@ -1,28 +0,0 @@
use std::collections::HashSet;
use std::hash::{BuildHasher, Hash};
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, FromStream, IntoStream};
impl<T, H> FromStream<T> for HashSet<T, H>
where
T: Eq + Hash + Send,
H: BuildHasher + Default + Send,
{
#[inline]
fn from_stream<'a, S: IntoStream<Item = T> + 'a>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
let stream = stream.into_stream();
Box::pin(async move {
let mut out = HashSet::with_hasher(Default::default());
stream::extend(&mut out, stream).await;
out
})
}
}


@ -1,7 +0,0 @@
//! The Rust hash set, implemented as a `HashMap` where the value is `()`.
mod extend;
mod from_stream;
#[doc(inline)]
pub use std::collections::HashSet;


@ -1,18 +0,0 @@
use std::collections::LinkedList;
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, IntoStream};
impl<T: Send> stream::Extend<T> for LinkedList<T> {
fn extend<'a, S: IntoStream<Item = T> + 'a>(
&'a mut self,
stream: S,
) -> Pin<Box<dyn Future<Output = ()> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
let stream = stream.into_stream();
Box::pin(stream.for_each(move |item| self.push_back(item)))
}
}


@ -1,23 +0,0 @@
use std::collections::LinkedList;
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, FromStream, IntoStream};
impl<T: Send> FromStream<T> for LinkedList<T> {
#[inline]
fn from_stream<'a, S: IntoStream<Item = T> + 'a>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
let stream = stream.into_stream();
Box::pin(async move {
let mut out = LinkedList::new();
stream::extend(&mut out, stream).await;
out
})
}
}


@ -1,7 +0,0 @@
//! The Rust doubly-linked list with owned nodes
mod extend;
mod from_stream;
#[doc(inline)]
pub use std::collections::LinkedList;


@ -1,20 +0,0 @@
//! The Rust standard collections
//!
//! This library provides efficient implementations of the most common general purpose programming
//! data structures.
pub mod binary_heap;
pub mod btree_map;
pub mod btree_set;
pub mod hash_map;
pub mod hash_set;
pub mod linked_list;
pub mod vec_deque;
pub use binary_heap::BinaryHeap;
pub use btree_map::BTreeMap;
pub use btree_set::BTreeSet;
pub use hash_map::HashMap;
pub use hash_set::HashSet;
pub use linked_list::LinkedList;
pub use vec_deque::VecDeque;
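For orientation, these `Extend`/`FromStream` impls are what back the stream-side `collect`/`extend` APIs. A minimal hedged sketch of the intended usage, assuming the unstable `Stream::collect` combinator of this era and the `stream::repeat`/`take` helpers:

```rust
use std::collections::BTreeSet;

use async_std::prelude::*;
use async_std::stream;
use async_std::task;

fn main() {
    task::block_on(async {
        // Collecting a stream exercises the `FromStream` impl for `BTreeSet`.
        let set: BTreeSet<i32> = stream::repeat(7).take(3).collect().await;
        assert_eq!(set.len(), 1); // duplicates collapse in a set
    });
}
```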


@ -1,21 +0,0 @@
use std::collections::VecDeque;
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, IntoStream};
impl<T: Send> stream::Extend<T> for VecDeque<T> {
fn extend<'a, S: IntoStream<Item = T> + 'a>(
&'a mut self,
stream: S,
) -> Pin<Box<dyn Future<Output = ()> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
let stream = stream.into_stream();
self.reserve(stream.size_hint().0);
Box::pin(stream.for_each(move |item| self.push_back(item)))
}
}


@ -1,23 +0,0 @@
use std::collections::VecDeque;
use std::pin::Pin;
use crate::prelude::*;
use crate::stream::{self, FromStream, IntoStream};
impl<T: Send> FromStream<T> for VecDeque<T> {
#[inline]
fn from_stream<'a, S: IntoStream<Item = T> + 'a>(
stream: S,
) -> Pin<Box<dyn Future<Output = Self> + 'a + Send>>
where
<S as IntoStream>::IntoStream: Send,
{
let stream = stream.into_stream();
Box::pin(async move {
let mut out = VecDeque::new();
stream::extend(&mut out, stream).await;
out
})
}
}


@ -1,7 +0,0 @@
//! The Rust double-ended queue, implemented with a growable ring buffer.
mod extend;
mod from_stream;
#[doc(inline)]
pub use std::collections::VecDeque;


@ -1,7 +1,8 @@
use std::fs;
use std::path::{Path, PathBuf};
use crate::io;
use crate::path::{Path, PathBuf};
use crate::task::spawn_blocking;
use crate::utils::Context as _;
use crate::task::blocking;
/// Returns the canonical form of a path.
///
@ -14,15 +15,15 @@ use crate::utils::Context as _;
///
/// # Errors
///
/// An error will be returned in the following situations:
/// An error will be returned in the following situations (not an exhaustive list):
///
/// * `path` does not point to an existing file or directory.
/// * A non-final component in `path` is not a directory.
/// * Some other I/O error occurred.
/// * `path` does not exist.
/// * A non-final component in path is not a directory.
///
/// # Examples
///
/// ```no_run
/// # #![feature(async_await)]
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
@ -33,10 +34,5 @@ use crate::utils::Context as _;
/// ```
pub async fn canonicalize<P: AsRef<Path>>(path: P) -> io::Result<PathBuf> {
let path = path.as_ref().to_owned();
spawn_blocking(move || {
std::fs::canonicalize(&path)
.map(Into::into)
.context(|| format!("could not canonicalize `{}`", path.display()))
})
.await
blocking::spawn(async move { fs::canonicalize(path) }).await
}
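Both sides of this diff follow the same shape for every `fs` free function: take owned arguments, run the corresponding `std::fs` call on the blocking thread pool, and await the result. A minimal sketch of that shape using the public `task::spawn_blocking` entry point (feature-gated in some releases; the crate-internal `blocking::spawn` shown here is assumed to behave the same way):

```rust
use std::path::{Path, PathBuf};

use async_std::io;
use async_std::task;

// Offload the blocking std call, then hand its result back to async callers.
async fn canonicalize_like<P: AsRef<Path>>(path: P) -> io::Result<PathBuf> {
    let path = path.as_ref().to_owned();
    task::spawn_blocking(move || std::fs::canonicalize(path)).await
}
```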


@ -1,50 +1,43 @@
use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use crate::utils::Context as _;
use std::fs;
use std::path::Path;
/// Copies the contents and permissions of a file to a new location.
use crate::io;
use crate::task::blocking;
/// Copies the contents and permissions of one file to another.
///
/// On success, the total number of bytes copied is returned and equals the length of the `to` file
/// after this operation.
/// On success, the total number of bytes copied is returned and equals the length of the `from`
/// file.
///
/// The old contents of `to` will be overwritten. If `from` and `to` both point to the same file,
/// then the file will likely get truncated as a result of this operation.
///
/// If you're working with open [`File`]s and want to copy contents through those types, use the
/// [`io::copy`] function.
/// then the file will likely get truncated by this operation.
///
/// This function is an async version of [`std::fs::copy`].
///
/// [`File`]: struct.File.html
/// [`io::copy`]: ../io/fn.copy.html
/// [`std::fs::copy`]: https://doc.rust-lang.org/std/fs/fn.copy.html
///
/// # Errors
///
/// An error will be returned in the following situations:
/// An error will be returned in the following situations (not an exhaustive list):
///
/// * `from` does not point to an existing file.
/// * The current process lacks permissions to read `from` or write `to`.
/// * Some other I/O error occurred.
/// * The `from` path is not a file.
/// * The `from` file does not exist.
/// * The current process lacks permissions to access `from` or write `to`.
///
/// # Examples
///
/// ```no_run
/// # #![feature(async_await)]
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
///
/// let num_bytes = fs::copy("a.txt", "b.txt").await?;
/// let bytes_copied = fs::copy("a.txt", "b.txt").await?;
/// #
/// # Ok(()) }) }
/// ```
pub async fn copy<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> io::Result<u64> {
let from = from.as_ref().to_owned();
let to = to.as_ref().to_owned();
spawn_blocking(move || {
std::fs::copy(&from, &to)
.context(|| format!("could not copy `{}` to `{}`", from.display(), to.display()))
})
.await
blocking::spawn(async move { fs::copy(&from, &to) }).await
}


@ -1,43 +1,36 @@
use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use crate::utils::Context as _;
use std::fs;
use std::path::Path;
/// Creates a new directory.
///
/// Note that this function will only create the final directory in `path`. If you want to create
/// all of its missing parent directories too, use the [`create_dir_all`] function instead.
use crate::io;
use crate::task::blocking;
/// Creates a new, empty directory.
///
/// This function is an async version of [`std::fs::create_dir`].
///
/// [`create_dir_all`]: fn.create_dir_all.html
/// [`std::fs::create_dir`]: https://doc.rust-lang.org/std/fs/fn.create_dir.html
///
/// # Errors
///
/// An error will be returned in the following situations:
/// An error will be returned in the following situations (not an exhaustive list):
///
/// * `path` already points to an existing file or directory.
/// * A parent directory in `path` does not exist.
/// * The current process lacks permissions to create the directory.
/// * Some other I/O error occurred.
/// * `path` already exists.
/// * A parent of the given path does not exist.
/// * The current process lacks permissions to create directory at `path`.
///
/// # Examples
///
/// ```no_run
/// # #![feature(async_await)]
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
///
/// fs::create_dir("./some/directory").await?;
/// fs::create_dir("./some/dir").await?;
/// #
/// # Ok(()) }) }
/// ```
pub async fn create_dir<P: AsRef<Path>>(path: P) -> io::Result<()> {
let path = path.as_ref().to_owned();
spawn_blocking(move || {
std::fs::create_dir(&path)
.context(|| format!("could not create directory `{}`", path.display()))
})
.await
blocking::spawn(async move { fs::create_dir(path) }).await
}


@ -1,9 +1,10 @@
use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use crate::utils::Context as _;
use std::fs;
use std::path::{Path, PathBuf};
/// Creates a new directory and all of its parents if they are missing.
use crate::task::blocking;
use crate::io;
/// Creates a new, empty directory and all of its parents if they are missing.
///
/// This function is an async version of [`std::fs::create_dir_all`].
///
@ -11,28 +12,24 @@ use crate::utils::Context as _;
///
/// # Errors
///
/// An error will be returned in the following situations:
/// An error will be returned in the following situations (not an exhaustive list):
///
/// * `path` already points to an existing file or directory.
/// * The current process lacks permissions to create the directory or its missing parents.
/// * Some other I/O error occurred.
/// * The parent directories do not exist and couldn't be created.
/// * The current process lacks permissions to create directory at `path`.
///
/// # Examples
///
/// ```no_run
/// # #![feature(async_await)]
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
///
/// fs::create_dir_all("./some/directory").await?;
/// fs::create_dir_all("./some/dir").await?;
/// #
/// # Ok(()) }) }
/// ```
pub async fn create_dir_all<P: AsRef<Path>>(path: P) -> io::Result<()> {
let path = path.as_ref().to_owned();
spawn_blocking(move || {
std::fs::create_dir_all(&path)
.context(|| format!("could not create directory path `{}`", path.display()))
})
.await
blocking::spawn(async move { fs::create_dir_all(path) }).await
}


@ -1,31 +1,27 @@
use std::future::Future;
use std::fs;
use std::path::Path;
use cfg_if::cfg_if;
use crate::future::Future;
use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use crate::task::blocking;
/// A builder for creating directories with configurable options.
///
/// For Unix-specific options, import the [`os::unix::fs::DirBuilderExt`] trait.
/// A builder for creating directories in various manners.
///
/// This type is an async version of [`std::fs::DirBuilder`].
///
/// [`os::unix::fs::DirBuilderExt`]: ../os/unix/fs/trait.DirBuilderExt.html
/// [`std::fs::DirBuilder`]: https://doc.rust-lang.org/std/fs/struct.DirBuilder.html
#[derive(Debug, Default)]
#[derive(Debug)]
pub struct DirBuilder {
/// Set to `true` if non-existent parent directories should be created.
recursive: bool,
/// Unix mode for newly created directories.
#[cfg(unix)]
mode: Option<u32>,
}
impl DirBuilder {
/// Creates a blank set of options.
///
/// The [`recursive`] option is initially set to `false`.
/// Creates a new builder with [`recursive`] set to `false`.
///
/// [`recursive`]: #method.recursive
///
@ -37,24 +33,25 @@ impl DirBuilder {
/// let builder = DirBuilder::new();
/// ```
pub fn new() -> DirBuilder {
#[cfg(not(unix))]
let builder = DirBuilder { recursive: false };
#[cfg(unix)]
let builder = DirBuilder {
recursive: false,
mode: None,
};
#[cfg(windows)]
let builder = DirBuilder { recursive: false };
builder
}
/// Sets the option for recursive mode.
///
/// When set to `true`, this option means all parent directories should be created recursively
/// if they don't exist. Parents are created with the same permissions as the final directory.
/// This option, when `true`, means that all parent directories should be created recursively
/// if they don't exist. Parents are created with the same security settings and permissions as
/// the final directory.
///
/// This option is initially set to `false`.
/// This option defaults to `false`.
///
/// # Examples
///
@ -73,30 +70,23 @@ impl DirBuilder {
///
/// It is considered an error if the directory already exists unless recursive mode is enabled.
///
/// # Errors
///
/// An error will be returned in the following situations:
///
/// * `path` already points to an existing file or directory.
/// * The current process lacks permissions to create the directory or its missing parents.
/// * Some other I/O error occurred.
///
/// # Examples
///
/// ```no_run
/// # #![feature(async_await)]
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs::DirBuilder;
///
/// DirBuilder::new()
/// .recursive(true)
/// .create("./some/directory")
/// .create("/tmp/foo/bar/baz")
/// .await?;
/// #
/// # Ok(()) }) }
/// ```
pub fn create<P: AsRef<Path>>(&self, path: P) -> impl Future<Output = io::Result<()>> {
let mut builder = std::fs::DirBuilder::new();
let mut builder = fs::DirBuilder::new();
builder.recursive(self.recursive);
#[cfg(unix)]
@ -107,17 +97,26 @@ impl DirBuilder {
}
let path = path.as_ref().to_owned();
async move { spawn_blocking(move || builder.create(path)).await }
async move { blocking::spawn(async move { builder.create(path) }).await }
}
}
cfg_unix! {
use crate::os::unix::fs::DirBuilderExt;
cfg_if! {
if #[cfg(feature = "docs")] {
use crate::os::unix::fs::DirBuilderExt;
} else if #[cfg(unix)] {
use std::os::unix::fs::DirBuilderExt;
}
}
impl DirBuilderExt for DirBuilder {
fn mode(&mut self, mode: u32) -> &mut Self {
self.mode = Some(mode);
self
#[cfg_attr(feature = "docs", doc(cfg(unix)))]
cfg_if! {
if #[cfg(any(unix, feature = "docs"))] {
impl DirBuilderExt for DirBuilder {
fn mode(&mut self, mode: u32) -> &mut Self {
self.mode = Some(mode);
self
}
}
}
}


@ -1,26 +1,67 @@
use std::ffi::OsString;
use std::fmt;
use std::sync::Arc;
use std::fs;
use std::path::PathBuf;
use std::pin::Pin;
use std::sync::Mutex;
use crate::fs::{FileType, Metadata};
use cfg_if::cfg_if;
use futures::future::{self, FutureExt, TryFutureExt};
use crate::future::Future;
use crate::io;
use crate::path::PathBuf;
use crate::task::spawn_blocking;
use crate::task::{blocking, Poll};
/// An entry in a directory.
/// An entry inside a directory.
///
/// A stream of entries in a directory is returned by [`read_dir`].
/// An instance of `DirEntry` represents an entry inside a directory on the filesystem. Each entry
/// carries additional information like the full path or metadata.
///
/// This type is an async version of [`std::fs::DirEntry`].
///
/// [`read_dir`]: fn.read_dir.html
/// [`std::fs::DirEntry`]: https://doc.rust-lang.org/std/fs/struct.DirEntry.html
pub struct DirEntry(Arc<std::fs::DirEntry>);
#[derive(Debug)]
pub struct DirEntry {
/// The state of the entry.
state: Mutex<State>,
/// The full path to the entry.
path: PathBuf,
#[cfg(unix)]
ino: u64,
/// The bare name of the entry without the leading path.
file_name: OsString,
}
/// The state of an asynchronous `DirEntry`.
///
/// The `DirEntry` can be either idle or busy performing an asynchronous operation.
#[derive(Debug)]
enum State {
Idle(Option<fs::DirEntry>),
Busy(blocking::JoinHandle<State>),
}
impl DirEntry {
/// Creates an asynchronous `DirEntry` from a synchronous one.
pub(crate) fn new(inner: std::fs::DirEntry) -> DirEntry {
DirEntry(Arc::new(inner))
/// Creates an asynchronous `DirEntry` from a synchronous handle.
pub(crate) fn new(inner: fs::DirEntry) -> DirEntry {
#[cfg(unix)]
let dir_entry = DirEntry {
path: inner.path(),
file_name: inner.file_name(),
ino: inner.ino(),
state: Mutex::new(State::Idle(Some(inner))),
};
#[cfg(windows)]
let dir_entry = DirEntry {
path: inner.path(),
file_name: inner.file_name(),
state: Mutex::new(State::Idle(Some(inner))),
};
dir_entry
}
/// Returns the full path to this entry.
@ -33,6 +74,7 @@ impl DirEntry {
/// # Examples
///
/// ```no_run
/// # #![feature(async_await)]
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
@ -40,37 +82,25 @@ impl DirEntry {
///
/// let mut dir = fs::read_dir(".").await?;
///
/// while let Some(res) = dir.next().await {
/// let entry = res?;
/// while let Some(entry) = dir.next().await {
/// let entry = entry?;
/// println!("{:?}", entry.path());
/// }
/// #
/// # Ok(()) }) }
/// ```
pub fn path(&self) -> PathBuf {
self.0.path().into()
self.path.clone()
}
/// Reads the metadata for this entry.
/// Returns the metadata for this entry.
///
/// This function will traverse symbolic links to read the metadata.
///
/// If you want to read metadata without following symbolic links, use [`symlink_metadata`]
/// instead.
///
/// [`symlink_metadata`]: fn.symlink_metadata.html
///
/// # Errors
///
/// An error will be returned in the following situations:
///
/// * This entry does not point to an existing file or directory anymore.
/// * The current process lacks permissions to read the metadata.
/// * Some other I/O error occurred.
/// This function will not traverse symlinks if this entry points at a symlink.
///
/// # Examples
///
/// ```no_run
/// # #![feature(async_await)]
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
@ -78,37 +108,53 @@ impl DirEntry {
///
/// let mut dir = fs::read_dir(".").await?;
///
/// while let Some(res) = dir.next().await {
/// let entry = res?;
/// while let Some(entry) = dir.next().await {
/// let entry = entry?;
/// println!("{:?}", entry.metadata().await?);
/// }
/// #
/// # Ok(()) }) }
/// ```
pub async fn metadata(&self) -> io::Result<Metadata> {
let inner = self.0.clone();
spawn_blocking(move || inner.metadata()).await
pub async fn metadata(&self) -> io::Result<fs::Metadata> {
future::poll_fn(|cx| {
let state = &mut *self.state.lock().unwrap();
loop {
match state {
State::Idle(opt) => match opt.take() {
None => return Poll::Ready(None),
Some(inner) => {
let (s, r) = futures::channel::oneshot::channel();
// Start the operation asynchronously.
*state = State::Busy(blocking::spawn(async move {
let res = inner.metadata();
let _ = s.send(res);
State::Idle(Some(inner))
}));
return Poll::Ready(Some(r));
}
},
// Poll the asynchronous operation the file is currently blocked on.
State::Busy(task) => *state = futures::ready!(Pin::new(task).poll(cx)),
}
}
})
.map(|opt| opt.ok_or_else(|| io_error("invalid state")))
.await?
.map_err(|_| io_error("blocking task failed"))
.await?
}
/// Reads the file type for this entry.
/// Returns the file type for this entry.
///
/// This function will not traverse symbolic links if this entry points at one.
///
/// If you want to read metadata with following symbolic links, use [`metadata`] instead.
///
/// [`metadata`]: #method.metadata
///
/// # Errors
///
/// An error will be returned in the following situations:
///
/// * This entry does not point to an existing file or directory anymore.
/// * The current process lacks permissions to read this entry's metadata.
/// * Some other I/O error occurred.
/// This function will not traverse symlinks if this entry points at a symlink.
///
/// # Examples
///
/// ```no_run
/// # #![feature(async_await)]
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
@ -116,16 +162,43 @@ impl DirEntry {
///
/// let mut dir = fs::read_dir(".").await?;
///
/// while let Some(res) = dir.next().await {
/// let entry = res?;
/// while let Some(entry) = dir.next().await {
/// let entry = entry?;
/// println!("{:?}", entry.file_type().await?);
/// }
/// #
/// # Ok(()) }) }
/// ```
pub async fn file_type(&self) -> io::Result<FileType> {
let inner = self.0.clone();
spawn_blocking(move || inner.file_type()).await
pub async fn file_type(&self) -> io::Result<fs::FileType> {
future::poll_fn(|cx| {
let state = &mut *self.state.lock().unwrap();
loop {
match state {
State::Idle(opt) => match opt.take() {
None => return Poll::Ready(None),
Some(inner) => {
let (s, r) = futures::channel::oneshot::channel();
// Start the operation asynchronously.
*state = State::Busy(blocking::spawn(async move {
let res = inner.file_type();
let _ = s.send(res);
State::Idle(Some(inner))
}));
return Poll::Ready(Some(r));
}
},
// Poll the asynchronous operation the file is currently blocked on.
State::Busy(task) => *state = futures::ready!(Pin::new(task).poll(cx)),
}
}
})
.map(|opt| opt.ok_or_else(|| io_error("invalid state")))
.await?
.map_err(|_| io_error("blocking task failed"))
.await?
}
/// Returns the bare name of this entry without the leading path.
@ -133,6 +206,7 @@ impl DirEntry {
/// # Examples
///
/// ```no_run
/// # #![feature(async_await)]
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
@ -140,36 +214,38 @@ impl DirEntry {
///
/// let mut dir = fs::read_dir(".").await?;
///
/// while let Some(res) = dir.next().await {
/// let entry = res?;
/// println!("{}", entry.file_name().to_string_lossy());
/// while let Some(entry) = dir.next().await {
/// let entry = entry?;
/// println!("{:?}", entry.file_name());
/// }
/// #
/// # Ok(()) }) }
/// ```
pub fn file_name(&self) -> OsString {
self.0.file_name()
self.file_name.clone()
}
}
impl fmt::Debug for DirEntry {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("DirEntry").field(&self.path()).finish()
/// Creates a custom `io::Error` with an arbitrary error type.
fn io_error(err: impl Into<Box<dyn std::error::Error + Send + Sync>>) -> io::Error {
io::Error::new(io::ErrorKind::Other, err)
}
cfg_if! {
if #[cfg(feature = "docs")] {
use crate::os::unix::fs::DirEntryExt;
} else if #[cfg(unix)] {
use std::os::unix::fs::DirEntryExt;
}
}
impl Clone for DirEntry {
fn clone(&self) -> Self {
DirEntry(self.0.clone())
}
}
cfg_unix! {
use crate::os::unix::fs::DirEntryExt;
impl DirEntryExt for DirEntry {
fn ino(&self) -> u64 {
self.0.ino()
#[cfg_attr(feature = "docs", doc(cfg(unix)))]
cfg_if! {
if #[cfg(any(unix, feature = "docs"))] {
impl DirEntryExt for DirEntry {
fn ino(&self) -> u64 {
self.ino
}
}
}
}
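The `metadata`/`file_type` bodies above juggle two futures: a oneshot receiver that carries the result out of the blocking task, and the blocking task itself, which returns ownership of the inner `fs::DirEntry` once it is done. A stripped-down sketch of just that hand-off, using the public `task::spawn_blocking` (assumed here to stand in for the crate-internal `blocking::spawn`) and a plain `String` in place of the directory entry:

```rust
use async_std::task;
use futures::channel::oneshot;

fn main() {
    task::block_on(async {
        let entry = String::from("stand-in for a blocking fs::DirEntry");
        let (s, r) = oneshot::channel();

        // The blocking task computes a result, sends it through the channel,
        // and hands the owned value back as its return value.
        let handle = task::spawn_blocking(move || {
            let res = entry.len();
            let _ = s.send(res);
            entry
        });

        let res = r.await.expect("blocking task dropped the sender");
        let entry = handle.await;
        println!("len = {}, entry restored: {:?}", res, entry);
    });
}
```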

File diff suppressed because it is too large.


@ -1,84 +0,0 @@
cfg_not_docs! {
pub use std::fs::FileType;
}
cfg_docs! {
/// The type of a file or directory.
///
/// A file type is returned by [`Metadata::file_type`].
///
/// Note that file types are mutually exclusive, i.e. at most one of methods [`is_dir`],
/// [`is_file`], and [`is_symlink`] can return `true`.
///
/// This type is a re-export of [`std::fs::FileType`].
///
/// [`Metadata::file_type`]: struct.Metadata.html#method.file_type
/// [`is_dir`]: #method.is_dir
/// [`is_file`]: #method.is_file
/// [`is_symlink`]: #method.is_symlink
/// [`std::fs::FileType`]: https://doc.rust-lang.org/std/fs/struct.FileType.html
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct FileType {
_private: (),
}
impl FileType {
/// Returns `true` if this file type represents a regular directory.
///
/// If this file type represents a symbolic link, this method returns `false`.
///
/// # Examples
///
/// ```no_run
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
///
/// let file_type = fs::metadata(".").await?.file_type();
/// println!("{:?}", file_type.is_dir());
/// #
/// # Ok(()) }) }
/// ```
pub fn is_dir(&self) -> bool {
unreachable!("this impl only appears in the rendered docs")
}
/// Returns `true` if this file type represents a regular file.
///
/// If this file type represents a symbolic link, this method returns `false`.
///
/// # Examples
///
/// ```no_run
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
///
/// let file_type = fs::metadata("a.txt").await?.file_type();
/// println!("{:?}", file_type.is_file());
/// #
/// # Ok(()) }) }
/// ```
pub fn is_file(&self) -> bool {
unreachable!("this impl only appears in the rendered docs")
}
/// Returns `true` if this file type represents a symbolic link.
///
/// # Examples
///
/// ```no_run
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
///
/// let file_type = fs::metadata("a.txt").await?.file_type();
/// println!("{:?}", file_type.is_symlink());
/// #
/// # Ok(()) }) }
/// ```
pub fn is_symlink(&self) -> bool {
unreachable!("this impl only appears in the rendered docs")
}
}
}


@ -1,12 +1,13 @@
use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use crate::utils::Context as _;
use std::fs;
use std::path::Path;
/// Creates a hard link on the filesystem.
use crate::io;
use crate::task::blocking;
/// Creates a new hard link on the filesystem.
///
/// The `dst` path will be a link pointing to the `src` path. Note that operating systems often
/// require these two paths to be located on the same filesystem.
/// The `dst` path will be a link pointing to the `src` path. Note that systems often require these
/// two paths to both be located on the same filesystem.
///
/// This function is an async version of [`std::fs::hard_link`].
///
@ -14,14 +15,14 @@ use crate::utils::Context as _;
///
/// # Errors
///
/// An error will be returned in the following situations:
/// An error will be returned in the following situations (not an exhaustive list):
///
/// * `src` does not point to an existing file.
/// * Some other I/O error occurred.
/// * The `src` path is not a file or doesn't exist.
///
/// # Examples
///
/// ```no_run
/// # #![feature(async_await)]
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
@ -33,14 +34,5 @@ use crate::utils::Context as _;
pub async fn hard_link<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> io::Result<()> {
let from = from.as_ref().to_owned();
let to = to.as_ref().to_owned();
spawn_blocking(move || {
std::fs::hard_link(&from, &to).context(|| {
format!(
"could not create a hard link from `{}` to `{}`",
from.display(),
to.display()
)
})
})
.await
blocking::spawn(async move { fs::hard_link(&from, &to) }).await
}


@ -1,29 +1,28 @@
use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use std::fs::{self, Metadata};
use std::path::Path;
/// Reads metadata for a path.
use crate::io;
use crate::task::blocking;
/// Queries the metadata for a path.
///
/// This function will traverse symbolic links to read metadata for the target file or directory.
/// If you want to read metadata without following symbolic links, use [`symlink_metadata`]
/// instead.
/// This function will traverse symbolic links to query information about the file or directory.
///
/// This function is an async version of [`std::fs::metadata`].
///
/// [`symlink_metadata`]: fn.symlink_metadata.html
/// [`std::fs::metadata`]: https://doc.rust-lang.org/std/fs/fn.metadata.html
///
/// # Errors
///
/// An error will be returned in the following situations:
/// An error will be returned in the following situations (not an exhaustive list):
///
/// * `path` does not point to an existing file or directory.
/// * The current process lacks permissions to read metadata for the path.
/// * Some other I/O error occurred.
/// * `path` does not exist.
/// * The current process lacks permissions to query metadata for `path`.
///
/// # Examples
///
/// ```no_run
/// # #![feature(async_await)]
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
@ -34,196 +33,5 @@ use crate::task::spawn_blocking;
/// ```
pub async fn metadata<P: AsRef<Path>>(path: P) -> io::Result<Metadata> {
let path = path.as_ref().to_owned();
spawn_blocking(move || std::fs::metadata(path)).await
}
cfg_not_docs! {
pub use std::fs::Metadata;
}
cfg_docs! {
use std::time::SystemTime;
use crate::fs::{FileType, Permissions};
/// Metadata for a file or directory.
///
/// Metadata is returned by [`metadata`] and [`symlink_metadata`].
///
/// This type is a re-export of [`std::fs::Metadata`].
///
/// [`metadata`]: fn.metadata.html
/// [`symlink_metadata`]: fn.symlink_metadata.html
/// [`is_dir`]: #method.is_dir
/// [`is_file`]: #method.is_file
/// [`std::fs::Metadata`]: https://doc.rust-lang.org/std/fs/struct.Metadata.html
#[derive(Clone, Debug)]
pub struct Metadata {
_private: (),
}
impl Metadata {
/// Returns the file type from this metadata.
///
/// # Examples
///
/// ```no_run
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
///
/// let metadata = fs::metadata("a.txt").await?;
/// println!("{:?}", metadata.file_type());
/// #
/// # Ok(()) }) }
/// ```
pub fn file_type(&self) -> FileType {
unreachable!("this impl only appears in the rendered docs")
}
/// Returns `true` if this metadata is for a regular directory.
///
/// If this metadata is for a symbolic link, this method returns `false`.
///
/// # Examples
///
/// ```no_run
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
///
/// let metadata = fs::metadata(".").await?;
/// println!("{:?}", metadata.is_dir());
/// #
/// # Ok(()) }) }
/// ```
pub fn is_dir(&self) -> bool {
unreachable!("this impl only appears in the rendered docs")
}
/// Returns `true` if this metadata is for a regular file.
///
/// If this metadata is for a symbolic link, this method returns `false`.
///
/// # Examples
///
/// ```no_run
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
///
/// let metadata = fs::metadata("a.txt").await?;
/// println!("{:?}", metadata.is_file());
/// #
/// # Ok(()) }) }
/// ```
pub fn is_file(&self) -> bool {
unreachable!("this impl only appears in the rendered docs")
}
/// Returns the file size in bytes.
///
/// # Examples
///
/// ```no_run
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
///
/// let metadata = fs::metadata("a.txt").await?;
/// println!("{}", metadata.len());
/// #
/// # Ok(()) }) }
/// ```
pub fn len(&self) -> u64 {
unreachable!("this impl only appears in the rendered docs")
}
/// Returns the permissions from this metadata.
///
/// # Examples
///
/// ```no_run
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
///
/// let metadata = fs::metadata("a.txt").await?;
/// println!("{:?}", metadata.permissions());
/// #
/// # Ok(()) }) }
/// ```
pub fn permissions(&self) -> Permissions {
unreachable!("this impl only appears in the rendered docs")
}
/// Returns the last modification time.
///
/// # Errors
///
/// This data may not be available on all platforms, in which case an error will be
/// returned.
///
/// # Examples
///
/// ```no_run
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
///
/// let metadata = fs::metadata("a.txt").await?;
/// println!("{:?}", metadata.modified());
/// #
/// # Ok(()) }) }
/// ```
pub fn modified(&self) -> io::Result<SystemTime> {
unreachable!("this impl only appears in the rendered docs")
}
/// Returns the last access time.
///
/// # Errors
///
/// This data may not be available on all platforms, in which case an error will be
/// returned.
///
/// # Examples
///
/// ```no_run
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
///
/// let metadata = fs::metadata("a.txt").await?;
/// println!("{:?}", metadata.accessed());
/// #
/// # Ok(()) }) }
/// ```
pub fn accessed(&self) -> io::Result<SystemTime> {
unreachable!("this impl only appears in the rendered docs")
}
/// Returns the creation time.
///
/// # Errors
///
/// This data may not be available on all platforms, in which case an error will be
/// returned.
///
/// # Examples
///
/// ```no_run
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
///
/// let metadata = fs::metadata("a.txt").await?;
/// println!("{:?}", metadata.created());
/// #
/// # Ok(()) }) }
/// ```
pub fn created(&self) -> io::Result<SystemTime> {
unreachable!("this impl only appears in the rendered docs")
}
}
blocking::spawn(async move { fs::metadata(path) }).await
}


@ -2,20 +2,14 @@
//!
//! This module is an async version of [`std::fs`].
//!
//! [`os::unix::fs`]: ../os/unix/fs/index.html
//! [`os::windows::fs`]: ../os/windows/fs/index.html
//! [`std::fs`]: https://doc.rust-lang.org/std/fs/index.html
//!
//! # Platform-specific extensions
//!
//! * Unix: use the [`os::unix::fs`] module.
//! * Windows: use the [`os::windows::fs`] module.
//!
//! # Examples
//!
//! Create a new file and write some bytes to it:
//!
//! ```no_run
//! # #![feature(async_await)]
//! # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
//! #
//! use async_std::fs::File;
@ -30,16 +24,15 @@
pub use dir_builder::DirBuilder;
pub use dir_entry::DirEntry;
pub use file::File;
pub use file_type::FileType;
pub use metadata::Metadata;
pub use open_options::OpenOptions;
pub use permissions::Permissions;
pub use read_dir::ReadDir;
#[doc(inline)]
pub use std::fs::{FileType, Metadata, Permissions};
pub use canonicalize::canonicalize;
pub use copy::copy;
pub use create_dir::create_dir;
pub use create_dir_all::create_dir_all;
pub use hard_link::hard_link;
pub use metadata::metadata;
pub use read::read;
@ -57,15 +50,12 @@ pub use write::write;
mod canonicalize;
mod copy;
mod create_dir;
mod create_dir_all;
mod dir_builder;
mod dir_entry;
mod file;
mod file_type;
mod hard_link;
mod metadata;
mod open_options;
mod permissions;
mod read;
mod read_dir;
mod read_link;


@ -1,35 +1,38 @@
use std::future::Future;
use std::fs;
use std::io;
use std::path::Path;
use crate::fs::File;
use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use cfg_if::cfg_if;
/// A builder for opening files with configurable options.
use super::File;
use crate::future::Future;
use crate::task::blocking;
/// Options and flags which can be used to configure how a file is opened.
///
/// Files can be opened in [`read`] and/or [`write`] mode.
/// This builder exposes the ability to configure how a [`File`] is opened and what operations are
/// permitted on the open file. The [`File::open`] and [`File::create`] methods are aliases for
/// commonly used options with this builder.
///
/// The [`append`] option opens files in a special writing mode that moves the file cursor to the
/// end of file before every write operation.
///
/// It is also possible to [`truncate`] the file right after opening, to [`create`] a file if it
/// doesn't exist yet, or to always create a new file with [`create_new`].
/// Generally speaking, when using `OpenOptions`, you'll first call [`new`], then chain calls to
/// methods to set each option, then call [`open`], passing the path of the file you're trying to
/// open. This will give you a [`File`] inside that you can further operate on.
///
/// This type is an async version of [`std::fs::OpenOptions`].
///
/// [`read`]: #method.read
/// [`write`]: #method.write
/// [`append`]: #method.append
/// [`truncate`]: #method.truncate
/// [`create`]: #method.create
/// [`create_new`]: #method.create_new
/// [`new`]: struct.OpenOptions.html#method.new
/// [`open`]: struct.OpenOptions.html#method.open
/// [`File`]: struct.File.html
/// [`File::open`]: struct.File.html#method.open
/// [`File::create`]: struct.File.html#method.create
/// [`std::fs::OpenOptions`]: https://doc.rust-lang.org/std/fs/struct.OpenOptions.html
///
/// # Examples
///
/// Open a file for reading:
/// Opening a file for reading:
///
/// ```no_run
/// # #![feature(async_await)]
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs::OpenOptions;
@ -42,9 +45,10 @@ use crate::task::spawn_blocking;
/// # Ok(()) }) }
/// ```
///
/// Open a file for both reading and writing, and create it if it doesn't exist yet:
/// Opening a file for both reading and writing, creating it if it doesn't exist:
///
/// ```no_run
/// # #![feature(async_await)]
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs::OpenOptions;
@ -59,16 +63,17 @@ use crate::task::spawn_blocking;
/// # Ok(()) }) }
/// ```
#[derive(Clone, Debug)]
pub struct OpenOptions(std::fs::OpenOptions);
pub struct OpenOptions(fs::OpenOptions);
impl OpenOptions {
/// Creates a blank set of options.
/// Creates a blank new set of options.
///
/// All options are initially set to `false`.
///
/// # Examples
///
/// ```no_run
/// # #![feature(async_await)]
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs::OpenOptions;
@ -81,16 +86,17 @@ impl OpenOptions {
/// # Ok(()) }) }
/// ```
pub fn new() -> OpenOptions {
OpenOptions(std::fs::OpenOptions::new())
OpenOptions(fs::OpenOptions::new())
}
/// Configures the option for read mode.
/// Sets the option for read access.
///
/// When set to `true`, this option means the file will be readable after opening.
/// This option, when `true`, will indicate that the file should be readable if opened.
///
/// # Examples
///
/// ```no_run
/// # #![feature(async_await)]
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs::OpenOptions;
@ -107,16 +113,17 @@ impl OpenOptions {
self
}
/// Configures the option for write mode.
/// Sets the option for write access.
///
/// When set to `true`, this option means the file will be writable after opening.
/// This option, when `true`, will indicate that the file should be writable if opened.
///
/// If the file already exists, write calls on it will overwrite the previous contents without
/// If the file already exists, any write calls on it will overwrite its contents, without
/// truncating it.
///
/// # Examples
///
/// ```no_run
/// # #![feature(async_await)]
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs::OpenOptions;
@ -133,14 +140,36 @@ impl OpenOptions {
self
}
/// Configures the option for append mode.
/// Sets the option for append mode.
///
/// When set to `true`, this option means the file will be writable after opening and the file
/// cursor will be moved to the end of file before every write operation.
/// This option, when `true`, means that writes will append to a file instead of overwriting
/// previous contents. Note that setting `.write(true).append(true)` has the same effect as
/// setting only `.append(true)`.
///
/// For most filesystems, the operating system guarantees that all writes are atomic: no writes
/// get mangled because another process writes at the same time.
///
/// One maybe obvious note when using append mode: make sure that all data that belongs
/// together is written to the file in one operation. This can be done by concatenating strings
/// before writing them, or using a buffered writer (with a buffer of adequate size), and
/// flushing when the message is complete.
///
/// If a file is opened with both read and append access, beware that after opening and after
/// every write, the position for reading may be set at the end of the file. So, before
/// writing, save the current position by seeking with a zero offset, and restore it before the
/// next read.
///
/// ## Note
///
/// This function doesn't create the file if it doesn't exist. Use the [`create`] method to do
/// so.
///
/// [`create`]: #method.create
///
/// # Examples
///
/// ```no_run
/// # #![feature(async_await)]
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs::OpenOptions;
@ -157,18 +186,17 @@ impl OpenOptions {
self
}
/// Configures the option for truncating the previous file.
/// Sets the option for truncating a previous file.
///
/// When set to `true`, the file will be truncated to the length of 0 bytes.
/// If a file is successfully opened with this option set, it will truncate the file to 0
/// length if it already exists.
///
/// The file must be opened in [`write`] or [`append`] mode for truncation to work.
///
/// [`write`]: #method.write
/// [`append`]: #method.append
/// The file must be opened with write access for truncation to work.
///
/// # Examples
///
/// ```no_run
/// # #![feature(async_await)]
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs::OpenOptions;
@ -186,11 +214,11 @@ impl OpenOptions {
self
}
/// Configures the option for creating a new file if it doesn't exist.
/// Sets the option for creating a new file.
///
/// When set to `true`, this option means a new file will be created if it doesn't exist.
/// This option indicates whether a new file will be created if the file does not yet exist.
///
/// The file must be opened in [`write`] or [`append`] mode for file creation to work.
/// In order for the file to be created, [`write`] or [`append`] access must be used.
///
/// [`write`]: #method.write
/// [`append`]: #method.append
@ -198,6 +226,7 @@ impl OpenOptions {
/// # Examples
///
/// ```no_run
/// # #![feature(async_await)]
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs::OpenOptions;
@ -215,19 +244,26 @@ impl OpenOptions {
self
}
/// Configures the option for creating a new file or failing if it already exists.
/// Sets the option to always create a new file.
///
/// When set to `true`, this option means a new file will be created, or the open operation
/// will fail if the file already exists.
/// This option indicates whether a new file will be created. No file is allowed to exist at
/// the target location, also no (dangling) symlink.
///
/// The file must be opened in [`write`] or [`append`] mode for file creation to work.
/// This option is useful because it is atomic. Otherwise, between checking whether a file
/// exists and creating a new one, the file may have been created by another process (a TOCTOU
/// race condition / attack).
///
/// [`write`]: #method.write
/// [`append`]: #method.append
/// If `.create_new(true)` is set, [`.create()`] and [`.truncate()`] are ignored.
///
/// The file must be opened with write or append access in order to create a new file.
///
/// [`.create()`]: #method.create
/// [`.truncate()`]: #method.truncate
///
/// # Examples
///
/// ```no_run
/// # #![feature(async_await)]
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs::OpenOptions;
@ -245,70 +281,78 @@ impl OpenOptions {
self
}
/// Opens a file with the configured options.
/// Opens a file at specified path with the configured options.
///
/// # Errors
///
/// An error will be returned in the following situations:
/// This function will return an error under a number of different circumstances. Some of these
/// error conditions are listed here, together with their [`ErrorKind`]. The mapping to
/// [`ErrorKind`]s is not part of the compatibility contract of the function, especially the
/// `Other` kind might change to more specific kinds in the future.
///
/// * The file does not exist and neither [`create`] nor [`create_new`] were set.
/// * The file's parent directory does not exist.
/// * The current process lacks permissions to open the file in the configured mode.
/// * The file already exists and [`create_new`] was set.
/// * Invalid combination of options was used, like [`truncate`] was set but [`write`] wasn't,
/// or none of [`read`], [`write`], and [`append`] modes was set.
/// * An OS-level error occurred, like too many files are open or the file name is too long.
/// * Some other I/O error occurred.
/// * [`NotFound`]: The specified file does not exist and neither `create` or `create_new` is
/// set.
/// * [`NotFound`]: One of the directory components of the file path does not exist.
/// * [`PermissionDenied`]: The user lacks permission to get the specified access rights for
/// the file.
/// * [`PermissionDenied`]: The user lacks permission to open one of the directory components
/// of the specified path.
/// * [`AlreadyExists`]: `create_new` was specified and the file already exists.
/// * [`InvalidInput`]: Invalid combinations of open options (truncate without write access, no
/// access mode set, etc.).
/// * [`Other`]: One of the directory components of the specified file path was not, in fact, a
/// directory.
/// * [`Other`]: Filesystem-level errors: full disk, write permission requested on a read-only
/// file system, exceeded disk quota, too many open files, too long filename, too many
/// symbolic links in the specified path (Unix-like systems only), etc.
///
/// [`read`]: #method.read
/// [`write`]: #method.write
/// [`append`]: #method.append
/// [`truncate`]: #method.truncate
/// [`create`]: #method.create
/// [`create_new`]: #method.create_new
/// [`ErrorKind`]: https://doc.rust-lang.org/std/io/enum.ErrorKind.html
/// [`AlreadyExists`]: https://doc.rust-lang.org/std/io/enum.ErrorKind.html#variant.AlreadyExists
/// [`InvalidInput`]: https://doc.rust-lang.org/std/io/enum.ErrorKind.html#variant.InvalidInput
/// [`NotFound`]: https://doc.rust-lang.org/std/io/enum.ErrorKind.html#variant.NotFound
/// [`Other`]: https://doc.rust-lang.org/std/io/enum.ErrorKind.html#variant.Other
/// [`PermissionDenied`]: https://doc.rust-lang.org/std/io/enum.ErrorKind.html#variant.PermissionDenied
///
/// # Examples
///
/// ```no_run
/// # #![feature(async_await)]
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs::OpenOptions;
///
/// let file = OpenOptions::new()
/// .read(true)
/// .open("a.txt")
/// .await?;
/// let file = OpenOptions::new().open("a.txt").await?;
/// #
/// # Ok(()) }) }
/// ```
pub fn open<P: AsRef<Path>>(&self, path: P) -> impl Future<Output = io::Result<File>> {
let path = path.as_ref().to_owned();
let options = self.0.clone();
async move {
let file = spawn_blocking(move || options.open(path)).await?;
Ok(File::new(file, true))
}
async move { blocking::spawn(async move { options.open(path).map(|f| f.into()) }).await }
}
}
impl Default for OpenOptions {
fn default() -> Self {
Self::new()
cfg_if! {
if #[cfg(feature = "docs")] {
use crate::os::unix::fs::OpenOptionsExt;
} else if #[cfg(unix)] {
use std::os::unix::fs::OpenOptionsExt;
}
}
cfg_unix! {
use crate::os::unix::fs::OpenOptionsExt;
#[cfg_attr(feature = "docs", doc(cfg(unix)))]
cfg_if! {
if #[cfg(any(unix, feature = "docs"))] {
impl OpenOptionsExt for OpenOptions {
fn mode(&mut self, mode: u32) -> &mut Self {
self.0.mode(mode);
self
}
impl OpenOptionsExt for OpenOptions {
fn mode(&mut self, mode: u32) -> &mut Self {
self.0.mode(mode);
self
}
fn custom_flags(&mut self, flags: i32) -> &mut Self {
self.0.custom_flags(flags);
self
fn custom_flags(&mut self, flags: i32) -> &mut Self {
self.0.custom_flags(flags);
self
}
}
}
}
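A hedged sketch of the read-plus-append caveat spelled out in the `append` docs above: remember the read position before writing and restore it afterwards, because appending may move the cursor to the end of the file. The helper name and path handling are hypothetical, and the async `Read`/`Write`/`Seek` extension traits are assumed to come from the prelude:

```rust
use async_std::fs::OpenOptions;
use async_std::io::SeekFrom;
use async_std::prelude::*;

async fn append_then_read(path: &str) -> std::io::Result<String> {
    let mut file = OpenOptions::new()
        .read(true)
        .append(true)
        .create(true) // create the file if it is missing (append alone does not)
        .open(path)
        .await?;

    // Remember where reading should resume before the append moves the cursor.
    let read_pos = file.seek(SeekFrom::Current(0)).await?;

    // Write one complete record in a single call, as the docs recommend.
    file.write_all(b"one complete record\n").await?;

    // Restore the saved position before reading.
    file.seek(SeekFrom::Start(read_pos)).await?;

    let mut contents = String::new();
    file.read_to_string(&mut contents).await?;
    Ok(contents)
}
```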


@ -1,56 +0,0 @@
cfg_not_docs! {
pub use std::fs::Permissions;
}
cfg_docs! {
/// A set of permissions on a file or directory.
///
/// This type is a re-export of [`std::fs::Permissions`].
///
/// [`std::fs::Permissions`]: https://doc.rust-lang.org/std/fs/struct.Permissions.html
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Permissions {
_private: (),
}
impl Permissions {
/// Returns the read-only flag.
///
/// # Examples
///
/// ```no_run
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
///
/// let perm = fs::metadata("a.txt").await?.permissions();
/// println!("{:?}", perm.readonly());
/// #
/// # Ok(()) }) }
/// ```
pub fn readonly(&self) -> bool {
unreachable!("this impl only appears in the rendered docs")
}
/// Configures the read-only flag.
///
/// [`fs::set_permissions`]: fn.set_permissions.html
///
/// # Examples
///
/// ```no_run
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
///
/// let mut perm = fs::metadata("a.txt").await?.permissions();
/// perm.set_readonly(true);
/// fs::set_permissions("a.txt", perm).await?;
/// #
/// # Ok(()) }) }
/// ```
pub fn set_readonly(&mut self, readonly: bool) {
unreachable!("this impl only appears in the rendered docs")
}
}
}

Some files were not shown because too many files have changed in this diff.