From fe1309e833094b65f37400890d799ea9f01c7125 Mon Sep 17 00:00:00 2001 From: Klemens Morgenstern Date: Tue, 5 Sep 2023 11:02:30 +0800 Subject: [PATCH] Multipage html --- acknowledgements.html | 501 ++++ benchmarks.html | 921 ++++++++ compiler_support.html | 784 +++++++ coroutine_primer.html | 1050 +++++++++ design.html | 1160 +++++++++ index.html | 4676 +------------------------------------ motivation.html | 544 +++++ overview.html | 564 +++++ reference.html | 3148 +++++++++++++++++++++++++ technical_background.html | 896 +++++++ tutorial.html | 1613 +++++++++++++ 11 files changed, 11210 insertions(+), 4647 deletions(-) create mode 100644 acknowledgements.html create mode 100644 benchmarks.html create mode 100644 compiler_support.html create mode 100644 coroutine_primer.html create mode 100644 design.html create mode 100644 motivation.html create mode 100644 overview.html create mode 100644 reference.html create mode 100644 technical_background.html create mode 100644 tutorial.html diff --git a/acknowledgements.html b/acknowledgements.html new file mode 100644 index 00000000..5b483158 --- /dev/null +++ b/acknowledgements.html @@ -0,0 +1,501 @@ + + + + + + + + +Documentation boost.async + + + + + + + +
+
+

Acknowledgements

+
+
+

This library would not have been possible without the CppAlliance and its founder Vinnie Falco.
+Vinnie trusted me enough to let me work on this project, even though he himself had very different views on how such a library should be designed.

+
+
+

Thanks also go to Ruben Perez & Richard Hodges for listening to my design problems and giving me advice & use-cases. Furthermore, this library would not have been possible without the great boost.asio by Chris Kohlhoff.

+
+
+
+ +
+ + + \ No newline at end of file diff --git a/benchmarks.html b/benchmarks.html new file mode 100644 index 00000000..db2d4b4b --- /dev/null +++ b/benchmarks.html @@ -0,0 +1,921 @@ + + + + + + + + +Documentation boost.async + + + + + + + + +
+
+

Benchmarks

+
+
+

Run on 11th Gen Intel® Core™ i7-1185G7 @ 3.00GHz

+
+
+

Posting to an executor

+
+

The benchmark runs the following code with async’s task, asio::awaitable and asio’s
+stackful coroutine (based on boost.context).

+
+
+
+
async::task<void> atest()
+{
+  for (std::size_t i = 0u; i < n; i++)
+    co_await asio::post(async::use_op);
+}
+
+
+ + +++++ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 5. results for 50M times in ms
gcc 12clang 16

async

2472

2098

awaitable

2432

2253

stackful

3655

3725

+
+
+

Running noop coroutine in parallel

+
+

This benchmark uses an asio::experimental::channel with a size of zero,
+to read & write to it in parallel. It uses gather with async
+and the awaitable operators (operator&&) with asio::awaitable.

+
+
+
+
async::task<void> atest()
+{
+  asio::experimental::channel<void(system::error_code)> chan{co_await async::this_coro::executor, 0u};
+  for (std::size_t i = 0u; i < n; i++)
+    co_await async::gather(
+              chan.async_send(system::error_code{}, async::use_task),
+              chan.async_receive(async::use_task));
+}
+
+asio::awaitable<void> awtest()
+{
+  asio::experimental::channel<void(system::error_code)> chan{co_await async::this_coro::executor, 0u};
+  using boost::asio::experimental::awaitable_operators::operator&&;
+  for (std::size_t i = 0u; i < n; i++)
+    co_await (
+        chan.async_send(system::error_code{}, asio::use_awaitable)
+        &&
+        chan.async_receive(asio::use_awaitable));
+}
+
+
+ + +++++ + + + + + + + + + + + + + + + + + + + +
Table 6. results for 3M times in ms
gcc 12clang 16

async

1563

1468

awaitable

2800

2805

+
+
+

Immediate

+
+

This benchmark utilizes immediate completion by using a channel
+with a size of 1, so that every operation completes immediately.

+
+
+
+
async::task<void> atest()
+{
+  asio::experimental::channel<void(system::error_code)> chan{co_await async::this_coro::executor, 1u};
+  for (std::size_t i = 0u; i < n; i++)
+  {
+    co_await chan.async_send(system::error_code{}, async::use_op);
+    co_await chan.async_receive(async::use_op);
+  }
+}
+
+
+ + +++++ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 7. result for 10M times in ms
gcc 12clang 16

async

1810

1864

awaitable

3109

4110

stackful

3922

4705

+
+
+

Channels

+
+

In this benchmark asio::experimental::channel and async::channel are compared.

+
+
+

This is similar to the parallel test, but uses the async::channel instead, as sketched below.

+
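A minimal sketch of what the async::channel variant could look like (the actual benchmark code is not shown here; this assumes the same n as above and the channel’s read()/write() operations):

async::task<void> atest()
{
  async::channel<void> chan{0u};
  for (std::size_t i = 0u; i < n; i++)
    co_await async::gather(chan.write(), chan.read());
}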
+ + +++++ + + + + + + + + + + + + + + + + + + + + + + + + +
Table 8. result of running the test 3M times in ms
gccclang

async

500

350

awaitable

790

770

stackful

867

907

+
+
+
+ +
+ + + \ No newline at end of file diff --git a/compiler_support.html b/compiler_support.html new file mode 100644 index 00000000..df82812f --- /dev/null +++ b/compiler_support.html @@ -0,0 +1,784 @@ + + + + + + + + +Documentation boost.async + + + + + + + + +
+
+

Compiler support

+
+
+

This library requires at least Clang 14, GCC 10, or MSVC 19.28.

+
+
+ + + + + +
+



Gcc versions 12.1 and 12.2 appear to have a bug for coroutines without stack variables,
+as can be seen [here](https://godbolt.org/z/6adGcqP1z), and should be avoided for coroutine code.
+
+
+
+

Clang only added std::pmr support in version 16, so older Clang versions use boost::container::pmr as a drop-in replacement.

+
+
+ + + + + +
+



Some, if not all, MSVC versions have a broken coroutine implementation
+that this library needs to work around. This may cause non-deterministic behaviour and overhead.
+
+
+
+

A coroutine continuation may be done in the awaitable returned from a final_suspend, like this:

+
+
+
+
// in promise
+auto final_suspend() noexcept
+{
+    struct final_awaitable
+    {
+      std::coroutine_handle<void> continuation{std::noop_coroutine()}; (1)
+      bool await_ready() const noexcept;
+      std::coroutine_handle<void> await_suspend(std::coroutine_handle<void> h) noexcept
+      {
+        auto cc = continuation;
+        h.destroy(); (2)
+        return cc;
+      }
+
+      void await_resume() noexcept {}
+    };
+    return final_awaitable{my_continuation};
+};
+
+
+
+ + + + + + + + + +
1The continuation
2Self destroying the coroutine before continuation
+
+
+

The final_suspend does not properly suspend the coroutine on MSVC, so that the h.destroy() will cause
+double destruction of elements on the coroutine frame.
+Therefore, MSVC needs to post the destruction, to do it out of line.
+This causes overhead and makes the actual freeing of memory non-deterministic.

+
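A rough sketch of the kind of workaround described above (hypothetical code, not the library’s actual implementation), posting the destruction through an executor instead of destroying the frame inside await_suspend:

// hypothetical: defer the frame destruction instead of calling h.destroy() inline
std::coroutine_handle<void> await_suspend(std::coroutine_handle<void> h) noexcept
{
  auto cc = continuation;
  asio::post(executor, [h]() mutable { h.destroy(); }); // 'executor' is an assumed member
  return cc;
}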
+
+
+ +
+ + + \ No newline at end of file diff --git a/coroutine_primer.html b/coroutine_primer.html new file mode 100644 index 00000000..5c763510 --- /dev/null +++ b/coroutine_primer.html @@ -0,0 +1,1050 @@ + + + + + + + + +Documentation boost.async + + + + + + + + +
+
+

Coroutine Primer

+
+
+

Async programming

+
+

Asynchronous programming generally refers to a style of programming
+that allows tasks to be run in the background, while other work is performed.

+
+
+

Imagine, if you will, a GET request function that performs a
+full http request, including connecting, ssl handshakes etc.

+
+
+
+
std::string http_get(std::string_view url);
+
+int main(int argc, char * argv[])
+{
+    auto res = http_get("https://boost.org");
+    printf("%s", res.c_str());
+    return 0;
+}
+
+
+
+

The above code would be traditional synchronous programming. If we want to perform
+two requests in parallel, we would need to create another thread to run the second request
+with synchronous code.

+
+
+
+
std::string http_get(std::string_view url);
+
+int main(int argc, char * argv[])
+{
+    std::string other_res;
+
+    std::thread thr{[&]{ other_res = http_get("https://cppalliance.org"); }};
+    auto res = http_get("https://boost.org");
+    thr.join();
+
+    printf("%s", res.c_str());
+    printf("%s", other_res.c_str());
+    return 0;
+}
+
+
+
+

This works, but our program will spend most of the time waiting for input. +Operating systems provide APIs that allow IO to be performed asynchronously, +and libraries such as boost.asio +provide portable ways to manage asynchronous operations. +Asio itself does not dictate a way to handle the completions. +This library (boost.async) provides a way to manage this all through coroutines/awaitables.

+
+
+
+
async::promise<std::string> http_async_get(std::string_view url);
+
+async::main co_main(int argc, char * argv[])
+{
+    auto [res, other_res] =
+            co_await async::join(
+                http_async_get("https://boost.org"),
+                http_async_get("https://cppalliance.org")
+            );
+
+    printf("%s", res.c_str());
+    printf("%s", other_res.c_str());
+    return 0;
+}
+
+
+
+

In the above code the asynchronous function to perform the request +takes advantage of the operating system APIs so that the actual IO doesn’t block. +This means that while we’re waiting for both functions to complete, +the operations are interleaved and non-blocking. +At the same time async provides the coroutine primitives that keep us out of callback hell.

+
+
+
+

Coroutines

+
+

Coroutines are resumable functions. +Resumable means that a function can suspend, +i.e. pass the control back to the caller multiple times.

+
+
+

A regular function yields control back to the caller with the return statement, where it also returns the value.

+
+
+

A coroutine on the other hand might yield control to the caller and get resumed multiple times.

+
+
+

A coroutine has three control keywords akin to return
+(of which only co_return has to be supported).

+
+
+
    +
  • +

    co_return

    +
  • +
  • +

    co_yield

    +
  • +
  • +

    co_await

    +
  • +
+
+
+

co_return

+
+

This is similar to return, but marks the function as a coroutine.

+
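For example, a minimal sketch of a coroutine that only returns a value:

async::promise<int> the_answer()
{
  co_return 42; // co_return makes this function a coroutine
}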
+
+
+

co_await

+
+

The co_await expression suspends the coroutine for an Awaitable,
+i.e. stops execution until the awaitable resumes it.

+
+
+

E.g.:

+
+
+
+
async::promise<void> delay(std::chrono::milliseconds);
+
+async::task<void> example()
+{
+  co_await delay(std::chrono::milliseconds(50));
+}
+
+
+
+

A co_await expression can yield a value, depending on what it is awaiting.

+
+
+
+
async::promise<std::string> read_some();
+
+async::task<void> example()
+{
+  std::string res = co_await read_some();
+}
+
+
+
+ + + + + +
+ + +In async most coroutine primitives are also Awaitables. +
+
+
+
+

co_yield

+
+

The co_yield expression is similar to co_await,
+but it yields control to the caller and carries a value.

+
+
+

For example:

+
+
+
+
async::generator<int> iota(int max)
+{
+  int i = 0;
+  while (i < max)
+    co_yield i++;
+
+  co_return i;
+}
+
+
+
+

A co_yield expression can also produce a value,
+which allows the user of the yielding coroutine to push values into it.

+
+
+
+
async::generator<int, bool> iota()
+{
+  int i = 0;
+  bool more = false;
+  do
+  {
+    more = co_yield i++;
+  }
+  while(more);
+  co_return -1;
+}
+
+
+
+
+
Stackless
+
+

C++ coroutines are stackless, which means they only allocate their own function frame.

+
+
+

See Stackless for more details.

+
+
+
+
+
+
+

Awaitables

+
+

Awaitables are types that can be used in a co_await expression.

+
+
+
+
struct awaitable_prototype
+{
+    bool await_ready();
+
+    template<typename T>
+    see_below await_suspend(std::coroutine_handle<T>);
+
+    return_type  await_resume();
+};
+
+
+
+ + + + + +
+



A type will be implicitly converted into an awaitable if there is an operator co_await call available.
+This documentation will use awaitable to include these types,
+and "actual_awaitable" to refer to types conforming to the above prototype.
+
+
+
+
+Diagram +
+
+
+

In a co_await expression the waiting coroutine will first invoke +await_ready to check if the coroutine needs to suspend. +When ready, it goes directly to await_resume to get the value, +as there is no suspension needed. +Otherwise, it will suspend itself and call await_suspend with a +std::coroutine_handle to its own promise.

+
+
+ + + + + +
+ + +std::coroutine_handle<void> can be used for type erasure. +
+
+
+

The return_type is the result type of the co_await expression, e.g. int:

+
+
+
+
int i = co_await awaitable_with_int_result();
+
+
+
+

The return type of await_suspend can be one of three things:

+
+
+
    +
  • +

    void

    +
  • +
  • +

    bool

    +
  • +
  • +

    std::coroutine_handle<U>

    +
  • +
+
+
+

If it is void the awaiting coroutine remains suspended. If it is bool,
+the value will be checked, and if false, the awaiting coroutine will resume right away.

+
+
+

If a std::coroutine_handle is returned, this coroutine will be resumed. +The latter allows await_suspend to return the handle passed in, +being effectively the same as returning false.

+
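As a sketch (a hypothetical awaitable, not part of the library), an await_suspend that transfers control directly to another coroutine by returning its handle:

struct transfer_to
{
  std::coroutine_handle<void> target; // the coroutine to resume instead of the awaiter

  bool await_ready() const noexcept { return false; }
  std::coroutine_handle<void> await_suspend(std::coroutine_handle<void>) noexcept
  {
    return target; // the awaiter stays suspended, 'target' gets resumed
  }
  void await_resume() noexcept {}
};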
+
+

If the awaiting coroutine gets re-resumed right away, i.e. after calling await_resume, +it is referred to as "immediate completion" within this library. +This is not to be confused with a non-suspending awaitable, i.e. one that returns true from await_ready.

+
+
+
+

Event Loops

+
+

Since the coroutines in async can co_await events, they need to be run on an event-loop.
+That is, another piece of code is responsible for tracking outstanding events and resuming the coroutines that are awaiting them.
+This pattern is very common and is used in a similar way by node.js or python’s asyncio.

+
+
+

async uses an asio::io_context as its default event loop.

+
+
+

The event loop is accessed through an executor (following the asio terminology) and can be manually set using set_executor.

+
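A minimal sketch of running coroutines on a manually created event loop; this assumes async::this_thread::set_executor and async::spawn behave as described elsewhere in this documentation, and my_task is a hypothetical task-returning coroutine:

asio::io_context ctx;
async::this_thread::set_executor(ctx.get_executor()); // make it the thread's default executor
async::spawn(ctx.get_executor(), my_task(), asio::detached); // launch the hypothetical task onto it
ctx.run(); // drive the event loop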
+
+
+
+ +
+ + + \ No newline at end of file diff --git a/design.html b/design.html new file mode 100644 index 00000000..7c8c7970 --- /dev/null +++ b/design.html @@ -0,0 +1,1160 @@ + + + + + + + + +Documentation boost.async + + + + + + + + +
+
+

Design

+
+
+

Concepts

+
+

This library has two fundamental concepts:

+
+
+ +
+
+

An awaitable is an expression that can be used with co_await +from within a coroutine, e.g.:

+
+
+
+
co_await delay(50ms);
+
+
+
+

An actual awaitable is a type that can be co_await-ed from within any coroutine, +like a delay operation. +A pseudo-awaitable is one that can only be used in coroutines adding special +functionality for it. It is akin to a contextual pseudo-keyword.

+
+
+

All the verbs in the this_coro namespace are such pseudo-awaitables.

+
+
+
+
auto exec = co_await this_coro::executor;
+
+
+
+ + + + + +
+ + +This library exposes a set of enable_* base classes for promises, +to make the creation of custom coroutines easy. +
+
+
+

A coroutine in the context of this documentation refers +to an asynchronous coroutine, i.e. synchronous coroutines like +generator +are not considered.

+
+
+

All coroutines except main are also actual awaitables.

+
+
+
+

Executors

+
+

Since everything is asynchronous the library needs to use an event-loop. +Because everything is single-threaded, it can be assumed that there is exactly one executor +per thread, which will suffice for 97% of use-cases. +Therefore, there is a thread_local executor that gets used as default +by the coroutine objects (although stored by copy in the coroutine promise).

+
+
+

Likewise, there is one executor type used by the library, +which defaults to asio::any_io_executor.

+
+
+ + + + + +
+ + +If you write your own coroutine, it should hold a copy of the executor, +and have a get_executor function returning it by const reference. +
+
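A sketch of what that could look like in a hypothetical coroutine promise:

struct my_promise // hypothetical promise type
{
  using executor_type = async::executor;
  executor_type exec; // copy of the executor the coroutine runs on

  const executor_type & get_executor() const { return exec; } // returned by const reference
  // the usual promise members (get_return_object, initial_suspend, ...) are omitted here
};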
+
+

Using Strands

+
+

While strands can be used, they are not compatible with the thread_local executor. +This is because they might switch threads, thus they can’t be thread_local.

+
+
+

If you wish to use strands (e.g. through a spawn) +the executor for any promise, generator or channel +must be assigned manually.

+
+
+

In the case of a channel this is a constructor argument,
+but for the other coroutine types, asio::executor_arg needs to be used.
+This is done by having asio::executor_arg_t (somewhere) in the argument
+list of the coroutine, directly followed by the executor to be used, e.g.:

+
+
+
+
async::promise<void> example_with_executor(int some_arg, asio::executor_arg_t, async::executor);
+
+
+
+

This way the coroutine-promise can pick up the executor from the third argument, +instead of defaulting to the thread_local one.

+
+
+

The arguments can of course be defaulted, to make them less inconvenient
+if the coroutine is sometimes used with the thread_local executor.

+
+
+
+
async::promise<void> example_with_executor(int some_arg,
+                                           asio::executor_arg_t = asio::executor_arg,
+                                           async::executor = async::this_thread::get_executor());
+
+
+
+

If this gets omitted on a strand an exception of type asio::bad_allocator is thrown, +or - worse - the wrong executor is used.

+
+
+
+
+

polymorphic memory resource

+
+

Similarly, the library uses a thread_local pmr::memory_resource to allocate +coroutine frames & to use as allocator on asynchronous operations.

+
+
+

The reason is that users may want to customize allocations,
+e.g. to avoid locks, limit memory usage or monitor usage.
+pmr allows us to achieve this without introducing unnecessary template parameters,
+i.e. no promise<T, Allocator> complexity.
+Using pmr however does introduce some minimal overhead,
+so a user has the option to disable it by defining BOOST_ASYNC_NO_PMR.

+
+
+

op uses an internal resource optimized for asio’s allocator usage,
+and gather, select and join use a monotonic resource to minimize allocations.
+Both still work with BOOST_ASYNC_NO_PMR defined, in which case they’ll fall back to new/delete for allocations.

+
+
+

main and thread use a single pmr::unsynchronized_pool_resource per thread when PMR is enabled.

+
+
+ + + + + +
+ + +If you write your own coroutine, it should have a get_allocator function +returning a pmr::polymorphic_allocator<void>. +
+
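A sketch of such a get_allocator in a hypothetical promise, assuming the thread’s default resource is obtained as described in the reference:

struct my_promise // hypothetical promise type
{
  pmr::memory_resource * resource = async::this_thread::get_default_resource();

  pmr::polymorphic_allocator<void> get_allocator() const
  {
    return pmr::polymorphic_allocator<void>(resource);
  }
};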
+
+
+

cancellation

+
+

async uses implicit cancellation based on asio::cancellation_signal. +This is mostly used implicitly (e.g. with select), +so that there is very little explicit use in the examples.

+
+
+ + + + + +
+



If you write a custom coroutine it must return a cancellation_slot from a
+get_cancellation_slot function in order to be able to cancel other operations.
+
+
+
+ + + + + +
+ + +If you write a custom awaitable, it can use that function in await_suspend to receive cancellation signals. +
+
+
+
+

Promise

+
+

The main coroutine type is a promise, which is eager. +The reason to default to this, is that the compiler can optimize out +promises that do not suspend, like this:

+
+
+
+
async::promise<void> noop()
+{
+  co_return;
+}
+
+
+
+

Awaiting the above operation is in theory a noop, +but practically speaking, compilers aren’t there as of 2023.

+
+
+
+

Select

+
+

The most important synchronization mechanism is the select function.

+
+
+

It awaits multiple awaitables in a pseudo-random order
+and will return the result of the first one to complete, disregarding the rest.

+
+
+

That is, it initiates the co_await in a pseudo-random order and stops once one
+awaitable is found to be ready or completes immediately.

+
+
+
+
async::generator<int> gen1();
+async::generator<double> gen2();
+
+async::promise<void> p()
+{
+  auto g1 = gen1();
+  auto g2 = gen2();
+  while (!co_await async::this_coro::cancelled)
+  {
+    switch(auto v = co_await select(g1, g2); v.index())
+    {
+    case 0:
+      printf("Got int %d\n", get<0>(v));
+      break;
+    case 1:
+      printf("Got double %f\n", get<1>(v));
+      break;
+    }
+  }
+}
+
+
+
+

The select must however internally wait for all awaitables to complete
+once it initiates the co_await.
+Therefore, once the first awaitable completes,
+it tries to interrupt the rest, and if that fails, cancels them.

+
+
+

select is the preferred way to trigger cancellations, e.g.:

+
+
+
+
async::promise<void> timeout();
+async::promise<void> work();
+
+select(timeout(), work());
+
+
+
+
+

interrupt_await

+
+

If select naively cancelled the others, it would however lose data.
+Thus, the concept of interrupt_await is introduced,
+which tells the awaitable (that supports it)
+to immediately resume the awaiter and return or throw an ignored value.

+
+
+
Example of an interruptible awaitable
+
+
struct awaitable
+{
+   bool await_ready() const;
+
+   template<typename Promise>
+   std::coroutine_handle<void> await_suspend(std::coroutine_handle<Promise> h);
+
+   T await_resume();
+
+   void interrupt_await() &;
+};
+
+
+
+

If the interrupt_await doesn’t result in immediate resumption (of h), +select will send a cancel signal.

+
+
+

select applies these with the correct reference qualification:

+
+
+
+
auto g = gen1();
+select(g, gen2());
+
+
+
+

The above will call an interrupt_await() & function for g and interrupt_await() && for the temporary returned by gen2(), if available.

+
+
+ + + + + +
+



Generally speaking, the coroutines in async support lvalue interruption, i.e. interrupt_await() &.
+channel operations are unqualified, i.e. they work in both cases.
+
+
+
+

join and gather will forward interruptions, +i.e. this will only interrupt g1 and g2 if gen2() completes first:

+
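A sketch of what that could look like, reusing gen1 and gen2 from above:

auto g1 = gen1();
auto g2 = gen1();
// if gen2() completes first, select interrupts the join,
// which forwards the interruption to g1 and g2
select(join(g1, g2), gen2());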
+
+
+

Associators

+
+

async uses the associator concept of asio, but simplifies it. +That is, it has three associators that are member functions of an awaiting promise.

+
+
+
    +
  • +

    const executor_type & get_executor() (always executor, must return by const ref)

    +
  • +
  • +

    allocator_type get_allocator() (always pmr::polymorphic_allocator<void>)

    +
  • +
  • +

cancellation_slot_type get_cancellation_slot() (must have the same interface as asio::cancellation_slot)

    +
  • +
+
+
+

async uses concepts to check if those are present in its await_suspend functions.

+
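A sketch of such a check (a hypothetical concept, not the library’s actual definition):

template<typename Promise>
concept promise_with_executor = requires (Promise & p) { p.get_executor(); };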
+
+

That way custom coroutines can support cancellation, executors, etc.

+
+
+

In a custom awaitable you can obtain them like this:

+
+
+
+
struct my_awaitable
+{
+    bool await_ready();
+    template<typename Promise>
+    void await_suspend(std::coroutine_handle<Promise> h)
+    {
+        if constexpr (requires (Promise p) {p.get_executor();})
+            handle_executor(h.promise().get_executor());
+
+        if constexpr (requires (Promise p) {p.get_cancellation_slot();})
+            if ((cl = h.promise().get_cancellation_slot()).is_connected())
+                cl.emplace<my_cancellation>();
+    }
+
+    void await_resume();
+};
+
+
+
+
+

Threading

+
+

This library is single-threaded by design, because this simplifies resumption
+and thus allows more performant handling of synchronization mechanisms like select.
+select would need to lock every selected awaitable to avoid data loss,
+which would be blocking and get worse with every additional element.

+
+
+ + + + + +
+



You can’t have any coroutine be resumed on a different thread than the one it was created on,
+except for a task (e.g. using spawn).
+
+
+
+

The main technical reason is that the most efficient way of switching coroutines is by returning the handle +of the new coroutine from await_suspend like this:

+
+
+
+
struct my_awaitable
+{
+    bool await_ready();
+    std::coroutine_handle<T> await_suspend(std::coroutine_handle<U>);
+    void await_resume();
+};
+
+
+
+

In this case, the awaiting coroutine will be suspended before await_suspend is called, +and the coroutine returned is resumed. This of course doesn’t work if we need to go through an executor.

+
+
+

This doesn’t only apply to awaited coroutines, but to channels, too.
+The channels in this library use an intrusive list of awaitables
+and may return the handle of a reading (and thus suspended) coroutine
+from a write_operation’s await_suspend.

+
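A sketch of that idea with hypothetical types (not the library’s actual implementation):

struct write_operation // hypothetical
{
  my_channel & chan;

  bool await_ready() const noexcept { return false; }
  std::coroutine_handle<void> await_suspend(std::coroutine_handle<void> writer) noexcept
  {
    chan.pending_writers.push_back(writer);      // remember the suspended writer
    if (auto reader = chan.pop_waiting_reader()) // a reader is already suspended?
      return reader;                             // resume it directly, no executor round-trip
    return std::noop_coroutine();                // otherwise just stay suspended
  }
  void await_resume() noexcept {}
};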
+
+
+
+ +
+ + + \ No newline at end of file diff --git a/index.html b/index.html index ee463739..851121cb 100644 --- a/index.html +++ b/index.html @@ -434,223 +434,12 @@ @media amzn-kf8{#header,#content,#footnotes,#footer{padding:0}} - + - +
-
-

Overview

-
-
-

Here’s a list of relevant featuers in async:

-
- - ---- - - - - - - - - - - - - - - - - - - -
Table 1. Coroutine types

promise

An eager coroutine returning a single result- consider it the default

generator

An eager coroutine that can yield multiple values.

task

A lazy version of promise that can be spawned onto other executors.

detached

A coroutine similar to promise, without a handle

- - ---- - - - - - - - - - - - - - - - - - - -
Table 2. Synchronization Functions

select

A function that waits for one coroutine out of a set that is ready in a pseudo-random way, to avoid starvation.

join

A function that waits for a set of coroutines and returns all of them as value or throws an exception if any awaitable does so.

gather

A function that waits for a set of coroutines and returns all of them as result, capturing all exceptions individually.

left_select

A deterministic select that evaluates left-to-right.

- - ---- - - - - - - - - - - -
Table 3. Utilities

channel

A thread-local utility to send values between coroutines.

with

An async RAII helper, that allows async teardown when exceptions occur

-
-
-
-

Motivation

-
-
-

Many languages provide a simple programming languages -like node.js and python provide easy to use single-threaded concurrency frameworks. -While more complex than synchronous code, -single threaded asynchronicity avoids many of the pitfalls & overhead of multi-threading.

-
-
-

That is, one coroutine can work, while others wait for events (e.g. a response from a server). -This allows to write applications that do multiple things at once on a single thread.

-
-
-

This library is meant to provide this to C++: simple single threaded asynchronicity -akin to node.js and asyncio in python that works with existing libraries like -boost.beast, boost.mysql or boost.redis.

-
-
-

It takes a collection of concepts from other languages and provides them based on C++20 coroutines.

-
-
- -
-
-

Unlike asio::awaitable and asio::experimental::coro, async coroutines are open. -That is, an asio::awaitable can only await and be awaited by other asio::awaitable -and does not provide coroutine specific synchronization mechanisms.

-
-
-

async on the other hand provides a coroutine specific channel -and different wait types (select, gather etc.) that are optimized -to work with coroutines and awaitables.

-
-
-
-
-

Coroutine Primer

-
-
-

Coroutines

-
-

Coroutines are resumable functions. -Resumable means that a function can suspend, -i.e. pass the control back to the caller multiple times.

-
-
-

A regular function yields control back to the caller with the return function, where it also returns the value.

-
-
-

A coroutine on the other hand might yield control to the caller and get resumed multiple times.

-
-
-

A coroutine has three control keywords akin to co_return -(of which only co_return has to be supported).

-
-
-
-

co_return

-
-

This is similar to return, but marks the function as a coroutine.

-
-
-
-

co_await

-
-

The co_await expression suspends for an Awaitable, -i.e. stops execution until the awaitable resumes it.

-
-
-

E.g.:

-
-
-
-
async::promise<void> delay(std::chrono::milliseconds);
-
-async::task<void> example()
-{
-  co_await delay(std::chrono::milliseconds(50));
-}
-
-
-
-

A co_await expression can yield a value, depending on what it is awaiting.

-
-
-
-
async::promise<std::string> read_some();
-
-async::task<void> example()
-{
-  std::string res = co_await read_some();
-}
-
-
-
- - - - - -
- - -In async most coroutine primitives are also Awaitables. -
-
-
-
-

co_yield

-
-

The co_yield expression is similar to the co_await, -but it yields control to the caller and carries a value.

-
-
-

For example:

-
-
-
-
async::generator<int> iota(int max)
-{
-  int i = 0;
-  while (i < max)
-    co_yield i++;
-
-  co_return i;
-}
-
-
-
-

A co_yield statement can also produce a value, -which allows the user of yielding coroutine to push values into it.

-
-
-
-
async::generator<int, bool> iota()
-{
-  int i = 0;
-  bool more = false;
-  do
-  {
-    more = co_yield i++;
-  }
-  while(more);
-  co_return -1;
-}
-
-
-
-
-
Stackless
-
-

C++ coroutine are stack-less, which means they only allocate their own function frame.

-
-
-

See Stackless for more details.

-
-
-
-
-
-
-

Awaitables

-
-

Awaitables are types that can be used in a co_await expression.

-
-
-
-
struct awaitable_prototype
-{
-    bool await_ready();
-
-    template<typename T>
-    see_below await_suspend(std::coroutine_handle<T>);
-
-    return_type  await_resume();
-};
-
-
-
- - - - - -
- - -Type will be implicitly converted into an awaitable if there is an operator co_await call available. -This documentation will use awaitable to include these types, -and "actual_awaitable" to refer to type conforming to the above prototype. -
-
-
-
-Diagram -
-
-
-

In a co_await statement the waiting coroutine will first invoke -await_ready to check if the coroutine needs to suspend. -When ready, it goes directly to await_resume to get the value, -as there is no suspension needed. -Otherwise, it will suspend itself and call await_suspend with a -std::coroutine_handle to its own promise.

-
-
- - - - - -
- - -std::coroutine_handle<void> can be used for type erasure. -
-
-
-

The return_type is the result type of the coroutine, e.g. int:

-
-
-
-
int i = co_await awaitable_with_int_result();
-
-
-
-

The return type of the await_suspend can be three things:

-
-
- -
-
-

If it is void the awaiting coroutine remains suspended. If it is bool, -the value will be checked, and if falls, the awaiting coroutine will resume right away.

-
-
-

If a std::coroutine_handle is returned, this coroutine will be resumed. -The latter allows await_suspend to return the handle passed in, -being effectively the same as returning false.

-
-
-

If the awaiting coroutine gets resumed right away, -it is referred to as "immediate completion" within this library.

-
-
-
-

Event Loops

-
-

Since the coroutines in async can co_await events, -they need to be run on an event-loop. -That is another piece of code is responsible for tracking -outstanding event and resume a resuming coroutines that are awaiting them. -This pattern is very common and is used in a similar way -by node.js or python’s asyncio.

-
-
-

async uses an asio::io_context as its default event loop.

-
-
-
-
-
-

Tutorial

-
-
-

delay

-
-

Let’s start with the simplest example possible: a simple delay.

-
-
-
example/delay.cpp
-
-
async::main co_main(int argc, char * argv[]) (1)
-{
-  asio::steady_timer tim{co_await asio::this_coro::executor, (2)
-                         std::chrono::milliseconds(std::stoi(argv[1]))}; (3)
-  co_await tim.async_wait(async::use_op); (4)
-  co_return 0; (5)
-}
-
-
-
- - - - - - - - - - - - - - - - - - - - - -
1The co_main function defines an implicit main when defined -and is the easiest way to enter asynchronous code.
2Take the executor from the current coroutine promise.
3Use an argument to set the timeout
4Perform the wait by using async::use_op.
5Return a value that gets returned from the implicit main.
-
-
-

In this example we use the async/main.hpp header, which provides us with a main coroutine if co_main -is defined as above. This has a few advantages:

-
-
-
  • -

    The environment get set up correctly (executor & memory)

    +

    Reference

  • -

    asio is signaled that the context is single threaded

    +

    Technical Background

  • -

    an asio::signal_set with SIGINT & SIGTERM is automatically connected to cancellations (i.e. Ctrl+C causes cancellations)

    +

    Benchmarks

  • -
-
-
-

This coroutine then has an executor in its promise[1] which we can obtain through the dummy-awaitables in -the this_coro namespace.

-
-
-

We can then construct a timer and initiate the async_wait with use_op. -async provides multiple ways to co_await to interact with asio, of which use_op is the easiest.

-
-
-
-

echo server

-
-

We’ll be using the use_op (asio completion) token everywhere, -so we’re using a default completion token, so that we can skip the last parameters.

-
-
-
example/echo_server.cpp declarations
-
-
namespace async = boost::async;
-using boost::asio::ip::tcp;
-using boost::asio::detached;
-using tcp_acceptor = async::use_op_t::as_default_on_t<tcp::acceptor>;
-using tcp_socket   = async::use_op_t::as_default_on_t<tcp::socket>;
-namespace this_coro = boost::async::this_coro;
-
-
-
-

We’re writing the echo function as a promise coroutine. -It’s an eager coroutine and recommended as the default; -in case a lazy coro is needed, task is available.

-
-
-
example/echo_server.cpp echo function
-
-
async::promise<void> echo(tcp_socket socket)
-{
-  try (1)
-  {
-    char data[4096];
-    while (socket.is_open()) (2)
-    {
-      std::size_t n = co_await socket.async_read_some(boost::asio::buffer(data)); (3)
-      co_await async_write(socket, boost::asio::buffer(data, n)); (4)
-    }
-  }
-  catch (std::exception& e)
-  {
-    std::printf("echo: exception: %s\n", e.what());
-  }
-}
-
-
-
- - - - - - - - - - - - - - - - - -
1When using the use_op completion token, I/O errors are translated into C++ exceptions. Additionally, -if the coroutine gets cancelled (e.g. because the user hit Ctrl-C), -an exception will be raised, too. Under these conditions, we print the error and exit the loop.
2We run the loop until we get cancelled (exception) or the user closes the connection.
3Read as much as is available.
4Write all the read bytes.
-
-
-

Note that promise is eager. Calling echo will immediately execute code until async_read_some -and then return control to the caller.

-
-
-

Next, we also need an acceptor function. Since this is a tutorial, we’re using a generator. -This is a coroutine that can be co_awaited multiple times, until a co_return statement is reached.

-
-
-
example/echo_server.cpp listen function
-
-
async::generator<tcp_socket> listen()
-{
-  tcp_acceptor acceptor({co_await async::this_coro::executor}, {tcp::v4(), 55555});
-  for (;;) (1)
-  {
-    tcp_socket sock = co_await acceptor.async_accept(); (2)
-    co_yield std::move(sock); (3)
-  }
-}
-
-
-
- - - - - - - - - - - - - -
1Cancellation will also lead to an exception here being thrown from the co_await
2Asynchronously accept the connection
3Yield it to the awaiting coroutine
-
-
-

With those two functions we can now write the server:

-
-
-
example/echo_server.cpp run_server function
-
-
async::promise<void> run_server(async::wait_group & workers)
-{
-  auto l = listen(); (1)
-  while (true)
-  {
-    if (workers.size() == 10u)
-      co_await workers.wait_one();  (2)
-    else
-      workers.push_back(echo(co_await l)); (3)
-  }
-}
-
-
-
- - - - - - - - - - - - - -
1Construct the listener generator coroutine. When the object is destroyed, -the coroutine will be cancelled, performing all required cleanup.
2When we have more than 10 workers, we wait for one to finish
3Accept a new connection & launch it.
-
-
-

The wait_group is used to manage the running echo functions. -This class will cancel & await the running echo coroutines.

-
-
-

We do not need to do the same for the listener, because it will just stop on its own, when l gets destroyed. -The destructor of a generator will cancel it.

-
-
-

Since the promise is eager, just calling it is enough to launch. -We then put those promises into a wait_group which will allow us to tear down all the workers on scope exit.

-
-
-
example/echo_server.cpp co_main function
-
-
async::main co_main(int argc, char ** argv)
-{
-  co_await async::with(async::wait_group(), &run_server); (1)
-  co_return 0u;
-}
-
-
-
- - - - - -
1Run run_server with an async scope.
-
-
-

The with function shown above, will run a function with a resource such as wait_group. -On scope exit with will invoke & co_await an asynchronous teardown function. -This will cause all connections to be properly shutdown before co_main exists.

-
-
-
-

price ticker

-
-

To demonstrate channels and other tools, we need a certain complexity. -For that purpose our project is a price ticker, that connects to -https://blockchain.info. A user can then connection to localhost -to query a given currency pair, like this:

-
-
-
-
wscat -c localhost:8080/btc/usd
-
-
-
-

First we do the same declarations as echo-server.

-
-
-
example/ticker.cpp declarations
-
-
using executor_type = async::use_op_t::executor_with_default<async::executor>;
-using socket_type   = typename asio::ip::tcp::socket::rebind_executor<executor_type>::other;
-using acceptor_type = typename asio::ip::tcp::acceptor::rebind_executor<executor_type>::other;
-using websocket_type = beast::websocket::stream<asio::ssl::stream<socket_type>>;
-namespace http = beast::http;
-
-
-
-

The next step is to write a function to connect an ssl-stream, -to connect upstream:

-
-
-
example/ticker.cpp connect
-
-
async::promise<asio::ssl::stream<socket_type>> connect(
-        std::string host, boost::asio::ssl::context & ctx)
-{
-    asio::ip::tcp::resolver res{async::this_thread::get_executor()};
-    auto ep = co_await res.async_resolve(host, "https", async::use_op); (1)
-
-    asio::ssl::stream<socket_type> sock{async::this_thread::get_executor(), ctx};
-    co_await sock.next_layer().async_connect(*ep.begin()); (2)
-    co_await sock.async_handshake(asio::ssl::stream_base::client); (3)
-
-    co_return sock; (4)
-}
-
-
-
- - - - - - - - - - - - - - - - - -
1Lookup the host
2Connect to the endpoint
3Do the ssl handshake
4Return the socket to the caller
-
-
-

Next, we’ll need a function to do the websocket upgrade -on an existing ssl-stream.

-
-
-
example/ticker.cpp connect_to_blockchain_info
-
-
async::promise<void> connect_to_blockchain_info(websocket_type & ws)
-{
- ws.set_option(beast::websocket::stream_base::decorator(
-     [](beast::websocket::request_type& req)
-     {
-       req.set(http::field::user_agent,
-               std::string(BOOST_BEAST_VERSION_STRING) + " async-ticker");
-       req.set(http::field::origin,
-               "https://exchange.blockchain.com"); (1)
-     }));
-
- co_await ws.async_handshake("ws.blockchain.info", "/mercury-gateway/v1/ws"); (2)
-}
-
-
-
- - - - - - - - - -
1blockchain.info requires this header to be set.
2Perform the websocket handshake.
-
-
-

Once the websocket is connected, we want to continuously receive json messages, -for which a generator is a good choice.

-
-
-
example/ticker.cpp json_read
-
-
async::generator<json::object> json_reader(websocket_type & ws)
-try
-{
-    beast::flat_buffer buf;
-    while (ws.is_open()) (1)
-    {
-        auto sz = co_await ws.async_read(buf); (2)
-        json::string_view data{static_cast<const char*>(buf.cdata().data()), sz};
-        auto obj = json::parse(data);
-        co_yield obj.as_object(); (3)
-        buf.consume(sz);
-    }
-    co_return {};
-}
-catch (std::exception & e)
-{
-  std::cerr << "Error reading: " << e.what() << std::endl;
-  throw;
-}
-
-
-
- - - - - - - - - - - - - -
1Keep running as long as the socket is open
2Read a frame from the websocket
3Parse & co_yield it as an object.
-
-
-

This then needs to be connected to subscriber, for which we’ll utilize channels to pass raw json. -To make life-time management easy, the subscriber will hold a shared_ptr, and the producer a weak_ptr.

-
-
-
example/ticker.cpp subscription types
-
-
using subscription = std::pair<std::string, std::weak_ptr<async::channel<json::object>>>;
-using subscription_channel = std::weak_ptr<async::channel<json::object>>;
-using subscription_map = boost::unordered_multimap<std::string, subscription_channel>;
-
-
-
-

The main function running the blockchain connector, operates on two inputs: -data coming from the websocket and a channel to handle new subscriptions.

-
-
-
example/ticker.cpp run blockchain_info
-
-
async::promise<void> run_blockchain_info(async::channel<subscription> & subc)
-try
-{
-    asio::ssl::context ctx{asio::ssl::context_base::tls_client};
-    websocket_type ws{co_await connect("blockchain.info", ctx)};
-    co_await connect_to_blockchain_info(ws); (1)
-
-    subscription_map subs;
-    std::list<std::string> unconfirmed;
-
-    auto rd = json_reader(ws); (2)
-    while (ws.is_open()) (3)
-    {
-      switch (auto msg = co_await async::select(rd, subc.read()); msg.index()) (4)
-      {
-        case 0: (5)
-          if (auto ms = get<0>(msg);
-              ms.at("event") == "rejected") // invalid sub, cancel however subbed
-            co_await handle_rejections(unconfirmed, subs, ms);
-          else
-            co_await handle_update(unconfirmed, subs, ms, ws);
-        break;
-        case 1: // (6)
-            co_await handle_new_subscription(
-                unconfirmed, subs,
-                std::move(get<1>(msg)), ws);
-        break;
-      }
-    }
-
-    for (auto & [k ,c] : subs)
-    {
-        if (auto ptr = c.lock())
-            ptr->close();
-    }
-}
-catch(std::exception & e)
-{
-  std::cerr << "Exception: " << e.what() << std::endl;
-  throw;
-}
-
-
-
- - - - - - - - - - - - - - - - - - - - - - - - - -
1Initialize the connection
2Instantiate the json_reader
3Run as long as the websocket is open
4Select, i.e. wait for either a new json message or subscription
5When its a json handle an update or a rejection
6Handle new subscription messages
-
-
-

The handle_* function’s contents are not as important for the async functionality, -so its skipped in this tutorial.

-
-
-

The handle_new_subscription function sends a message to the blockchain.info, -which will send a confirmation or rejection back. -The handle_rejection and handle_update will take the json values -and forward them to the subscription channel.

-
-
-

On the consumer side, our server will just forward data to the client. -If the client inputs data, we’ll close the websocket immediately. -We’re using as_tuple to ignore potential errors.

-
-
-
example/ticker.cpp read and close
-
-
async::promise<void> read_and_close(beast::websocket::stream<socket_type> & st, beast::flat_buffer buf)
-{
-    system::error_code ec;
-    co_await st.async_read(buf, asio::as_tuple(async::use_op));
-    co_await st.async_close(beast::websocket::close_code::going_away, asio::as_tuple(async::use_op));
-    st.next_layer().close(ec);
-}
-
-
-
-

Next, we’re running the session that the users sends

-
-
-
example/ticker.cpp run_session
-
-
async::promise<void> run_session(beast::websocket::stream<socket_type> st,
-                                 async::channel<subscription> & subc)
-try
-{
-    http::request<http::empty_body> req;
-    beast::flat_buffer buf;
-    co_await http::async_read(st.next_layer(), buf, req); (1)
-    // check the target
-    auto r = urls::parse_uri_reference(req.target());
-    if (r.has_error() || (r->segments().size() != 2u)) (2)
-    {
-        http::response<http::string_body> res{http::status::bad_request, 11};
-        res.body() = r.has_error() ? r.error().message() :
-                    "url needs two segments, e.g. /btc/usd";
-        co_await http::async_write(st.next_layer(), res);
-        st.next_layer().close();
-        co_return ;
-    }
-
-    co_await st.async_accept(req); (3)
-
-    auto sym = std::string(r->segments().front()) + "-" +
-               std::string(r->segments().back());
-    boost::algorithm::to_upper(sym);
-    // close when data gets sent
-    auto p = read_and_close(st, std::move(buf)); (4)
-
-    auto ptr = std::make_shared<async::channel<json::object>>(1u); (5)
-    co_await subc.write(subscription{sym, ptr}); (6)
-
-    while (ptr->is_open() && st.is_open()) (7)
-    {
-      auto bb = json::serialize(co_await ptr->read());
-      co_await st.async_write(asio::buffer(bb));
-    }
-
-    co_await st.async_close(beast::websocket::close_code::going_away,
-                            asio::as_tuple(async::use_op)); (8)
-    st.next_layer().close();
-    co_await p; (9)
-
-}
-catch(std::exception & e)
-{
-    std::cerr << "Session ended with exception: " << e.what() << std::endl;
-}
-
-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1Read the http request, because we want the path
2Check the path, e.g. /btc/usd.
3Accept the websocket
4Start reading & close if the consumer sends something
5Create the channel to receive updates
6Send a subscription requests to run_blockchain_info
7While the channel & websocket are open, we’re forwarding data.
8Close the socket & ignore the error
9Since the websocket is surely closed by now, wait for the read_and_close to close.
-
-
-

With run_session and run_blockchain_info written, we can not move on to main:

-
-
-
example/ticker.cpp main
-
-
async::main co_main(int argc, char * argv[])
-{
-    acceptor_type acc{co_await async::this_coro::executor,
-                      asio::ip::tcp::endpoint (asio::ip::tcp::v4(), 8080)};
-    std::cout << "Listening on localhost:8080" << std::endl;
-
-    constexpr int limit = 10; // allow 10 ongoing sessions
-    async::channel<subscription> sub_manager; (1)
-
-    co_await join( (2)
-      run_blockchain_info(sub_manager),
-      async::with( (3)
-        async::wait_group(
-            asio::cancellation_type::all,
-            asio::cancellation_type::all),
-        [&](async::wait_group & sessions) -> async::promise<void>
-        {
-          while (!co_await async::this_coro::cancelled) (4)
-          {
-            if (sessions.size() >= limit) (5)
-              co_await sessions.wait_one();
-
-            auto conn = co_await acc.async_accept(); (6)
-            sessions.push_back( (7)
-                run_session(
-                    beast::websocket::stream<socket_type>{std::move(conn)},
-                    sub_manager));
-          }
-        })
-      );
-
-    co_return 0;
-}
-
-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1Create the channel to manage subscriptions
2Use join to run both tasks in parallel.
3Use an async scope to provide a wait_group.
4Run until cancelled.
5When we’ve reached the limit we wait for one task to complete.
6Wait for a new connection.
7Insert the session into the wait_group.
-
-
-

Main is using join because one task failing should cancel the other one.

-
-
-
-

delay op

-
-

We’ve used the use_op so far, to use an implicit operation based on asio’s completion token mechanic.

-
-
-

We can however implement our own ops, that can also utilize the async_ready optimization. -To leverage this coroutine feature, async provides an easy way to create a skipable operation:

-
-
-
example/delay_op.cpp
-
-
struct wait_op final : async::op<system::error_code> (1)
-{
-  asio::steady_timer & tim;
-  wait_op(asio::steady_timer & tim) : tim(tim) {}
-  void ready(async::handler<system::error_code> h ) override (2)
-  {
-    if (tim.expiry() < std::chrono::steady_clock::now())
-      h(system::error_code{});
-  }
-  void initiate(async::completion_handler<system::error_code> complete) override (3)
-  {
-    tim.async_wait(std::move(complete));
-  }
-};
-
-
-async::main co_main(int argc, char * argv[])
-{
-  asio::steady_timer tim{co_await asio::this_coro::executor,
-                         std::chrono::milliseconds(std::stoi(argv[1]))};
-  co_await wait_op(tim); (4)
-  co_return 0; //
-}
-
-
-
- - - - - - - - - - - - - - - - - -
1Declare the op. We inherit op to make it awaitable.
2The pre-suspend check is implemented here
3Do the wait if we need to
4Use the op just like any other awaitable.
-
-
-

This way we can minimize the amounts of coroutine suspensions.

-
-
-

While the above is used with asio, you can also use these handlers -with any other callback based code. -== Generator with push value

-
-
-

Coroutines with push values are not as common, -but can simplify certain issues significantly.

-
-
-

Since we’ve already got a json_reader in the previous example, -here’s how we can write a json_writer that gets values pushed in.

-
-
-

The advantage of using a generator is the internal state management.

-
-
-
-
async::generator<system::error_code, json::object>
-    json_writer(websocket_type & ws)
-try
-{
-    co_await async::this_coro::pro_active(true); (1)
-    char buffer[4096];
-    json::serializer ser;
-
-    while (ws.is_open()) (2)
-    {
-        auto val = co_yield system::error_code{}; (3)
-
-        while (!ser.done())
-        {
-            auto sv = ser.read(buffer);
-            co_await ws.async_write({sv.data(), sv.size()}); (4)
-        }
-
-    }
-    co_return {};
-}
-catch (system::system_error& e)
-{
-    co_return e.code();
-}
-catch (std::exception & e)
-{
-    std::cerr << "Error reading: " << e.what() << std::endl;
-    throw;
-}
-
-
-
- - - - - - - - - - - - - - - - - -
1Set to proactive
2Keep running as long as the socket is open
3co_yield the current error and retrieve a new value.
4Write a frame to the websocket
-
-
-

Now we can use the generator like this:

-
-
-
-
auto g = json_writer(my_ws);
-
-extern std::vector<json::value> to_write;
-
-for (auto && tw : std::move(to_write))
-{
-    if (auto ec = co_await g(std::move(tw)))
-        return ec; // yield error
-}
-
-
-
-
-
-
-

Design

-
-
-

Concepts

-
-

This library has two fundamental concepts:

-
- -
-

An awaitable is an expression that can be used with co_await -from within a coroutine, e.g.:

-
-
-
-
co_await delay(50ms);
-
-
-
-

An actual awaitable is a type that can be co_await-ed from within any coroutine, -like a delay operation. -A pseudo-awaitable is one that can only be used in coroutines adding special -functionality for it. It is akin to a contextual pseudo-keyword.

-
-
-

All the verbs in the this_coro namespace are such pseudo-awaitables.

-
-
-
-
auto exec = co_await this_coro::executor;
-
-
-
- - - - - -
- - -This library exposes a set of enable_* base classes for promises, -to make the creation of custom coroutines easy. -
-
-
-

A coroutine in the context of this documentation refers -to an asynchronous coroutine, i.e. synchronous coroutines like -generator -are not considered.

-
-
-

All coroutines except main are also actual awaitables.

-
-
-
-

Executors

-
-

Since everything is asynchronous the library needs to use an event-loop. -Because everything is single-threaded, it can be assumed that there is exactly one executor -per thread, which will suffice for 97% of use-cases. -Therefore, there is a thread_local executor that gets used as default -by the coroutine objects (although stored by copy in the coroutine promise).

-
-
-

Likewise, there is one executor type used by the library, -which defaults to asio::any_io_executor.

-
-
- - - - - -
- - -If you write your own coroutine, it should hold a copy of the executor, -and have a get_executor function returning it by const reference. -
-
-
-

Using Strands

-
-

While strands can be used, they are not compatible with the thread_local executor. -This is because they might switch threads, thus they can’t be thread_local.

-
-
-

If you wish to use strands (e.g. through a spawn) -the executor for any promise, generator or channel -must be assigned manually.

-
-
-

In the case of a channel this is a constructor argument, -but for the other coroutine types, asio::executor_arg needs to be used. -This is done by having asio::executor_arg_t (somewhere) in the argument -list directly followed by the executor to be used in the argument list of the coroutine, e.g.:

-
-
-
-
async::promise<void> example_with_executor(int some_arg, asio::executor_arg_t, async::executor);
-
-
-
-

This way the coroutine-promise can pick up the executor from the third argument, -instead of defaulting to the thread_local one.

-
-
-

The arguments can of course be defaulted, to make them less inconvenient, -if they are sometimes with a thread_local executor.

-
-
-
-
async::promise<void> example_with_executor(int some_arg,
-                                           asio::executor_arg_t = asio::executor_arg,
-                                           async::executor = async::this_thread::get_executor());
-
-
-
-

If this gets omitted on a strand an exception of type asio::bad_allocator is thrown, -or - worse - the wrong executor is used.

-
-
-
-
-

polymorphic memory resource

-
-

Similarly, the library uses a thread_local pmr::memory_resource to allocate -coroutine frames & to use as allocator on asynchronous operations. -This allows the usage of a single pmr::unsynchronized_pool_resource per thread.

-
-
- - - - - -
- - -If you write your own coroutine, it should have a get_allocator function -returning a pmr::polymorphic_allocator<void>. -
-
-
-
-

cancellation

-
-

async uses implicit cancellation based on asio::cancellation_signal. -This is mostly used implicitly (e.g. with select), -so that there is very little explicit use in the examples.

-
-
- - - - - -
- - -If you write custom coroutine it must return a cancellation_slot from a -get_cancellation_slot function in order to be able to cancel other operations. -
-
-
- - - - - -
- - -If you write a custom awaitable, it can use that function in await_suspend to receive cancellation signals. -
-
-
-
-

Promise

-
-

The main coroutine type is a promise, which is eager. -The reason to default to this, is that the compiler can optimize out -promises that do not suspend, like this:

-
-
-
-
async::promise<void> noop()
-{
-  co_return;
-}
-
-
-
-

Awaiting the above operation is in theory a noop, -but practically speaking, compilers aren’t there as of 2023.

-
-
-
-

Select

-
-

The most important synchronization mechanism is the select function.

-
-
-

It awaits multiple awaitables in a pseudo-random order -and will return the result of the first one completion, before disregarding the rest.

-
-
-

That is, it initiates the co_await in a pseudo-random order and stops once one -awaitable is found to be ready or completed immediately.

-
-
-
-
async::generator<int> gen1();
-async::generator<double> gen2();
-
-async::promise<void> p()
-{
-  auto g1 = gen1();
-  auto g2 = gen2();
-  while (!co_await async::this_coro::cancelled)
-  {
-    switch(auto v = co_await select(g1, g2); v.index())
-    {
-    case 0:
-      printf("Got int %d\n", get<0>(v));
-      break;
-    case 1:
-      printf("Got double %f\n", get<1>(v));
-      break;
-    }
-  }
-}
-
-
-
-

The select must however internally wait for all awaitable to complete -once it initiates to co_await. -Therefor, once the first awaitable completes, -it tries to interrupt the rest, and if that fails cancels them.

-
-
-

select is the preferred way to trigger cancellations, e.g:

-
-
-
-
async::promise<void> timeout();
-async::promise<void> work();
-
-select(timeout(), work());
-
-
-
-
-

interrupt_await

-
-

If it naively cancelled it would however lose data. -Thus, the concept of interrupt_await is introduced, -which tells the awaitable (that supports it) -to immediately resume the awaiter and return or throw an ignored value.

-
-
-
Example of an interruptible awaitable
-
-
struct awaitable
-{
-   bool await_ready() const;
-
-   template<typename Promise>
-   std::coroutine_handle<void> await_suspend(std::coroutine_handle<Promise> h);
-
-   T await_resume();
-
-   void interrupt_await() &;
-};
-
-
-
-

If the interrupt_await doesn’t result in immediate resumption (of h), -select will send a cancel signal.

-
-
-

select applies these with the correct reference qualification:

-
-
-
-
auto g = gen1();
-select(g, gen2());
-
-
-
-

The above will call a interrupt_await() & function for g1 and interrupt_await() && for g2 if available.

-
-
- - - - - -
- - -Generally speaking, the coroutines in async support lvalue interruption, ie. interrupt_await() &. -channel operations are unqualified, i.e. work in both cases. -
-
-
-

join and gather will forward interruptions, -i.e. this will only interrupt g1 and g2 if gen2() completes first:

-
-
-
-

Associators

-
-

async uses the associator concept of asio, but simplifies it. -That is, it has three associators that are member functions of an awaiting promise.

-
-
-
    -
  • -

    const executor_type & get_executor() (always executor, must return by const ref)

    -
  • -
  • -

    allocator_type get_allocator() (always pmr::polymorphic_allocator<void>)

    -
  • -
  • -

    cancellation_slot_type get_cancellation_slot() (must have the same IF as asio::cancellation_slot)

    -
  • -
-
-
-

async uses concepts to check if those are present in its await_suspend functions.

-
-
-

That way custom coroutines can support cancellation, executors etc..

-
-
-

In a custom awaitable you can obtain them like this:

-
-
-
-
struct my_awaitable
-{
-    bool await_ready();
-    template<typename T>
-    void await_suspend(std::corutine_handle<P> h)
-    {
-        if constexpr (requires  (Promise p) {p.get_executor();})
-            handle_executor(h.promise().get_executor();
-
-        if constexpr (requires (Promise p) {p.get_cancellation_slot();})
-            if ((cl = h.promise().get_cancellation_slot()).is_connected())
-                cl.emplace<my_cancellation>();
-    }
-
-    void await_resume();
-};
-
-
-
-
-

Threading

-
-

This library is single-threaded by design, because this simplifies resumption -and thus allows more performant handling of synchronization primitives like select. -select would otherwise need to lock every selected awaitable to avoid data loss, -which would be blocking and get worse with every additional element.

-
-
- - - - - -
- - -You can’t have a coroutine be resumed on a different thread than the one it was created on, -except for a task (e.g. using spawn). -
-
-
-

The main technical reason is that the most efficient way of switching coroutines is by returning the handle -of the new coroutine from await_suspend like this:

-
-
-
-
struct my_awaitable
-{
-    bool await_ready();
-    std::coroutine_handle<T> await_suspend(std::coroutine_handle<U>);
-    void await_resume();
-};
-
-
-
-

In this case, the awaiting coroutine will be suspended before await_suspend is called, -and the coroutine returned is resumed. This of course doesn’t work if we need to go through an executor.

-
-
-

This doesn’t only apply to awaited coroutines, but to channels, too. -The channels in this library use an intrusive list of awaitables -and may return the handle of a reading (and thus suspended) coroutine -from a write_operation’s await_suspend.

-
-
-
-
-
-

Reference

-
-
-

async/main.hpp

-
-

The easiest way to get started with an async application is to use the co_main function with the following signature:

-
-
-
-
async::main co_main(int argc, char *argv[]);
-
-
-
-

Declaring co_main will add a main function that performs all the necessary steps to run a coroutine -on an event loop. -This allows us to write very simple asynchronous programs:

-
-
-
-
async::main co_main(int argc, char *argv[])
-{
-  auto exec = co_await async::this_coro::executor;             (1)
-  asio::steady_timer tim{exec, std::chrono::milliseconds(50)}; (2)
-  co_await tim.async_wait(async::use_op);                      (3)
-  co_return 0;
-}
-
-
-
- - - - - - - - - - - - - -
1get the executor main running on
2Use it with an asio object
3co_await an async operation
-
-
-

The main promise will create an asio::signal_set and use it for cancellation. -SIGINT becomes total cancellation, while SIGTERM becomes terminal cancellation.

-
-
-

Executor

-
-

It will also create an asio::io_context to run on, which you can get through the this_coro::executor. -It will be assigned to the async::this_thread::get_executor() .

-
-
-
-

Memory Resource

-
-

It also creates a memory resource that will be used as a default for internal memory allocations. -It will be assigned to the thread_local async::this_thread::get_default_resource().

-
-
-
-

Promise

-
-

Every coroutine has an internal state, called promise (not to be confused with the async::promise). -Depending on the coroutine properties different things can be co_await-ed, like we used in the example above.

-
-
-

They are implemented through inheritance, and shared among different promise types.

-
-
-

The main promise has the following properties.

-
- -
-
-

Specification

-
-
    -
  1. -

    declaring co_main will implicitly declare a main function

    -
  2. -
  3. -

    main is only present when co_main is defined.

    -
  4. -
  5. -

    SIGINT and SIGTERM will cause cancellation of the internal task.

    -
  6. -
-
-
-
-
-

async/promise.hpp

-
-

A promise is an eager coroutine that can co_await and co_return values. That is, it cannot use co_yield.

-
-
-
-
async::promise<void> delay(std::chrono::milliseconds ms)
-{
-  asio::steady_timer tim{co_await async::this_coro::executor, ms};
-  co_await tim.async_wait(async::use_op);
-}
-
-async::main co_main(int argc, char *argv[])
-{
-  co_await delay(std::chrono::milliseconds(50));
-  co_return 0;
-}
-
-
-
-

Promises can also be used to spawn tasks easily.

-
-
-

Promises are by default attached. -This means that a cancellation is sent when the promise handle goes out of scope.

-
-
-

A promise can be detached by calling detach or by using the prefix + operator.

-
-
-
-
async::promise<void> my_task();
-
-async::main co_main(int argc, char *argv[])
-{
-  +my_task(); (1)
-  co_await delay(std::chrono::milliseconds(50));
-  co_return 0;
-}
-
-
-
- - - - - -
1By using + the task gets detached. Without it, the compiler would generate a nodiscard warning.
-
-
-

Executor

-
-

The executor is taken from the thread_local get_executor function, unless an asio::executor_arg is used -in any position followed by the executor argument.

-
-
-
-
async::promise<int> my_gen(asio::executor_arg_t, asio::io_context::executor_type exec_to_use);
-
-
-
-
-

Memory Resource

-
-

The memory resource is taken from the thread_local get_default_resource function, -unless a std::allocator_arg is used in any position followed by a polymorphic_allocator argument.

-
-
-
-
async::promise<int> my_gen(std::allocator_arg_t, pmr::polymorphic_allocator<void> alloc);
-
-
-
-
-

Outline

-
-
-
template<typename Return>
-struct [[nodiscard]] promise
-{
-    promise(promise &&lhs) noexcept;
-    promise& operator=(promise && lhs) noexcept;
-
-    // enable `co_await`. (1)
-    auto operator co_await ();
-
-    // Ignore the return value, i.e. detach it. (2)
-    void operator +() &&;
-
-    // Cancel the promise.
-    void cancel(asio::cancellation_type ct = asio::cancellation_type::all);
-
-    // Check if the result is ready
-    bool ready() const;
-    // Check if the promise can be awaited.
-    explicit operator bool () const; (3)
-
-    // Detach or attach
-    bool attached() const;
-    void detach();
-    void attach();
-    // Get the return value if ready - otherwise throw
-    Return get();
-};
-
-
-
- - - - - - - - - - - - - -
1Supports Interrupt Wait
2This allows spawning promises with a simple +my_task() expression.
3This allows code like while (p) co_await p;
-
-
-
-

Promise

-
-

The coroutine promise (promise::promise_type) has the following properties.

-
- -
-
-
-

async/generator.hpp

-
-

A generator is an eager coroutine that can co_await and co_yield values to the caller.

-
-
-
-
async::generator<int> example()
-{
-  printf("In coro 1\n");
-  co_yield 2;
-  printf("In coro 3\n");
-  co_return 4;
-}
-
-async::main co_main(int argc, char * argv[])
-{
-  printf("In main 0\n");
-  auto f = example(); // call and let it run until the first co_yield
-  printf("In main 1\n");
-  printf("In main %d\n", co_await f);
-  printf("In main %d\n", co_await f);
-  return 0;
-}
-
-
-
-

Which will generate the following output

-
-
-
-
In main 0
-In coro 1
-In main 1
-In main 2
-In coro 3
-In main 4
-
-
-
-
-Diagram -
-
-
-

Values can be pushed into the generator, when Push (the second template parameter) is set to non-void:

-
-
-
-
async::generator<int, int> example()
-{
-  printf("In coro 1\n");
-  int i =  co_yield 2;
-  printf("In coro %d\n", i);
-  co_return 4;
-}
-
-async::main co_main(int argc, char * argv[])
-{
-  printf("In main 0\n");
-  auto f = example(); // call and let it run until the first co_yield
-  printf("In main %d\n", co_await f(3)); (1)
-  return 0;
-}
-
-
-
- - - - - -
1The pushed value gets passed through operator() to the result of co_yield.
-
-
-

Which will generate the following output

-
-
-
-
In main 0
-In coro 1
-In main 2
-Pushed 2
-In coro 3
-In main 4
-
-
-
-

Lazy

-
-

A generator can be turned lazy by awaiting initial. -This co_await expression will return the Push value. -This means the generator will wait until it’s awaited for the first time, -and then process the newly pushed value and resume at the next co_yield.

-
-
-
-
async::generator<int, int> example()
-{
-  int v = co_await async::this_coro::initial;
-  printf("In coro %d\n", v);
-  co_yield 2;
-  printf("In coro %d\n", v);
-  co_return 4;
-}
-
-async::main co_main(int argc, char * argv[])
-{
-  printf("In main 0\n");
-  auto f = example(); // call and let it run until the first co_yield
-  printf("In main 1\n"); // < this is now before the co_await initial
-  printf("In main %d\n", co_await f(1));
-  printf("In main %d\n", co_await f(3));
-  return 0;
-}
-
-
-
-

Which will generate the following output

-
-
-
-
In main 0
-In main 1
-In coro 1
-In main 2
-In coro 3
-In main 4
-
-
-
-
-Diagram -
-
-
-
-

Executor

-
-

The executor is taken from the thread_local get_executor function, unless an asio::executor_arg is used -in any position followed by the executor argument.

-
-
-
-
async::generator<int> my_gen(asio::executor_arg_t, asio::io_context::executor_type exec_to_use);
-
-
-
-
-

Memory Resource

-
-

The memory resource is taken from the thread_local get_default_resource function, -unless a std::allocator_arg is used in any position followed by a polymorphic_allocator argument.

-
-
-
-
async::generator<int> my_gen(std::allocator_arg_t, pmr::polymorphic_allocator<void> alloc);
-
-
-
-
-

Outline

-
-
-
template<typename Yield, typename Push = void>
-struct [[nodiscard]] generator
-{
-  // Movable
-
-  generator(generator &&lhs) noexcept = default;
-  generator& operator=(generator &&) noexcept = default;
-
-  // True until it co_returns & is co_awaited after (1)
-  explicit operator bool() const;
-
-  // Cancel the generator. (3)
-  void cancel(asio::cancellation_type ct = asio::cancellation_type::all);
-
-  // Check if a value is available
-  bool ready() const;
-
-  // Get the return value. Throws if not ready.
-  Yield get();
-
-  // Cancel & detach the generator.
-  ~generator();
-
-  // an awaitable that results in value of Yield.
-  using generator_awaitable = unspecified;
-
-  // Present when Push != void
-  generator_awaitable operator()(      Push && push);
-  generator_awaitable operator()(const Push &  push);
-
-  // Present when Push == void, i.e. can co_await the generator directly.
-  generator_awaitable operator co_await (); (2)
-
-};
-
-
-
- - - - - - - - - - - - - -
1This allows code like while (gen) co_await gen;
2Supports Interrupt Wait
3A cancelled generator may be resumable
-
-
-
-

Promise

-
-

The generator promise has the following properties.

-
- -
-
-
-

async/task.hpp

-
-

A task is a lazy coroutine that can co_await and co_return values. That is, it cannot use co_yield.

-
-
-
-
async::task<void> delay(std::chrono::milliseconds ms)
-{
-  asio::steady_timer tim{co_await async::this_coro::executor, ms};
-  co_await tim.async_wait(async::use_op);
-}
-
-async::main co_main(int argc, char *argv[])
-{
-  co_await delay(std::chrono::milliseconds(50));
-  co_return 0;
-}
-
-
-
-

Unlike a promise, a task can be awaited or spawned on another executor than it was created on.

-
-
-

Executor

-
-

Since a task is lazy, it does not need to have an executor on construction. -It rather attempts to take it from the caller or awaiter if present. -Otherwise, it’ll default to the thread_local executor.

-
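As an illustration, a small sketch of the difference (work and other_ctx are assumed names):

async::task<int> work(); // assumed task-returning coroutine

async::promise<void> caller(asio::io_context & other_ctx)
{
  // awaited directly: the task picks up the awaiting coroutine's executor
  int a = co_await work();

  // spawned: the task runs on other_ctx, the result is consumed here through use_op
  int b = co_await async::spawn(other_ctx.get_executor(), work(), async::use_op);

  printf("%d %d\n", a, b);
}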
-
-
-

Memory Resource

-
-

The memory resource is NOT taken from the thread_local get_default_resource function, -but from pmr::get_default_resource(), -unless a std::allocator_arg is used in any position followed by a polymorphic_allocator argument.

-
-
-
-
async::task<int> my_gen(std::allocator_arg_t, pmr::polymorphic_allocator<void> alloc);
-
-
-
-
-

Outline

-
-
-
template<typename Return>
-struct [[nodiscard]] task
-{
-    task(task &&lhs) noexcept = default;
-    task& operator=(task &&) noexcept = default;
-
-    // enable `co_await`
-    auto operator co_await ();
-
-};
-
-
-
- - - - - -
- - -Tasks can be used synchronously from a sync function by calling run(my_task()). -
-
-
-
-

Promise

-
-

The task promise has the following properties.

-
- -
-
-

use_task

-
-

The use_task completion token can be used to create a task from an async_ function. -This is less efficient than use_op as it needs to allocate a coroutine frame, -but has an obvious return type and supports Interrupt Wait.

-
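For example, a brief sketch turning a timer wait into a task (tim is assumed to be an asio::steady_timer):

async::task<void> w = tim.async_wait(async::use_task); // allocates a coroutine frame
co_await w; // can also be passed to select, where it supports interruption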
-
-
-
-

async/detached.hpp

-
-

A detached is an eager coroutine that can co_await but not co_return values. -That is, it cannot be resumed and is usually not awaited.

-
-
-
-
async::detached delayed_print(std::chrono::milliseconds ms)
-{
-  asio::steady_timer tim{co_await async::this_coro::executor, ms};
-  co_await tim.async_wait(async::use_op);
-  printf("Hello world\n");
-}
-
-async::main co_main(int argc, char *argv[])
-{
-  delayed_print();
-  co_return 0;
-}
-
-
-
-

A detached is mainly used to spawn tasks easily.

-
-
-
-
async::detached my_task();
-
-async::main co_main(int argc, char *argv[])
-{
-  my_task(); (1)
-  co_await delay(std::chrono::milliseconds(50));
-  co_return 0;
-}
-
-
-
- - - - - -
1Spawn off the detached coro.
-
-
-

A detached can assign itself a new cancellation source like this:

-
-
-
-
async::detached my_task(asio::cancellation_slot sl)
-{
-   co_await this_coro::reset_cancellation_source(sl);
-   // do some work
-}
-
-async::main co_main(int argc, char *argv[])
-{
-  asio::cancellation_signal sig;
-  my_task(sig.slot()); (1)
-  co_await delay(std::chrono::milliseconds(50));
-  sig.emit(asio::cancellation_type::all);
-  co_return 0;
-}
-
-
-
-

Executor

-
-

The executor is taken from the thread_local get_executor function, unless an asio::executor_arg is used -in any position followed by the executor argument.

-
-
-
-
async::detached my_gen(asio::executor_arg_t, asio::io_context::executor_type exec_to_use);
-
-
-
-
-

Memory Resource

-
-

The memory resource is taken from the thread_local get_default_resource function, -unless a std::allocator_arg is used in any position followed by a polymorphic_allocator argument.

-
-
-
-
async::detached my_gen(std::allocator_arg_t, pmr::polymorphic_allocator<void> alloc);
-
-
-
-
-

Outline

-
-
-
struct detached {};
-
-
-
- - - - - -
1Supports Interrupt Wait
-
-
-
-

Promise

-
-

The detached promise has the following properties.

-
- -
-
-
-

async/op.hpp

-
-

An async operation is an awaitable wrapping an asio operation.

-
-
-

E.g. this is an async_operation with the completion signature void().

-
-
-
-
auto op = asio::post(ctx, async::use_op);
-
-
-
-

Or the async_operation can be templated like this:

-
-
-
-
auto op = [&ctx](auto token) {return asio::post(ctx, std::move(token)); };
-
-
-
-

use_op

-
-

The use_op token is the direct way to create an op, -i.e. using async::use_op as the completion token will create the required awaitable.

-
-
-

It also supports as_default_on so that async_ops can be awaited without the token:

-
-
-
-
auto tim = async::use_op.as_default_on(asio::steady_timer{co_await async::this_coro::executor});
-co_await tim.async_wait();
-
-
-
-

Depending on the completion signature the co_await statement may throw.

-
- ----- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
SignatureReturn typeException

void()

void

noexcept

void(T)

T

noexcept

void(T…​)

std::tuple<T…​>

noexcept

void(system::error_code, T)

T

system::system_error

void(system::error_code, T…​)

std::tuple<T…​>

system::system_error

void(std::exception_ptr, T)

T

any exception

void(std::exception_ptr, T…​)

std::tuple<T…​>

any exception

-
- - - - - -
- - -use_op will never complete immediately, i.e. await_ready will always return false and the coroutine will always be suspended. -
-
-
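For instance, a sketch of awaiting an operation with the signature void(system::error_code, std::size_t) (sock and buf are assumed names):

try
{
  std::size_t n = co_await sock.async_read_some(asio::buffer(buf), async::use_op);
  printf("read %zu bytes\n", n);
}
catch (system::system_error & e)
{
  // a non-zero error_code gets rethrown as system::system_error
  printf("read failed: %s\n", e.what());
}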
-
-

Hand coded Operations

-
-

Operations are a more advanced implementation of the async/op.hpp feature.

-
-
-

This library makes it easy to create asynchronous operations with an early completion condition, -i.e. a condition that avoids suspension of coroutines altogether.

-
-
-

We can for example create a wait_op that does nothing if the timer is already expired.

-
-
-
-
struct wait_op : async::op<system::error_code> (1)
-{
-  asio::steady_timer & tim;
-
-  wait_op(asio::steady_timer & tim) : tim(tim) {}
-
-  void ready(async::handler<system::error_code> h) (2)
-  {
-    if (tim.expiry() < std::chrono::steady_clock::now())
-        h(system::error_code{});
-  }
-  void initiate(async::completion_handler<system::error_code> complete) (3)
-  {
-    tim.async_wait(std::move(complete));
-  }
-};
-
-
-
- - - - - - - - - - - - - -
1Inherit op with the matching signature await_transform picks it up
2Check if the operation is ready - called from await_ready
3Initiate the async operation if it’s not ready.
-
-
-
-
-

async/concepts.hpp

-
-

Awaitable

-
-

An awaitable is an expression that can be used with co_await.

-
-
-
-
template<typename Awaitable, typename Promise = void>
-concept awaitable_type = requires (Awaitable aw, std::coroutine_handle<Promise> h)
-{
-    {aw.await_ready()} -> std::convertible_to<bool>;
-    {aw.await_suspend(h)};
-    {aw.await_resume()};
-};
-
-template<typename Awaitable, typename Promise = void>
-concept awaitable =
-        awaitable_type<Awaitable, Promise>
-    || requires (Awaitable && aw) { {std::forward<Awaitable>(aw).operator co_await()} -> awaitable_type<Promise>;}
-    || requires (Awaitable && aw) { {operator co_await(std::forward<Awaitable>(aw))} -> awaitable_type<Promise>;};
-
-
-
- - - - - -
- - -awaitables in this library require that the coroutine promise -return their executor by const reference if they provide one. Otherwise it’ll use this_thread::get_executor(). -
-
-
-
-

Enable awaitables

-
-

Inheriting enable_awaitables will enable a coroutine to co_await anything through await_transform -that would be co_await-able in the absence of any await_transform.

-
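A minimal sketch of a custom promise using it (the exact base name is taken from the text above):

struct my_promise : async::enable_awaitables<my_promise>
{
  using async::enable_awaitables<my_promise>::await_transform; // forwards plain awaitables unchanged
  // ... plus any await_transform overloads of your own
};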
-
-
-
-

async/this_coro.hpp

-
-

The this_coro namespace provides utilities to access the internal state of a coroutine promise.

-
-
-

Pseudo-awaitables:

-
-
-
-
// Awaitable type that returns the executor of the current coroutine.
-struct executor_t {};
-constexpr executor_t executor;
-
-// Awaitable type that returns the cancellation state of the current coroutine.
-struct cancellation_state_t {};
-constexpr cancellation_state_t cancellation_state;
-
-// Reset the cancellation state with custom or default filters.
-constexpr unspecified reset_cancellation_state();
-template<typename Filter>
-constexpr unspecified reset_cancellation_state(
-    Filter && filter);
-template<typename InFilter, typename OutFilter>
-constexpr unspecified reset_cancellation_state(
-    InFilter && in_filter,
-    OutFilter && out_filter);
-
-// get & set the throw_if_cancelled setting.
-unspecified throw_if_cancelled();
-unspecified throw_if_cancelled(bool value);
-
-// Set the cancellation source in a detached.
-unspecified reset_cancellation_source();
-unspecified reset_cancellation_source(asio::cancellation_slot slot);
-
-
-// get the allocator of the promise
-struct allocator_t {};
-constexpr allocator_t allocator;
-
-// get the current cancellation state-type
-struct cancelled_t {};
-constexpr cancelled_t cancelled;
-
-// set the over-eager mode of a generator
-struct initial_t {};
-constexpr initial_t initial;
-
-
-
-

Await Allocator

-
-

The allocator of a coroutine supporting enable_await_allocator can be obtained the following way:

-
-
-
-
co_await async::this_coro::allocator;
-
-
-
-

In order to enable this for your own coroutine you can inherit enable_await_allocator with the CRTP pattern:

-
-
-
-
struct my_promise : async::enable_await_allocator<my_promise>
-{
-  using allocator_type = __your_allocator_type__;
-  allocator_type get_allocator();
-};
-
-
-
- - - - - -
- - -If available the allocator gets used by use_op -
-
-
-
-

Await Executor

-
-

The executor of a coroutine supporting enable_await_executor can be obtained the following way:

-
-
-
-
co_await async::this_coro::executor;
-
-
-
-

In order to enable this for your own coroutine you can inherit enable_await_executor with the CRTP pattern:

-
-
-
-
struct my_promise : async::enable_await_executor<my_promise>
-{
-  using executor_type = __your_executor_type__;
-  executor_type get_executor();
-};
-
-
-
- - - - - -
- - -If available the executor gets used by use_op -
-
-
-
-

Memory resource base

-
-

The promise_memory_resource_base base of a promise will provide a get_allocator in the promise taken from -either the default resource or one passed following a std::allocator_arg argument. -Likewise, it will add operator new overloads so the coroutine uses the same memory resource for its frame allocation.

-
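A rough sketch of a promise type picking this up (illustrative only):

struct my_promise : async::promise_memory_resource_base
{
  // get_allocator() and the operator new/delete overloads are inherited,
  // so internal allocations and the coroutine frame share one memory resource
};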
-
-
-

Throw if cancelled

-
-

The promise_throw_if_cancelled_base provides the basic option to let a coroutine -throw an exception when it is cancelled and another awaitable is awaited.

-
-
-
-
co_await async::this_coro::throw_if_cancelled;
-
-
-
-
-

Cancellation state

-
-

The promise_cancellation_base provides the basic facilities to give a coroutine -a cancellation_state that is resettable by -reset_cancellation_state.

-
-
-
-
co_await async::this_coro::reset_cancellation_state();
-
-
-
-

For convenience there is also a short-cut to check the current cancellation status:

-
-
-
-
asio::cancellation_type ct = (co_await async::this_coro::cancellation_state).cancelled();
-asio::cancellation_type ct = co_await async::this_coro::cancelled; // same as above
-
-
-
-
-
-

async/this_thread.hpp

-
-

Since everything is single threaded this library provides an executor -& default memory-resource for every thread.

-
-
-
-
pmr::memory_resource* get_default_resource() noexcept; (1)
-pmr::memory_resource* set_default_resource(pmr::memory_resource* r) noexcept; (2)
-pmr::polymorphic_allocator<void> get_allocator(); (3)
-
-typename asio::io_context::executor_type & get_executor(); (4)
-void set_executor(asio::io_context::executor_type exec) noexcept; (5)
-
-
-
- - - - - - - - - - - - - - - - - - - - - -
1Get the default resource - will be pmr::get_default_resource unless set
2Set the default resource - returns the previously set one
3Get an allocator wrapping (1)
4Get the executor of the thread - throws if not set
5Set the executor of the current thread.
-
-
-

The coroutines will use these as defaults, but keep their own copy in case the thread_local values change.

-
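A sketch of setting these up by hand before starting coroutines manually (the exact executor type depends on the executor_type configuration):

asio::io_context ctx;
async::this_thread::set_executor(ctx.get_executor());                  // used as default by eager coroutines
async::this_thread::set_default_resource(pmr::get_default_resource()); // or a custom memory resource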
-
- - - - - -
- - -The only exception is the initialization of an async-operation, -which will use the this_thread::executor to rethrow from. -
-
-
-
-

async/channel.hpp

-
-

Channels can be used to exchange data between different coroutines -on a single thread.

-
-
-

Outline

-
-
channel outline
-
-
template<typename T>
-struct channel
-{
-  // create a channel with a buffer limit, executor & resource.
-  explicit
-  channel(std::size_t limit = 0u,
-          executor executor = this_thread::get_executor(),
-          pmr::memory_resource * resource = this_thread::get_default_resource());
-  // movable. moving with active operations is undefined behaviour.
-  channel(channel && ) noexcept = default;
-  channel & operator=(channel && lhs) noexcept = delete;
-
-  using executor_type = executor;
-  const executor_type & get_executor();
-
-  // Closes the channel
-  ~channel();
-  bool is_open() const;
-  // close the operation, will cancel all pending ops, too
-  void close();
-
-  // an awaitable that yields T
-  using read_op = unspecified;
-
-  // an awaitable that yields void
-  using write_op = unspecified;
-
-  // read a value to a channel
-  read_op  read();
-
-  // write a value to the channel
-  write_op write(const T  && value);
-  write_op write(const T  &  value);
-  write_op write(      T &&  value);
-  write_op write(      T  &  value);
-
-  // write a value to the channel if T is void
-
-};
-
-
-
-
-

Description

-
-

Channels are a tool for two coroutines to communicate and synchronize.

-
-
-
-
const std::size_t buffer_size = 2;
-channel<int> ch{buffer_size, exec};
-
-// in coroutine (1)
-co_await ch.write(42);
-
-// in coroutine (2)
-auto val = co_await ch.read();
-
-
-
- - - - - - - - - -
1Send a value to the channel - will block until it can be sent
2Read a value from the channel - will block until a value is available.
-
-
-

Both operations may be blocking depending on the channel buffer size.

-
-
-

If the buffer size is zero, a read & write will need to occur at the same time, -i.e. act as a rendezvous.

-
-
-

If the buffer is not full, the write operation will not suspend the coroutine; -likewise if the buffer is not empty, the read operation will not suspend.

-
-
-

If two operations complete at once (as is always the case with an empty buffer), -the second operation gets posted to the executor for later completion.

-
-
- - - - - -
- - -A channel type can be void, in which case write takes no parameter. -
-
-
-

The channel operations can be cancelled without losing data. -This makes them usable with select.

-
-
-
-
generator<variant2::variant<int, double>> merge(
-    channel<int> & c1,
-    channel<double> & c2)
-{
-    while (c1 && c2)
-       co_yield co_await select(c1.read(), c2.read());
-}
-
-
-
-
-

Example

-
-
-
async::promise<void> producer(async::channel<int> & chan)
-{
-  for (int i = 0; i < 4; i++)
-    co_await chan.write(i);
-
-  chan.close();
-}
-
-async::main co_main(int argc, char * argv[])
-{
-  async::channel<int> c;
-
-  auto p = producer(c);
-  while (c.is_open())
-    std::cout << co_await c.read() << std::endl;
-
-  co_await p;
-  co_return 0;
-}
-
-
-
-

Additionally, a channel_reader is provided to make reading channels more convenient & usable with -BOOST_ASYNC_FOR.

-
-
-
-
async::main co_main(int argc, char * argv[])
-{
-  async::channel<int> c;
-
-  auto p = producer(c);
-  BOOST_ASYNC_FOR(int value, async::channel_reader(c))
-    std::cout << value << std::endl;
-
-  co_await p;
-  co_return 0;
-}
-
-
-
-
-
-

async/with.hpp

-
-

The with facility provides a way to perform asynchronous tear-down of coroutines. -That is, it is like an asynchronous destructor call.

-
-
-
-
struct my_resource
-{
-  async::promise<void> await_exit(std::exception_ptr e);
-};
-
-async::promise<void> work(my_resource & res);
-
-async::promise<void> outer()
-{
-  co_await async::with(my_resource(), &work);
-}
-
-
-
- - - - - -
- - -with statements always yield void. -
-
-
-

The teardown can either be done by providing an await_exit member function or a tag_invoke function -that returns an awaitable or by providing the teardown as the third argument to with.

-
-
-
-
using ws_stream = beast::websocket::stream<asio::ip::tcp::socket>;
-async::promise<ws_stream> connect(urls::url); (1)
-async::promise<void>   disconnect(ws_stream &ws); (2)
-
-auto teardown(const boost::async::with_exit_tag & wet , ws_stream & ws, std::exception_ptr e)
-{
-  return disconnect(ws);
-}
-
-async::promise<void> run_session(ws_stream & ws);
-
-async::main co_main(int argc, char * argv[])
-{
-  co_await async::with(co_await connect(argv[1]), &run_session, &teardown);
-  co_return 0;
-}
-
-
-
- - - - - - - - - -
1Implement websocket connect & websocket initiation
2Implement an orderly shutdown.
-
-
- - - - - -
- - -The std::exception_ptr is null if the scope is exited without exception. -NOTE: It’s legal for the exit functions to take the exception_ptr by reference and modify it. -
-
-
-
-

async/select.hpp

-
-

The select function can be used to co_await one awaitable out of a set of them.

-
-
-

It can be called as a variadic function with multiple awaitables or on a range of awaitables.

-
-
-
-
async::promise<void> task1();
-async::promise<void> task2();
-
-async::promise<void> do_wait()
-{
-  co_await async::select(task1(), task2()); (1)
-  std::vector<async::promise<void>> aws {task1(), task2()};
-  co_await async::select(aws); (2)
-}
-
-
-
- - - - - - - - - -
1Wait for a variadic set of awaitables
2wait for a vector of awaitables
-
-
-

The first parameter of select can be a uniform random bit generator.

-
-
-
Signatures of select
-
-
extern promise<void> pv1, pv2;
-std::vector<promise<void>> pvv;
-
-std::mt19937 rdm{1};
-// if everything returns void select returns the index
-std::size_t r1 = co_await select(pv1, pv2);
-std::size_t r2 = co_await select(rdm, pv1, pv2);
-std::size_t r3 = co_await select(pvv);
-std::size_t r4 = co_await select(rdm, pvv);
-
-// variant if not everything is void. void become monostate
-extern promise<int> pi1, pi2;
-variant2::variant<monostate, int, int> r5 = co_await select(pv1, pi1, pi2);
-variant2::variant<monostate, int, int> r6 = co_await select(rdm, pv1, pi1, pi2);
-
-// a range returns a pair of the index and the result if non-void
-std::vector<promise<int>> piv;
-std::pair<std::size_t, int> r7 = co_await select(piv);
-std::pair<std::size_t, int> r8 = co_await select(rdm, piv);
-
-
-
-

Interrupt Wait

-
-

When arguments are passed as rvalue reference, the select will attempt to use .interrupt_await -on the awaitable to detach the awaitables that have not completed. If supported, the awaitable must complete immediately. -If the select doesn’t detect the immediate completion, it will send a cancellation.

-
-
-

This means that you can reuse select like this:

-
-
-
-
async::promise<void> do_wait()
-{
-  auto t1 = task1();
-  auto t2 = task2();
-  co_await async::select(t1, t2); (1)
-  co_await async::select(t1, t2); (2)
-}
-
-
-
- - - - - - - - - -
1Wait for the first task to complete
2Wait for the other task to complete
-
-
-

This is supported by promise, generator and gather.

-
-
-

The select will invoke the functions of the awaitable as if used in a co_await expression -or not evaluate them at all.

-
-
-
-

left_select

-
-

The left_select functions are like select but follow a strict left-to-right scan. -This can lead to starvation issues, which is why this is not the recommended default, but can -be useful for prioritization if proper care is taken.

-
-
-
-

Outline

-
-
-
// Concept for the random number generator.
-template<typename G>
-  concept uniform_random_bit_generator =
-    requires ( G & g)
-    {
-      {typename std::decay_t<G>::result_type() } -> std::unsigned_integral; // is an unsigned integer type
-      // T	Returns the smallest value that G's operator() may return. The value is strictly less than G::max(). The function must be constexpr.
-      {std::decay_t<G>::min()} -> std::same_as<typename std::decay_t<G>::result_type>;
-      // T	Returns the largest value that G's operator() may return. The value is strictly greater than G::min(). The function must be constexpr.
-      {std::decay_t<G>::max()} -> std::same_as<typename std::decay_t<G>::result_type>;
-      {g()} -> std::same_as<typename std::decay_t<G>::result_type>;
-    } && (std::decay_t<G>::max() > std::decay_t<G>::min());
-
-
-// Variadic select with a custom random number generator
-template<asio::cancellation_type Ct = asio::cancellation_type::all,
-         uniform_random_bit_generator URBG, awaitable ... Promise>
-awaitable select(URBG && g, Promise && ... p);
-
-// Ranged select with a custom random number generator
-template<asio::cancellation_type Ct = asio::cancellation_type::all,
-         uniform_random_bit_generator URBG, range<awaitable> PromiseRange>
-awaitable select(URBG && g, PromiseRange && p);
-
-// Variadic select with the default random number generator
-template<asio::cancellation_type Ct = asio::cancellation_type::all, awaitable... Promise>
-awaitable select(Promise && ... p);
-
-// Ranged select with the default random number generator
-template<asio::cancellation_type Ct = asio::cancellation_type::all, range<awaitable>>
-awaitable select(PromiseRange && p);
-
-// Variadic left select
-template<asio::cancellation_type Ct = asio::cancellation_type::all, awaitable... Promise>
-awaitable left_select(Promise && ... p);
-
-// Ranged left select
-template<asio::cancellation_type Ct = asio::cancellation_type::all, range<awaitable>>
-awaitable left_select(PromiseRange && p);
-
-
-
-
-
-

async/gather.hpp

-
-

The gather function can be used to co_await multiple awaitables -at once with cancellations being passed through.

-
-
-

The function will gather all completions and return them as system::result, -i.e. capture exceptions as values. One awaitable throwing an exception will not cancel the others.

-
-
-

It can be called as a variadic function with multiple awaitables or on a range of awaitables.

-
-
-
-
async::promise<void> task1();
-async::promise<void> task2();
-
-async::promise<void> do_gather()
-{
-  co_await async::gather(task1(), task2()); (1)
-  std::vector<async::promise<void>> aws {task1(), task2()};
-  co_await async::gather(aws); (2)
-}
-
-
-
- - - - - - - - - -
1Wait for a variadic set of awaitables
2Wait for a vector of awaitables
-
-
-

The gather will invoke the functions of the awaitable as if used in a co_await expression.

-
-
-
Signatures of gather
-
-
extern promise<void> pv1, pv2;
-std::tuple<system::result<int>, system::result<int>> r1 = co_await gather(pv1, pv2);
-
-std::vector<promise<void>> pvv;
-pmr::vector<system::result<void>> r2 =  co_await gather(pvv);
-
-extern promise<int> pi1, pi2;
-std::tuple<system::result<monostate>,
-           system::result<monostate>,
-           system::result<int>,
-           system::result<int>> r3 = co_await gather(pv1, pv2, pi1, pi2);
-
-std::vector<promise<int>> piv;
-pmr::vector<system::result<int>> r4 = co_await gather(piv);
-
-
-
-

Outline

-
-
-
// Variadic gather
-template<asio::cancellation_type Ct = asio::cancellation_type::all, awaitable... Promise>
-awaitable gather(Promise && ... p);
-
-// Ranged gather
-template<asio::cancellation_type Ct = asio::cancellation_type::all, range<awaitable>>
-awaitable gather(PromiseRange && p);
-
-
-
-
-
-

async/join.hpp

-
-

The join function can be used to co_await multiple awaitable at once with properly connected cancellations.

-
-
-

The function will gather all completions and return them as values, unless an exception is thrown. -If an exception is thrown, all outstanding ops are cancelled (or detached if possible) -and the first exception gets rethrown.

-
-
- - - - - -
- - -void will be returned as variant2::monostate in the tuple, unless all awaitables yield void. -
-
-
-

It can be called as a variadic function with multiple awaitables or on a range of awaitables.

-
-
-
-
async::promise<void> task1();
-async::promise<void> task2();
-
-async::promise<void> do_join()
-{
-  co_await async::join(task1(), task2()); (1)
-  std::vector<async::promise<void>> aws {task1(), task2()};
-  co_await async::join(aws); (2)
-}
-
-
-
- - - - - - - - - -
1Wait for a variadic set of awaitables
2Wait for a vector of awaitables
-
-
-

The join will invoke the functions of the awaitable as if used in a co_await expression.

-
-
-
Signatures of join
-
-
extern promise<void> pv1, pv2;
-/* void */ co_await join(pv1, pv2);
-
-std::vector<promise<void>> pvv;
-/* void */ co_await join(pvv);
-
-extern promise<int> pi1, pi2;
-std::tuple<monostate, monostate, int, int> r1 = co_await join(pv1, pv2, pi1, pi2);
-
-std::vector<promise<int>> piv;
-pmr::vector<int> r2 = co_await join(piv);
-
-
-
-

Outline

-
-
-
// Variadic join
-template<asio::cancellation_type Ct = asio::cancellation_type::all, awaitable... Promise>
-awaitable join(Promise && ... p);
-
-// Ranged join
-template<asio::cancellation_type Ct = asio::cancellation_type::all, range<awaitable>>
-awaitable join(PromiseRange && p);
-
-
-
-
-
-

async/wait_group.hpp

-
-

The wait_group can be used to manage -multiple coroutines of type promise<void>. -It works out of the box with async/with.hpp, by having the matching await_exit member.

-
-
-

Essentially, a wait_group is a dynamic list of -promises that has a select function (wait_one), -a gather function (wait_all) and will clean up on scope exit.

-
-
-
-
struct wait_group
-{
-    // create a wait_group
-    explicit
-    wait_group(asio::cancellation_type normal_cancel = asio::cancellation_type::none,
-               asio::cancellation_type exception_cancel = asio::cancellation_type::all);
-
-    // insert a task into the group
-    void push_back(promise<void> p);
-
-    // the number of tasks in the group
-    std::size_t size() const;
-    // remove completed tasks without waiting (i.e. zombie tasks)
-    std::size_t reap();
-    // cancel all tasks
-    void cancel(asio::cancellation_type ct = asio::cancellation_type::all);
-    // wait for one task to complete.
-    wait_one_op wait_one();
-    // wait for all tasks to complete
-    wait_op wait();
-    // wait for all tasks to complete
-    wait_op operator co_await ();
-    // when used with with , this will receive the exception
-    // and wait for the completion
-    // if ep is set, this will use the exception_cancel level,
-    // otherwise the normal_cancel to cancel all promises.
-    wait_op await_exit(std::exception_ptr ep);
-};
-
-
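A small usage sketch (do_work is an assumed promise<void>-returning coroutine):

async::promise<void> do_work(int i);

async::promise<void> run_some()
{
  async::wait_group wg;
  for (int i = 0; i < 4; i++)
    wg.push_back(do_work(i));

  co_await wg.wait_one(); // wait for whichever promise finishes first
  co_await wg;            // then wait for the rest
}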
-
-
-

async/spawn.hpp

-
-

The spawn functions allow using a task directly with asio:

-
-
-
-
auto spawn(                            task<T>    && t, CompletionToken&& token);
-auto spawn(asio::io_context & context, task<T>    && t, CompletionToken&& token);
-auto spawn(Executor executor,          task<T>    && t, CompletionToken&& token);
-
-
-
-

Spawn will post both ways, so it is safe to use spawn to run a task -on another executor and consume the result on the current one with use_op.

-
-
-

Example

-
-
-
async::task<int> work();
-
-int main(int argc, char *argv[])
-{
-  asio::io_context ctx{BOOST_ASIO_CONCURRENCY_HINT_1};
-  auto f = spawn(ctx, work(), asio::use_future);
-  ctx.run();
-
-  return f.get();
-}
-
-
-
- - - - - -
- - -The caller needs to make sure that the executor is not running on multiple threads -concurrently, e.g. by using a single-threaded context. -
-
-
-
-
-

async/run.hpp

-
-

The run function is similar to spawn but runs synchronously. -It will internally set up an execution context and the memory resources.

-
-
-

This can be useful when integrating a piece of async code into a synchronous application.

-
-
-

Outline

-
-
-
// Run the task and return its value or rethrow any exception.
-T run(task<T> t);
-
-
-
-
-

Example

-
-
-
async::task<int> work();
-
-int main(int argc, char *argv[])
-{
-  return run(work());
-}
-
-
-
-
-
-

async/thread.hpp

-
-

The thread type is another way to create an environment that is similar to main, but doesn’t use a signal_set.

-
-
-
-
async::thread my_thread()
-{
-  auto exec = co_await async::this_coro::executor;             (1)
-  asio::steady_timer tim{exec, std::chrono::milliseconds(50)}; (2)
-  co_await tim.async_wait(async::use_op);                      (3)
-  co_return 0;
-}
-
-
-
- - - - - - - - - - - - - -
1get the executor thread running on
2Use it with an asio object
3co_await an async operation
-
-
-

A thread can be used like a std::thread:

-
-
-
-
int main(int argc, char * argv[])
-{
-  auto thr = my_thread();
-  thr.join();
-  return 0;
-}
-
-
-
-

A thread is also an awaitable (including cancellation).

-
-
-
-
async::main co_main(int argc, char * argv[])
-{
-  auto thr = my_thread();
-  co_await thr;
-  co_return 0;
-}
-
-
-
- - - - - -
- - -Destructing a detached thread will cause a hard stop (io_context::stop) and join the thread. -
-
-
- - - - - -
- - -Nothing in this library, except for awaiting a thread (async/thread.hpp) and spawn (async/spawn.hpp), is thread-safe. -If you need to transfer data across threads, you’ll need a thread-safe utility like asio::concurrent_channel. -You cannot share any async primitives between threads, -with the sole exception of being able to spawn a task onto another thread’s executor. -
-
-
-

Executor

-
-

It will also create an asio::io_context to run on, which you can get through the this_coro::executor. -It will be assigned to the async::this_thread::get_executor() .

-
-
-
-

Memory Resource

-
-

It also creates a memory resource that will be used as a default for internal memory allocations. -It will be assigned to the thread_local async::this_thread::get_default_resource().

-
-
-
-

Outline

-
-
-
struct thread
-{
-  // Send a cancellation signal
-  void cancel(asio::cancellation_type type = asio::cancellation_type::all);
-
-  // Add the functions similar to `std::thread`
-  void join();
-  bool joinable() const;
-  void detach();
-  // Allow the thread to be awaited
-  auto operator co_await() & -> detail::thread_awaitable; (1)
-  auto operator co_await() && -> detail::thread_awaitable; (2)
-
-  // Stops the io_context & joins the executor
-  ~thread();
-  /// Move constructible
-  thread(thread &&) noexcept = default;
-
-  using executor_type = executor;
-
-  using id = std::thread::id;
-  id get_id() const noexcept;
-
-  executor_type get_executor() const;
-};
-
-
-
- - - - - - - - - -
1Supports Interrupt Wait
2Always forward cancel
-
-
-
-

Promise

-
-

The thread promise has the following properties.

-
- -
-
-
-

async/async_for.hpp

-
-

For types like generators a BOOST_ASYNC_FOR macro is provided, to emulate an async for loop.

-
-
-
-
async::generator<int> gen();
-
-async::main co_main(int argc, char * argv[])
-{
-    BOOST_ASYNC_FOR(auto i, gen())
-        printf("Generated value %d\n", i);
-
-    co_return 0;
-}
-
-
-
-

The requirement is that the awaitable used in the for loop has an operator bool to check if it -can be awaited again. This is the case for generator and promise.

-
-
-
-

async/error.hpp

-
-

In order to make errors easier to manage, async provides an error_category to be used with -boost::system::error_code.

-
-
-
-
enum class error
-{
-  moved_from,
-  detached,
-  completed_unexpected,
-  wait_not_ready,
-  already_awaited,
-  allocation_failed
-};
-
-system::error_category & async_category();
-system::error_code make_error_code(error e);
-
-
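A brief sketch of how such a code might be used (assuming the usual is_error_code_enum machinery is provided alongside make_error_code):

system::error_code ec = async::make_error_code(async::error::detached);
if (ec == async::error::detached)
  printf("the promise was detached before completion\n");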
-
-
-

async/config.hpp

-
-

The config header allows configuring some implementation details of boost.async.

-
-
-

executor_type

-
-

The executor type defaults to boost::asio::any_io_executor.

-
-
-

You can set it to a custom executor type by defining BOOST_ASYNC_CUSTOM_EXECUTOR -and adding a boost::async::executor type yourself.

-
-
-

Alternatively, BOOST_ASYNC_USE_IO_CONTEXT can be defined -to set the executor to boost::asio::io_context::executor_type.

-
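A hypothetical sketch of such a configuration; the chosen executor type here is an assumption and must be defined before including any boost.async headers:

#define BOOST_ASYNC_CUSTOM_EXECUTOR
namespace boost::async
{
  // used throughout the library instead of asio::any_io_executor
  using executor = boost::asio::strand<boost::asio::io_context::executor_type>;
}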
-
-
-

pmr

-
-

Boost.async can be used with different pmr implementations, defaulting to std::pmr.

-
-
-

The following macros can be used to configure it:

-
-
-
    -
  • -

    BOOST_ASYNC_USE_STD_PMR

    -
  • -
  • -

    BOOST_ASYNC_USE_BOOST_CONTAINER_PMR

    -
  • -
  • -

    BOOST_ASYNC_USE_CUSTOM_PMR

    -
  • -
-
-
-

If you define BOOST_ASYNC_USE_CUSTOM_PMR you will need to provide a boost::async::pmr namespace, -that is a drop-in replacement for std::pmr.

-
-
-

Alternatively, the pmr use can be disabled with

-
-
-
    -
  • -

    BOOST_ASYNC_NO_PMR.

    -
  • -
-
-
-

In this case, async will use a non-pmr monotonic resource for the -synchronization functions (select, gather and join).

-
-
-

use_op uses a small-buffer-optimized resource whose size can be set by defining -BOOST_ASYNC_SBO_BUFFER_SIZE and defaults to 4096 bytes.

-
-
-
-
-

async/leaf.hpp

-
-

Async provides integration with boost.leaf. -It provides functions similar to leaf’s that take an awaitable -instead of a function object and return an awaitable.

-
-
-
-
template<awaitable TryAwaitable, typename ... H >
-auto try_catch(TryAwaitable && try_coro, H && ... h );
-
-template<awaitable TryAwaitable, typename ... H >
-auto try_handle_all(TryAwaitable && try_coro, H && ... h );
-
-template<awaitable TryAwaitable, typename ... H >
-auto try_handle_some(TryAwaitable && try_coro, H && ... h );
-
-
-
-

See the leaf documentation for details.

-
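For example, a sketch of handling all failures of a task with try_handle_all (my_task is an assumed coroutine):

async::promise<int> my_task();

async::promise<int> handled()
{
  co_return co_await async::try_handle_all(
      my_task(),
      [](boost::leaf::error_info const &) { return -1; }); // catch-all handler required by try_handle_all
}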
- -
-
-
-
-

Technical Background

-
-
-

Stackless

-
-

C++20 coroutines are stackless, meaning they don’t have their own stack.

-
-
-

A stack in C++ describes the callstack, i.e. all the function frames stacked. -A function frame is the memory a function needs to operate, i.e. a slice of memory -to store its variables and information such as the return address.

-
-
- - - - - -
- - -The size of a function frame is known at compile time, but not outside the compile unit containing its definition. -
-
-
-
-
int bar() {return 0;} // the deepest point of the stack
-int foo() {return bar();}
-
-int main()
-{
-    return foo();
-}
-
-
-
-

The call stack in the above example is:

-
-
-
-
main()
-  foo()
-    bar()
-
-
-
-
-Diagram -
-
-
-

Coroutines can be implemented as stackful, which means that they allocate a fixed chunk of memory and stack function frames similar to a thread. -C++20 coroutines are stackless, i.e. they only allocate their own frame and use the caller’s stack on resumption. Using our previous example:

-
-
-
-
fictional_eager_coro_type<int> example()
-{
-    co_yield 0;
-    co_yield 1;
-}
-
-void nested_resume(fictional_eager_coro_type<int>& f)
-{
-    f.resume();
-}
-
-int main()
-{
-    auto f = example();
-    nested_resume(f);
-    f.reenter();
-    return 0;
-}
-
-
-
-

This will yield a call stack similar to this:

-
-
-
-
main()
-  f$example()
-  nested_resume()
-    f$example()
-  f$example()
-
-
-
-
-Diagram -
-
-
-

The same applies if a coroutine gets moved across threads.

-
-
-
-

Lazy & eager

-
-

Coroutines are lazy if they only start execution of their code after they get resumed, while an eager one will execute right away until its first suspension point (i.e. a co_await, co_yield or co_return statement).

-
-
-
-
lazy_coro co_example()
-{
-    printf("Entered coro\n");
-    co_yield 0;
-    printf("Coro done\n");
-}
-
-int main()
-{
-    printf("enter main\n");
-    auto lazy = co_example();
-    printf("constructed coro\n");
-    lazy.resume();
-    printf("resumed once\n");
-    lazy.resume();
-    printf("resumed twice\n");
-    return 0;
-}
-
-
-
-

Which will produce output like this:

-
-
-
-
enter main
-constructed coro
-Entered coro
-resumed once
-Coro done
-resumed twice
-
-
-
-
-Diagram -
-
-
-

Whereas an eager coro would look like this:

-
-
-
-
eager_coro co_example()
-{
-    printf("Entered coro\n");
-    co_yield 0;
-    printf("Coro done\n");
-}
-
-int main()
-{
-    printf("enter main\n");
-    auto lazy = co_example();
-    printf("constructed coro\n");
-    lazy.resume();
-    printf("resumed once\n");
-    return 0;
-}
-
-
-
-

Which will produce output like this:

-
-
-
-
enter main
-Entered coro
-constructed coro
-Coro done
-resumed once
-
-
-
-
-Diagram -
-
-
-
-
-
-

Benchmarks

-
-
-

Run on 11th Gen Intel® Core™ i7-1185G7 @ 3.00GHz

-
-
-

Posting to an executor

-
-

The benchmark is running the following code, with async’s task, asio::awaitable and `asio’s -stackful coroutine (boost.context) based.

-
-
-
-
async::task<void> atest()
-{
-  for (std::size_t i = 0u; i < n; i++)
-    co_await asio::post(async::use_op);
-}
-
-
- - ----- - - - - - - - - - - - - - - - - - - - - - - - - -
Table 4. results for 50M times in ms
gcc 12clang 16

async

2472

2098

awaitable

2432

2253

stackful

3655

3725

-
-
-

Running noop coroutine in parallel

-
-

This benchmark uses an asio::experimental::channel that has a size of zero, -to read & write in parallel to it. It uses gather with async -and an awaitable_operator in the asio::awaitable.

-
-
-
-
async::task<void> atest()
-{
-  asio::experimental::channel<void(system::error_code)> chan{co_await async::this_coro::executor, 0u};
-  for (std::size_t i = 0u; i < n; i++)
-    co_await async::gather(
-              chan.async_send(system::error_code{}, async::use_task),
-              chan.async_receive(async::use_task));
-}
-
-asio::awaitable<void> awtest()
-{
-  asio::experimental::channel<void(system::error_code)> chan{co_await async::this_coro::executor, 0u};
-  using boost::asio::experimental::awaitable_operators::operator&&;
-  for (std::size_t i = 0u; i < n; i++)
-    co_await (
-        chan.async_send(system::error_code{}, asio::use_awaitable)
-        &&
-        chan.async_receive(asio::use_awaitable));
-}
-
-
- - ----- - - - - - - - - - - - - - - - - - - - -
Table 5. results for 3M times in ms
gcc 12clang 16

async

1563

1468

awaitable

2800

2805

-
-
-

Immediate

-
-

This benchmark utilizes the immediate completion, by using a channel -with a size of 1, so that every operation is immediate.

-
-
-
-
async::task<void> atest()
-{
-  asio::experimental::channel<void(system::error_code)> chan{co_await async::this_coro::executor, 1u};
-  for (std::size_t i = 0u; i < n; i++)
-  {
-    co_await chan.async_send(system::error_code{}, async::use_op);
-    co_await chan.async_receive(async::use_op);
-  }
-}
-
-
- - ----- - - - - - - - - - - - - - - - - - - - - - - - - -
Table 6. result for 10M times in ms
gcc 12clang 16

async

1810

1864

awaitable

3109

4110

stackful

3922

4705

-
-
-

Channels

-
-

In this benchmark asio::experimental::channel and async::channel get compared.

-
-
-

This is similar to the parallel test, but uses the async::channel instead.

-
- - ----- - - - - - - - - - - - - - - - - - - - - - - - - -
Table 7. result of running the test 3M times in ms
gccclang

async

500

350

awaitable

790

770

stackful

867

907

-
-
-
-
-

Compiler support

-
-
-

This library is supported since Clang 14, Gcc 10 & MSVC 19.28.

-
-
- - - - - -
- - -Gcc versions 12.1 and 12.2 appear to have a bug for coroutines without stack variables, -as can be seen [here](https://godbolt.org/z/6adGcqP1z), and should be avoided for coroutines. -
-
-
-

Clang only added std::pmr support in 16, so older clang versions use boost::container::pmr as a drop-in replacement.

-
-
- - - - - -
- - -Some if not all MSVC versions have a broken coroutine implementation -that this library needs to work around. This may cause non-deterministic behaviour and overhead. -
-
-
-

A coroutine continuation may be done in the awaitable returned from a final_suspend, like this:

-
-
-
-
// in promise
-auto final_suspend() noexcept
-{
-    struct final_awaitable
-    {
-      std::coroutine_handle<void> continuation{std::noop_coroutine()}; (1)
-      bool await_ready() const noexcept;
-      std::coroutine_handle<void> await_suspend(std::coroutine_handle<void> h) noexcept
-      {
-        auto cc = continuation;
-        h.destroy(); (2)
-        return cc;
-      }
-
-      void await_resume() noexcept {}
-    };
-    return final_awaitable{my_continuation};
-};
-
-
-
- - - - - - - - - -
1The continuation
2Self destroying the coroutine before continuation
-
-
-

final_suspend does not properly suspend the coroutine on MSVC, so that the h.destroy() will cause -double destruction of elements on the coroutine frame. -Therefore, MSVC needs to post the destruction, to do it out of line. -This causes overhead and makes the actual freeing of memory indeterministic.

-
-
-
-
-
-
-
-1. the promise is the C++ name for a coroutine state. Not to be confused with async/promise.hpp +
diff --git a/motivation.html b/motivation.html new file mode 100644 index 00000000..b2976790 --- /dev/null +++ b/motivation.html @@ -0,0 +1,544 @@ + + + + + + + + +Documentation boost.async + + + + + + + +
+
+

Motivation

+
+
+

Many programming languages like node.js and python provide easy to use single-threaded concurrency frameworks. +While more complex than synchronous code, +single threaded asynchronicity avoids many of the pitfalls & overhead of multi-threading.

+
+
+

That is, one coroutine can work, while others wait for events (e.g. a response from a server). +This allows writing applications that do multiple things simultaneously on a single thread.

+
+
+

This library is meant to provide this to C++: simple single threaded asynchronicity +akin to node.js and asyncio in python that works with existing libraries like +boost.beast, boost.mysql or boost.redis.

+
+
+

It takes a collection of concepts from other languages and provides them based on C++20 coroutines.

+
+
+ +
+
+

Unlike asio::awaitable and asio::experimental::coro, async coroutines are open. +That is, an asio::awaitable can only await and be awaited by other asio::awaitable +and does not provide coroutine specific synchronization mechanisms.

+
+
+

async on the other hand provides a coroutine specific channel +and different wait types (select, gather etc.) that are optimized +to work with coroutines and awaitables.

+
+
+
+ +
+ + + \ No newline at end of file diff --git a/overview.html b/overview.html new file mode 100644 index 00000000..18a00332 --- /dev/null +++ b/overview.html @@ -0,0 +1,564 @@ + + + + + + + + +Documentation boost.async + + + + + + + +
+
+

Overview

+
+
+

Here’s a list of relevant features in async:

+
+ + ++++ + + + + + + + + + + + + + + + + + + +
Table 1. Coroutine types

promise

An eager coroutine returning a single result; consider it the default

generator

An eager coroutine that can yield multiple values.

task

A lazy version of promise that can be spawned onto other executors.

detached

A coroutine similar to promise, without a handle

+ + ++++ + + + + + + + + + + + + + + + + + + +
Table 2. Synchronization Functions

select

A function that waits for one coroutine out of a set that is ready in a pseudo-random way, to avoid starvation.

join

A function that waits for a set of coroutines and returns all of them as value or throws an exception if any awaitable does so.

gather

A function that waits for a set of coroutines and returns all of them as result, capturing all exceptions individually.

left_select

A deterministic select that evaluates left-to-right.

+ + ++++ + + + + + + + + + + +
Table 3. Utilities

channel

A thread-local utility to send values between coroutines.

with

An async RAII helper that allows async teardown when exceptions occur

+
+
+ +
+ + + \ No newline at end of file diff --git a/reference.html b/reference.html new file mode 100644 index 00000000..de7f6d8c --- /dev/null +++ b/reference.html @@ -0,0 +1,3148 @@ + + + + + + + + +Documentation boost.async + + + + + + + + +
+
+

Reference

+
+
+

async/main.hpp

+
+

The easiest way to get started with an async application is to use the co_main function with the following signature:

+
+
+
+
async::main co_main(int argc, char *argv[]);
+
+
+
+

Declaring co_main will add a main function that performs all the necessary steps to run a coroutine +on an event loop. +This allows us to write very simple asynchronous programs:

+
+
+
+
async::main co_main(int argc, char *argv[])
+{
+  auto exec = co_await async::this_coro::executor;             (1)
+  asio::steady_timer tim{exec, std::chrono::milliseconds(50)}; (2)
+  co_await tim.async_wait(async::use_op);                      (3)
+  co_return 0;
+}
+
+
+
+ + + + + + + + + + + + + +
1get the executor main running on
2Use it with an asio object
3co_await an async operation
+
+
+

The main promise will create an asio::signal_set and use it for cancellation. +SIGINT becomes total cancellation, while SIGTERM becomes terminal cancellation.

+
+
+ + + + + +
+ + +The cancellation will not be forwarded to detached coroutines. +The user will need to take care to end them on cancellation, +since the program otherwise doesn’t allow graceful termination. +
+
+
+

Executor

+
+

It will also create an asio::io_context to run on, which you can get through the this_coro::executor. +It will be assigned to the async::this_thread::get_executor() .

+
+
+
+

Memory Resource

+
+

It also creates a memory resource that will be used as a default for internal memory allocations. +It will be assigned to the thread_local async::this_thread::get_default_resource().

+
+
+
+

Promise

+
+

Every coroutine has an internal state, called promise (not to be confused with the async::promise). +Depending on the coroutine properties different things can be co_await-ed, like we used in the example above.

+
+
+

They are implemented through inheritance, and shared among different promise types.

+
+
+

The main promise has the following properties.

+
+ +
+
+

Specification

+
+
    +
  1. +

    declaring co_main will implicitly declare a main function

    +
  2. +
  3. +

    main is only present when co_main is defined.

    +
  4. +
  5. +

    SIGINT and SIGTERM will cause cancellation of the internal task.

    +
  6. +
+
+
+
+
+

async/promise.hpp

+
+

A promise is an eager coroutine that can co_await and co_return values. That is, it cannot use co_yield.

+
+
+
+
async::promise<void> delay(std::chrono::milliseconds ms)
+{
+  asio::steady_timer tim{co_await async::this_coro::executor, ms};
+  co_await tim.async_wait(async::use_op);
+}
+
+async::main co_main(int argc, char *argv[])
+{
+  co_await delay(std::chrono::milliseconds(50));
+  co_return 0;
+}
+
+
+
+

Promises can also be used to spawn tasks easily.

+
+
+

Promises are by default attached. +This means that a cancellation is sent when the promise handle goes out of scope.

+
+
+

A promise can be detached by calling detach or by using the prefix + operator.

+
+
+
+
async::promise<void> my_task();
+
+async::main co_main(int argc, char *argv[])
+{
+  +my_task(); (1)
+  co_await delay(std::chrono::milliseconds(50));
+  co_return 0;
+}
+
+
+
+ + + + + +
1By using + the task gets detached. Without it, the compiler would generate a nodiscard warning.
+
+
+

Executor

+
+

The executor is taken from the thread_local get_executor function, unless an asio::executor_arg is used +in any position followed by the executor argument.

+
+
+
+
async::promise<int> my_gen(asio::executor_arg_t, asio::io_context::executor_type exec_to_use);
+
+
+
+
+

Memory Resource

+
+

The memory resource is taken from the thread_local get_default_resource function, +unless a std::allocator_arg is used in any position followed by a polymorphic_allocator argument.

+
+
+
+
async::promise<int> my_gen(std::allocator_arg_t, pmr::polymorphic_allocator<void> alloc);
+
+
+
+
+

Outline

+
+
+
template<typename Return>
+struct [[nodiscard]] promise
+{
+    promise(promise &&lhs) noexcept;
+    promise& operator=(promise && lhs) noexcept;
+
+    // enable `co_await`. (1)
+    auto operator co_await ();
+
+    // Ignore the return value, i.e. detach it. (2)
+    void operator +() &&;
+
+    // Cancel the promise.
+    void cancel(asio::cancellation_type ct = asio::cancellation_type::all);
+
+    // Check if the result is ready
+    bool ready() const;
+    // Check if the promise can be awaited.
+    explicit operator bool () const; (3)
+
+    // Detach or attach
+    bool attached() const;
+    void detach();
+    void attach();
+    // Get the return value if ready - otherwise throw
+    Return get();
+};
+
+
+
+ + + + + + + + + + + + + +
1Supports Interrupt Wait
2This allows spawning promises with a simple +my_task() expression.
3This allows code like while (p) co_await p;
+
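
A minimal usage sketch based on the outline above (some_task is a hypothetical coroutine returning a promise):

+
+
async::promise<int> some_task(); // hypothetical eager coroutine
+
+async::main co_main(int argc, char * argv[])
+{
+  auto p = some_task();   // eager: starts running immediately
+  int value = co_await p; // suspend until the promise completes
+  co_return value;
+}
+
+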
+
+
+

Promise

+
+

The coroutine promise (promise::promise_type) has the following properties.

+
+ +
+
+
+

async/generator.hpp

+
+

A generator is an eager coroutine that can co_await and co_yield values to the caller.

+
+
+
+
async::generator<int> example()
+{
+  printf("In coro 1\n");
+  co_yield 2;
+  printf("In coro 3\n");
+  co_return 4;
+}
+
+async::main co_main(int argc, char * argv[])
+{
+  printf("In main 0\n");
+  auto f = example(); // call and let it run until the first co_yield
+  printf("In main 1\n");
+  printf("In main %d\n", co_await f);
+  printf("In main %d\n", co_await f);
+  co_return 0;
+}
+
+
+
+

Which will generate the following output

+
+
+
+
In main 0
+In coro 1
+In main 1
+In main 2
+In coro 3
+In main 4
+
+
+
+
+Diagram +
+
+
+

Values can be pushed into the generator, when Push (the second template parameter) is set to non-void:

+
+
+
+
async::generator<int, int> example()
+{
+  printf("In coro 1\n");
+  int i =  co_yield 2;
+  printf("In coro %d\n");
+  co_return 4;
+}
+
+async::main co_main(int argc, char * argv[])
+{
+  printf("In main 0\n");
+  auto f = example(); // call and let it run until the first co_yield
+  printf("In main %d\n", co_await f(3)); (1)
+  co_return 0;
+}
+
+
+
+ + + + + +
1The pushed value gets passed through operator() to the result of co_yield.
+
+
+

Which will generate the following output

+
+
+
+
In main 0
+In coro 1
+In main 2
+Pushed 2
+In coro 3
+In main 4
+
+
+
+

Lazy

+
+

A generator can be turned lazy by awaiting initial. +This co_await expression will produce the Push value. +This means the generator will wait until it’s awaited for the first time, +and then process the newly pushed value and resume at the next co_yield.

+
+
+
+
async::generator<int, int> example()
+{
+  int v = co_await async::this_coro::initial;
+  printf("In coro %d\n", v);
+  v = co_yield 2;
+  printf("In coro %d\n", v);
+  co_return 4;
+}
+
+async::main co_main(int argc, char * argv[])
+{
+  printf("In main 0\n");
+  auto f = example(); // call and let it run until the first co_yield
+  printf("In main 1\n"); // < this is now before the co_await initial
+  printf("In main %d\n", co_await f(1));
+  printf("In main %d\n", co_await f(3));
+  co_return 0;
+}
+
+
+
+

Which will generate the following output

+
+
+
+
In main 0
+In main 1
+In coro 1
+In main 2
+In coro 3
+In main 4
+
+
+
+
+Diagram +
+
+
+
+

Executor

+
+

The executor is taken from the thread_local get_executor function, unless an asio::executor_arg is used +in any position followed by the executor argument.

+
+
+
+
async::generator<int> my_gen(asio::executor_arg_t, asio::io_context::executor_type exec_to_use);
+
+
+
+
+

Memory Resource

+
+

The memory resource is taken from the thread_local get_default_resource function, +unless a std::allocator_arg is used in any position followed by a polymorphic_allocator argument.

+
+
+
+
async::generator<int> my_gen(std::allocator_arg_t, pmr::polymorphic_allocator<void> alloc);
+
+
+
+
+

Outline

+
+
+
template<typename Yield, typename Push = void>
+struct [[nodiscard]] generator
+{
+  // Movable
+
+  generator(generator &&lhs) noexcept = default;
+  generator& operator=(generator &&) noexcept = default;
+
+  // True until it co_returns & is co_awaited after (1)
+  explicit operator bool() const;
+
+  // Cancel the generator. (3)
+  void cancel(asio::cancellation_type ct = asio::cancellation_type::all);
+
+  // Check if a value is available
+  bool ready() const;
+
+  // Get the return value. Throws if not ready.
+  Yield get();
+
+  // Cancel & detach the generator.
+  ~generator();
+
+  // an awaitable that results in value of Yield.
+  using generator_awaitable = unspecified;
+
+  // Present when Push != void
+  generator_awaitable operator()(      Push && push);
+  generator_awaitable operator()(const Push &  push);
+
+  // Present when Push == void, i.e. can co_await the generator directly.
+  generator_awaitable operator co_await (); (2)
+
+};
+
+
+
+ + + + + + + + + + + + + +
1This allows code like while (gen) co_await gen;
2Supports Interrupt Wait
3A cancelled generator may be resumable
+
+
+
+

Promise

+
+

The generator promise has the following properties.

+
+ +
+
+
+

async/task.hpp

+
+

A task is a lazy coroutine that can co_await and co_return values. That is, it cannot use co_yield.

+
+
+
+
async::task<void> delay(std::chrono::milliseconds ms)
+{
+  asio::steady_timer tim{co_await async::this_coro::executor, ms};
+  co_await tim.async_wait(async::use_op);
+}
+
+async::main co_main(int argc, char *argv[])
+{
+  co_await delay(std::chrono::milliseconds(50));
+  co_return 0;
+}
+
+
+
+

Unlike a promise, a task can be awaited or spawned on another executor than it was created on.

+
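
For example (a sketch reusing the delay task above), the task does not start until awaited and only then picks up an executor:

+
+
async::main co_main(int argc, char *argv[])
+{
+  async::task<void> t = delay(std::chrono::milliseconds(50)); // lazy: nothing has run yet
+  co_await t; // the task now picks up the awaiting coroutine's executor and runs
+  co_return 0;
+}
+
+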
+
+

Executor

+
+

Since a task is lazy, it does not need to have an executor on construction. +Instead, it attempts to take it from the caller or awaiter if present. +Otherwise, it’ll default to the thread_local executor.

+
+
+
+

Memory Resource

+
+

The memory resource is NOT taken from the thread_local get_default_resource function, +but pmr::get_default_resource(), +unless a std::allocator_arg is used in any position followed by a polymorphic_allocator argument.

+
+
+
+
async::task<int> my_gen(std::allocator_arg_t, pmr::polymorphic_allocator<void> alloc);
+
+
+
+
+

Outline

+
+
+
template<typename Return>
+struct [[nodiscard]] task
+{
+    task(task &&lhs) noexcept = default;
+    task& operator=(task &&) noexcept = default;
+
+    // enable `co_await`
+    auto operator co_await ();
+
+};
+
+
+
+ + + + + +
+ + +Tasks can be used synchronously from a sync function by calling run(my_task()). +
+
+
+
+

Promise

+
+

The task promise has the following properties.

+
+ +
+
+

use_task

+
+

The use_task completion token can be used to create a task from an async_ function. +This is less efficient than use_op as it needs to allocate a coroutine frame, +but has an obvious return type and supports Interrupt Wait.

+
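
A brief sketch of what this can look like (assuming the result of async_wait with use_task is a task<void> here):

+
+
async::promise<void> example(asio::steady_timer & tim)
+{
+  auto t = tim.async_wait(async::use_task); // lazy: the wait has not been initiated yet
+  co_await t;                               // initiate the wait and await its completion
+}
+
+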
+
+
+
+

async/detached.hpp

+
+

A detached is an eager coroutine that can co_await but not co_return values. +That is, it cannot be resumed and is usually not awaited.

+
+
+
+
async::detached delayed_print(std::chrono::milliseconds ms)
+{
+  asio::steady_timer tim{co_await async::this_coro::executor, ms};
+  co_await tim.async_wait(async::use_op);
+  printf("Hello world\n");
+}
+
+async::main co_main(int argc, char *argv[])
+{
+  delayed_print();
+  co_return 0;
+}
+
+
+
+

Detached coroutines are mainly used to spawn tasks easily.

+
+
+
+
async::detached my_task();
+
+async::main co_main(int argc, char *argv[])
+{
+  my_task(); (1)
+  co_await delay(std::chrono::milliseconds(50));
+  co_return 0;
+}
+
+
+
+ + + + + +
1Spawn off the detached coro.
+
+
+

A detached can assign itself a new cancellation source like this:

+
+
+
+
async::detached my_task(asio::cancellation_slot sl)
+{
+   co_await this_coro::reset_cancellation_source(sl);
+   // do some work
+}
+
+async::main co_main(int argc, char *argv[])
+{
+  asio::cancellation_signal sig;
+  my_task(sig.slot()); (1)
+  co_await delay(std::chrono::milliseconds(50));
+  sig.emit(asio::cancellation_type::all);
+  co_return 0;
+}
+
+
+
+

Executor

+
+

The executor is taken from the thread_local get_executor function, unless an asio::executor_arg is used +in any position followed by the executor argument.

+
+
+
+
async::detached my_gen(asio::executor_arg_t, asio::io_context::executor_type exec_to_use);
+
+
+
+
+

Memory Resource

+
+

The memory resource is taken from the thread_local get_default_resource function, +unless a std::allocator_arg is used in any position followed by a polymorphic_allocator argument.

+
+
+
+
async::detached my_gen(std::allocator_arg_t, pmr::polymorphic_allocator<void> alloc);
+
+
+
+
+

Outline

+
+
+
struct detached {};
+
+
+
+ + + + + +
1Supports Interrupt Wait
+
+
+
+

Promise

+
+

The detached promise has the following properties.

+
+ +
+
+
+

async/op.hpp

+
+

An async operation is an awaitable wrapping an asio operation.

+
+
+

E.g. this is an async_operation with the completion signature void().

+
+
+
+
auto op = asio::post(ctx, async::use_op);
+
+
+
+

Or the async_operation can be templated like this:

+
+
+
+
auto op = [&ctx](auto token) {return asio::post(ctx, std::move(token)); };
+
+
+
+

use_op

+
+

The use_op token is the direct way to create an op, +i.e. using async::use_op as the completion token will create the required awaitable.

+
+
+

It also supports as_default_on so that async_ops can be awaited without the token:

+
+
+
+
auto tim = async::use_op.as_default_on(asio::steady_timer{co_await async::this_coro::executor});
+co_await tim.async_wait();
+
+
+
+

Depending on the completion signature the co_await expression may throw.

+
+ +++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
SignatureReturn typeException

void()

void

noexcept

void(T)

T

noexcept

void(T…​)

std::tuple<T…​>

noexcept

void(system::error_code, T)

T

system::system_error

void(system::error_code, T…​)

std::tuple<T…​>

system::system_error

void(std::exception_ptr, T)

T

any exception

void(std::exception_ptr, T…​)

std::tuple<T…​>

any exception

+
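
For example, reading from a socket (a sketch; the signature void(system::error_code, std::size_t) yields std::size_t and throws system::system_error on error):

+
+
async::promise<std::size_t> read_some(asio::ip::tcp::socket & sock)
+{
+  char data[4096];
+  // void(system::error_code, std::size_t) -> returns std::size_t, throws on error
+  std::size_t n = co_await sock.async_read_some(asio::buffer(data), async::use_op);
+  co_return n;
+}
+
+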
+ + + + + +
+ + +use_op will never complete immediately, i.e. await_ready will always return false and the coroutine will always suspend. +
+
+
+
+

Hand coded Operations

+
+

Operations are a more advanced implementation of the async/op.hpp feature.

+
+
+

This library makes it easy to create asynchronous operations with an early completion condition, +i.e. a condition that avoids suspension of coroutines altogether.

+
+
+

We can for example create a wait_op that does nothing if the timer is already expired.

+
+
+
+
struct wait_op : async::op<system::error_code> (1)
+{
+  asio::steady_timer & tim;
+
+  wait_op(asio::steady_timer & tim) : tim(tim) {}
+
+  void ready(async::handler<system::error_code> h) (2)
+  {
+    if (tim.expiry() < std::chrono::steady_clock::now())
+        h(system::error_code{});
+  }
+  void initiate(async::completion_handler<system::error_code> complete) (3)
+  {
+    tim.async_wait(std::move(complete));
+  }
+};
+
+
+
+ + + + + + + + + + + + + +
1Inherit op with the matching signature so that await_transform picks it up
2Check if the operation is ready - called from await_ready
3Initiate the async operation if it’s not ready.
+
+
+
+
+

async/concepts.hpp

+
+

Awaitable

+
+

An awaitable is an expression that can be used with co_await.

+
+
+
+
template<typename Awaitable, typename Promise = void>
+concept awaitable_type = requires (Awaitable aw, std::coroutine_handle<Promise> h)
+{
+    {aw.await_ready()} -> std::convertible_to<bool>;
+    {aw.await_suspend(h)};
+    {aw.await_resume()};
+};
+
+template<typename Awaitable, typename Promise = void>
+concept awaitable =
+        awaitable_type<Awaitable, Promise>
+    || requires (Awaitable && aw) { {std::forward<Awaitable>(aw).operator co_await()} -> awaitable_type<Promise>;}
+    || requires (Awaitable && aw) { {operator co_await(std::forward<Awaitable>(aw))} -> awaitable_type<Promise>;};
+
+
+
+ + + + + +
+ + +Awaitables in this library require that the coroutine promise +returns its executor by const reference if it provides one. Otherwise this_thread::get_executor() will be used. +
+
+
+
+

Enable awaitables

+
+

Inheriting enable_awaitables will enable a coroutine to co_await anything through await_transform +that would be co_await-able in the absence of any await_transform.

+
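
A minimal sketch, assuming a custom promise type (my_promise is hypothetical):

+
+
struct my_promise : async::enable_awaitables<my_promise>
+{
+  // pull in the pass-through await_transform for plain awaitables
+  using async::enable_awaitables<my_promise>::await_transform;
+};
+
+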
+
+
+
+

async/this_coro.hpp

+
+

The this_coro namespace provides utilities to access the internal state of a coroutine promise.

+
+
+

Pseudo-awaitables:

+
+
+
+
// Awaitable type that returns the executor of the current coroutine.
+struct executor_t {}
+constexpr executor_t executor;
+
+// Awaitable type that returns the cancellation state of the current coroutine.
+struct cancellation_state_t {};
+constexpr cancellation_state_t cancellation_state;
+
+// Reset the cancellation state with custom or default filters.
+constexpr unspecified reset_cancellation_state();
+template<typename Filter>
+constexpr unspecified reset_cancellation_state(
+    Filter && filter);
+template<typename InFilter, typename OutFilter>
+constexpr unspecified reset_cancellation_state(
+    InFilter && in_filter,
+    OutFilter && out_filter);
+
+// get & set the throw_if_cancelled setting.
+unspecified throw_if_cancelled();
+unspecified throw_if_cancelled(bool value);
+
+// Set the cancellation source in a detached.
+unspecified reset_cancellation_source();
+unspecified reset_cancellation_source(asio::cancellation_slot slot);
+
+
+// get the allocator the promise
+struct allocator_t {};
+constexpr allocator_t allocator;
+
+// get the current cancellation state-type
+struct cancelled_t {};
+constexpr cancelled_t cancelled;
+
+// set the over-eager mode of a generator
+struct initial_t {};
+constexpr initial_t initial;
+
+
+
+

Await Allocator

+
+

The allocator of a coroutine supporting enable_await_allocator can be obtained in the following way:

+
+
+
+
co_await async::this_coro::allocator;
+
+
+
+

In order to enable this for your own coroutine you can inherit enable_await_allocator with the CRTP pattern:

+
+
+
+
struct my_promise : async::enable_await_allocator<my_promise>
+{
+  using allocator_type = __your_allocator_type__;
+  allocator_type get_allocator();
+};
+
+
+
+ + + + + +
+ + +If available the allocator gets used by use_op +
+
+
+
+

Await Executor

+
+

The executor of a coroutine supporting enable_await_executor can be obtained in the following way:

+
+
+
+
co_await async::this_coro::executor;
+
+
+
+

In order to enable this for your own coroutine you can inherit enable_await_executor with the CRTP pattern:

+
+
+
+
struct my_promise : async::enable_await_executor<my_promise>
+{
+  using executor_type = __your_executor_type__;
+  executor_type get_executor();
+};
+
+
+
+ + + + + +
+ + +If available the executor gets used by use_op +
+
+
+
+

Memory resource base

+
+

The promise_memory_resource_base base of a promise will provide a get_allocator in the promise taken from +either the default resource or one passed following a std::allocator_arg argument. +Likewise, it will add operator new overloads so the coroutine uses the same memory resource for its frame allocation.

+
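
A short sketch of passing a resource explicitly (my_coro is a hypothetical coroutine whose promise inherits promise_memory_resource_base):

+
+
pmr::monotonic_buffer_resource res;
+// both the coroutine frame and the promise's get_allocator() will use res
+auto p = my_coro(std::allocator_arg, pmr::polymorphic_allocator<void>(&res));
+
+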
+
+
+

Throw if cancelled

+
+

The promise_throw_if_cancelled_base provides the basic option for a coroutine +to throw an exception when it has been cancelled and another actual awaitable is awaited.

+
+
+
+
co_await async::this_coro::throw_if_cancelled;
+
+
+
+
+

Cancellation state

+
+

The promise_cancellation_base provides the coroutine with a cancellation_state that is resettable by +reset_cancellation_state.

+
+
+
+
co_await async::this_coro::reset_cancellation_state();
+
+
+
+

For convenience there is also a short-cut to check the current cancellation status:

+
+
+
+
asio::cancellation_type ct = (co_await async::this_coro::cancellation_state).cancelled();
+asio::cancellation_type ct = co_await async::this_coro::cancelled; // same as above
+
+
+
+
+
+

async/this_thread.hpp

+
+

Since everything is single threaded this library provides an executor +& default memory-resource for every thread.

+
+
+
+
namespace boost::async::this_thread
+{
+
+pmr::memory_resource* get_default_resource() noexcept; (1)
+pmr::memory_resource* set_default_resource(pmr::memory_resource* r) noexcept; (2)
+pmr::polymorphic_allocator<void> get_allocator(); (3)
+
+typename asio::io_context::executor_type & get_executor(); (4)
+void set_executor(asio::io_context::executor_type exec) noexcept; (5)
+
+}
+
+
+
+ + + + + + + + + + + + + + + + + + + + + +
1Get the default resource - will be pmr::get_default_resource unless set
2Set the default resource - returns the previously set one
3Get an allocator wrapping (1)
4Get the executor of the thread - throws if not set
5Set the executor of the current thread.
+
+
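
A minimal sketch of setting the thread defaults by hand before launching a coroutine (my_task is a hypothetical promise):

+
+
asio::io_context ctx;
+async::this_thread::set_executor(ctx.get_executor()); // coroutines now default to ctx
+auto p = my_task(); // picks up the executor set above
+ctx.run();          // drive the io_context so the coroutine can make progress
+
+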
+

The coroutines will use these as defaults, but keep their own copy in case the thread_local values change.

+
+
+ + + + + +
+ + +The only exception is the initialization of an async-operation, +which will use the this_thread::executor to rethrow from. +
+
+
+
+

async/channel.hpp

+
+

Channels can be used to exchange data between different coroutines +on a single thread.

+
+
+

Outline

+
+
channel outline
+
+
template<typename T>
+struct channel
+{
+  // create a channel with a buffer limit, executor & resource.
+  explicit
+  channel(std::size_t limit = 0u,
+          executor executor = this_thread::get_executor(),
+          pmr::memory_resource * resource = this_thread::get_default_resource());
+  // movable. moving with active operations is undefined behaviour.
+  channel(channel && ) noexcept = default;
+  channel & operator=(channel && lhs) noexcept = delete;
+
+  using executor_type = executor;
+  const executor_type & get_executor();
+
+  // Closes the channel
+  ~channel();
+  bool is_open() const;
+  // close the operation, will cancel all pending ops, too
+  void close();
+
+  // an awaitable that yields T
+  using read_op = unspecified;
+
+  // an awaitable that yields void
+  using write_op = unspecified;
+
+  // read a value to a channel
+  read_op  read();
+
+  // write a value to the channel
+  write_op write(const T  && value);
+  write_op write(const T  &  value);
+  write_op write(      T &&  value);
+  write_op write(      T  &  value);
+
+  // write a value to the channel if T is void
+
+};
+
+
+
+
+

Description

+
+

Channels are a tool for two coroutines to communicate and synchronize.

+
+
+
+
const std::size_t buffer_size = 2;
+channel<int> ch{buffer_size, exec};
+
+// in coroutine (1)
+co_await ch.write(42);
+
+// in coroutine (2)
+auto val = co_await ch.read();
+
+
+
+ + + + + + + + + +
1Send a value to the channel - will block until it can be sent
2Read a value from the channel - will block until a value is available.
+
+
+

Both operations may block depending on the channel buffer size.

+
+
+

If the buffer size is zero, a read & write will need to occur at the same time, +i.e. act as a rendezvous.

+
+
+

If the buffer is not full, the write operation will not suspend the coroutine; +likewise if the buffer is not empty, the read operation will not suspend.

+
+
+

If two operations complete at once (as is always the case with an empty buffer), +the second operation gets posted to the executor for later completion.

+
+
+ + + + + +
+ + +A channel type can be void, in which case write takes no parameter. +
+
+
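
As a sketch, a void channel can be used purely for signalling between two coroutines (names are assumptions):

+
+
async::channel<void> signal{1u}; // room for one buffered "event"
+
+// in coroutine (1)
+co_await signal.write();
+
+// in coroutine (2)
+co_await signal.read();
+
+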
+

The channel operations can be cancelled without losing data. +This makes them usable with select.

+
+
+
+
generator<variant2::variant<int, double>> merge(
+    channel<int> & c1,
+    channel<double> & c2)
+{
+    while (c1 && c2)
+       co_yield co_await select(c1, c2);
+}
+
+
+
+
+

Example

+
+
+
async::promise<void> producer(async::channel<int> & chan)
+{
+  for (int i = 0; i < 4; i++)
+    co_await chan.write(i);
+
+  chan.close();
+}
+
+async::main co_main(int argc, char * argv[])
+{
+  async::channel<int> c;
+
+  auto p = producer(c);
+  while (c.is_open())
+    std::cout << co_await c.read() << std::endl;
+
+  co_await p;
+  co_return 0;
+}
+
+
+
+

Additionally, a channel_reader is provided to make reading channels more convenient & usable with +BOOST_ASYNC_FOR.

+
+
+
+
async::main co_main(int argc, char * argv[])
+{
+  async::channel<int> c;
+
+  auto p = producer(c);
+  BOOST_ASYNC_FOR(int value, async::channel_reader(c))
+    std::cout << value << std::endl;
+
+  co_await p;
+  co_return 0;
+}
+
+
+
+
+
+

async/with.hpp

+
+

The with facility provides a way to perform asynchronous tear-down of coroutines. +That is, it is like an asynchronous destructor call.

+
+
+
+
struct my_resource
+{
+  async::promise<void> await_exit(std::exception_ptr e);
+};
+
+async::promise<void> work(my_resource & res);
+
+async::promise<void> outer()
+{
+  co_await async::with(my_resource(), &work);
+}
+
+
+
+

The teardown can either be done by providing an await_exit member function or a tag_invoke function +that returns an awaitable or by providing the teardown as the third argument to with.

+
+
+
+
using ws_stream = beast::websocket::stream<asio::ip::tcp::socket>;
+async::promise<ws_stream> connect(urls::url); (1)
+async::promise<void>   disconnect(ws_stream &ws); (2)
+
+auto teardown(const boost::async::with_exit_tag & wet , ws_stream & ws, std::exception_ptr e)
+{
+  return disconnect(ws);
+}
+
+async::promise<void> run_session(ws_stream & ws);
+
+async::main co_main(int argc, char * argv[])
+{
+  co_await async::with(co_await connect(argv[1]), &run_session, &teardown);
+  co_return 0;
+}
+
+
+
+ + + + + + + + + +
1Implement websocket connect & websocket initiation
2Implement an orderly shutdown.
+
+
+ + + + + +
+ + +The std::exception_ptr is null if the scope is exited without exception. +NOTE: It’s legal for the exit functions to take the exception_ptr by reference and modify it. +
+
+
+
+

async/select.hpp

+
+

The select function can be used to co_await one awaitable out of a set of them.

+
+
+

It can be called as a variadic function with multiple awaitables or on a range of awaitables.

+
+
+
+
async::promise<void> task1();
+async::promise<void> task2();
+
+async::promise<void> do_wait()
+{
+  co_await async::select(task1(), task2()); (1)
+  std::vector<async::promise<void>> aws {task1(), task2()};
+  co_await async::select(aws); (2)
+}
+
+
+
+ + + + + + + + + +
1Wait for a variadic set of awaitables
2wait for a vector of awaitables
+
+
+

The first parameter to select can be a uniform random bit generator.

+
+
+
Signatures of select
+
+
extern promise<void> pv1, pv2;
+std::vector<promise<void>> pvv;
+
+std::mt19937 rdm{1};
+// if everything returns void select returns the index
+std::size_t r1 = co_await select(pv1, pv2);
+std::size_t r2 = co_await select(rdm, pv1, pv2);
+std::size_t r3 = co_await select(pvv);
+std::size_t r4 = co_await select(rdm, pvv);
+
+// variant if not everything is void. void become monostate
+extern promise<int> pi1, pi2;
+variant2::variant<monostate, int, int> r5 = co_await select(pv1, pi1, pi2);
+variant2::variant<monostate, int, int> r6 = co_await select(rdm, pv1, pi1, pi2);
+
+// a range returns a pair of the index and the result if non-void
+std::vector<promise<int>> piv;
+std::pair<std::size_t, int> r7 = co_await select(piv);
+std::pair<std::size_t, int> r8 = co_await select(rdm, piv);
+
+
+
+

Interrupt Wait

+
+

When arguments are passed as rvalue references, select will attempt to use .interrupt_await +on the awaitable to detach the awaitables that have not completed. If supported, the awaitable must complete immediately. +If the select doesn’t detect the immediate completion, it will send a cancellation.

+
+
+

This means that you can reuse select like this:

+
+
+
+
async::promise<void> do_wait()
+{
+  auto t1 = task1();
+  auto t2 = task2();
+  co_await async::select(t1, t2); (1)
+  co_await async::select(t1, t2); (2)
+}
+
+
+
+ + + + + + + + + +
1Wait for the first task to complete
2Wait for the other task to complete
+
+
+

This is supported by promise, generator and gather.

+
+
+

The select will invoke the functions of the awaitable as if used in a co_await expression +or not evaluate them at all.

+
+
+
+

left_select

+
+

The left_select functions are like select but follow a strict left-to-right scan. +This can lead to starvation issues, which is why this is not the recommended default, but can +be useful for prioritization if proper care is taken.

+
+
+
+

Outline

+
+
+
// Concept for the random number generator.
+template<typename G>
+  concept uniform_random_bit_generator =
+    requires ( G & g)
+    {
+      {typename std::decay_t<G>::result_type() } -> std::unsigned_integral; // is an unsigned integer type
+      // T	Returns the smallest value that G's operator() may return. The value is strictly less than G::max(). The function must be constexpr.
+      {std::decay_t<G>::min()} -> std::same_as<typename std::decay_t<G>::result_type>;
+      // T	Returns the largest value that G's operator() may return. The value is strictly greater than G::min(). The function must be constexpr.
+      {std::decay_t<G>::max()} -> std::same_as<typename std::decay_t<G>::result_type>;
+      {g()} -> std::same_as<typename std::decay_t<G>::result_type>;
+    } && (std::decay_t<G>::max() > std::decay_t<G>::min());
+
+
+// Variadic select with a custom random number generator
+template<asio::cancellation_type Ct = asio::cancellation_type::all,
+         uniform_random_bit_generator URBG, awaitable ... Promise>
+awaitable select(URBG && g, Promise && ... p);
+
+// Ranged select with a custom random number generator
+template<asio::cancellation_type Ct = asio::cancellation_type::all,
+         uniform_random_bit_generator URBG, range<awaitable> PromiseRange>
+awaitable select(URBG && g, PromiseRange && p);
+
+// Variadic select with the default random number generator
+template<asio::cancellation_type Ct = asio::cancellation_type::all, awaitable... Promise>
+awaitable select(Promise && ... p);
+
+// Ranged select with the default random number generator
+template<asio::cancellation_type Ct = asio::cancellation_type::all, range<awaitable> PromiseRange>
+awaitable select(PromiseRange && p);
+
+// Variadic left select
+template<asio::cancellation_type Ct = asio::cancellation_type::all, awaitable... Promise>
+awaitable left_select(Promise && ... p);
+
+// Ranged left select
+template<asio::cancellation_type Ct = asio::cancellation_type::all, range<awaitable> PromiseRange>
+awaitable left_select(PromiseRange && p);
+
+
+
+ + + + + +
+ + +Selecting an empty range will cause an exception to be thrown. +
+
+
+
+
+

async/gather.hpp

+
+

The gather function can be used to co_await multiple awaitables +at once with cancellations being passed through.

+
+
+

The function will gather all completions and return them as system::result, +i.e. capture exceptions as values. One awaitable throwing an exception will not cancel the others.

+
+
+

It can be called as a variadic function with multiple awaitables or on a range of awaitables.

+
+
+
+
async::promise<void> task1();
+async::promise<void> task2();
+
+async::promise<void> do_gather()
+{
+  co_await async::gather(task1(), task2()); (1)
+  std::vector<async::promise<void>> aws {task1(), task2()};
+  co_await async::gather(aws); (2)
+}
+
+
+
+ + + + + + + + + +
1Wait for a variadic set of awaitables
2Wait for a vector of awaitables
+
+
+

The gather will invoke the functions of the awaitable as if used in a co_await expression.

+
+
+
+Signatures of gather
+
+
extern promise<void> pv1, pv2;
+std::tuple<system::result<int>, system::result<int>> r1 = co_await gather(pv1, pv2);
+
+std::vector<promise<void>> pvv;
+pmr::vector<system::result<void>> r2 =  co_await gather(pvv);
+
+extern promise<int> pi1, pi2;
+std::tuple<system::result<monostate>,
+           system::result<monostate>,
+           system::result<int>,
+           system::result<int>> r3 = co_await gather(pv1, pv2, pi1, pi2);
+
+std::vector<promise<int>> piv;
+pmr::vector<system::result<int>> r4 = co_await gather(piv);
+
+
+
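
Since exceptions are captured as values, each result can be checked after the gather completes; a minimal sketch reusing task1 and task2 from above:

+
+
auto [r1, r2] = co_await async::gather(task1(), task2());
+if (r1.has_value() && r2.has_value())
+{
+  // both tasks completed without throwing
+}
+
+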
+

Outline

+
+
+
// Variadic gather
+template<asio::cancellation_type Ct = asio::cancellation_type::all, awaitable... Promise>
+awaitable gather(Promise && ... p);
+
+// Ranged gather
+template<asio::cancellation_type Ct = asio::cancellation_type::all, range<awaitable> PromiseRange>
+awaitable gather(PromiseRange && p);
+
+
+
+
+
+

async/join.hpp

+
+

The join function can be used to co_await multiple awaitable at once with properly connected cancellations.

+
+
+

The function will await all completions and return them as values, unless an exception is thrown. +If an exception is thrown, all outstanding ops are cancelled (or detached if possible) +and the first exception gets rethrown.

+
+
+ + + + + +
+ + +void will be returned as variant2::monostate in the tuple, unless all awaitables yield void. +
+
+
+

It can be called as a variadic function with multiple awaitables or on a range of awaitables.

+
+
+
+
async::promise<void> task1();
+async::promise<void> task2();
+
+async::promise<void> do_join()
+{
+  co_await async::join(task1(), task2()); (1)
+  std::vector<async::promise<void>> aws {task1(), task2()};
+  co_await async::join(aws); (2)
+}
+
+
+
+ + + + + + + + + +
1Wait for a variadic set of awaitables
2Wait for a vector of awaitables
+
+
+

The join will invoke the functions of the awaitable as if used in a co_await expression.

+
+
+
Signatures of join
+
+
extern promise<void> pv1, pv2;
+/* void */ co_await join(pv1, pv2);
+
+std::vector<promise<void>> pvv;
+/* void */ co_await join(pvv);
+
+extern promise<int> pi1, pi2;
+std::tuple<monostate, monostate, int, int> r1 = co_await join(pv1, pv2, pi1, pi2);
+
+std::vector<promise<int>> piv;
+pmr::vector<int> r2 = co_await join(piv);
+
+
+
+

Outline

+
+
+
// Variadic join
+template<asio::cancellation_type Ct = asio::cancellation_type::all, awaitable... Promise>
+awaitable join(Promise && ... p);
+
+// Ranged join
+template<asio::cancellation_type Ct = asio::cancellation_type::all, range<awaitable> PromiseRange>
+awaitable join(PromiseRange && p);
+
+
+
+ + + + + +
+ + +Joining an empty range will cause an exception to be thrown. +
+
+
+
+
+

async/wait_group.hpp

+
+

The wait_group can be used to manage +multiple coroutines of type promise<void>. +It works out of the box with async/with.hpp, by having the matching await_exit member.

+
+
+

Essentially, a wait_group is a dynamic list of +promises that has a select function (wait_one), +a gather function (wait) and will clean up on scope exit.

+
+
+
+
struct wait_group
+{
+    // create a wait_group
+    explicit
+    wait_group(asio::cancellation_type normal_cancel = asio::cancellation_type::none,
+               asio::cancellation_type exception_cancel = asio::cancellation_type::all);
+
+    // insert a task into the group
+    void push_back(promise<void> p);
+
+    // the number of tasks in the group
+    std::size_t size() const;
+    // remove completed tasks without waiting (i.e. zombie tasks)
+    std::size_t reap();
+    // cancel all tasks
+    void cancel(asio::cancellation_type ct = asio::cancellation_type::all);
+    // wait for one task to complete.
+    wait_one_op wait_one();
+    // wait for all tasks to complete
+    wait_op wait();
+    // wait for all tasks to complete
+    wait_op operator co_await ();
+    // when used with with , this will receive the exception
+    // and wait for the completion
+    // if ep is set, this will use the exception_cancel level,
+    // otherwise the normal_cancel to cancel all promises.
+    wait_op await_exit(std::exception_ptr ep);
+};
+
+
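
A small usage sketch combining wait_group with with (worker is a hypothetical promise):

+
+
async::promise<void> worker(int i); // hypothetical
+
+async::promise<void> run_workers()
+{
+  co_await async::with(async::wait_group(),
+    [](async::wait_group & wg) -> async::promise<void>
+    {
+      for (int i = 0; i < 4; i++)
+        wg.push_back(worker(i)); // insert eagerly started promises
+      co_await wg.wait();        // wait for all of them to complete
+    });
+}
+
+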
+
+
+

async/spawn.hpp

+
+

The spawn functions allow using a task directly with asio:

+
+
+
+
auto spawn(                            task<T>    && t, CompletionToken&& token);
+auto spawn(asio::io_context & context, task<T>    && t, CompletionToken&& token);
+auto spawn(Executor executor,          task<T>    && t, CompletionToken&& token);
+
+
+
+

Spawn will post in both directions, so it is safe to run the task +on another executor and consume the result on the current one with use_op.

+
+
+

Example

+
+
+
async::task<int> work();
+
+int main(int argc, char *argv[])
+{
+  asio::io_context ctx{BOOST_ASIO_CONCURRENCY_HINT_1};
+  auto f = spawn(ctx, work(), asio::use_future);
+  ctx.run();
+
+  return f.get();
+}
+
+
+
+ + + + + +
+ + +The caller needs to make sure that the executor is not running on multiple threads +concurrently, e.g. by using a single-threaded context. +
+
+
+
+
+

async/run.hpp

+
+

The run function is similar to spawn but runs synchronously. +It will internally set up an execution context and the memory resources.

+
+
+

This can be useful when integrating a piece of async code into a synchronous application.

+
+
+

Outline

+
+
+
// Run the task and return its value or rethrow any exception.
+T run(task<T> t);
+
+
+
+
+

Example

+
+
+
async::task<int> work();
+
+int main(int argc, char *argv[])
+{
+  return run(work());
+}
+
+
+
+
+
+

async/thread.hpp

+
+

The thread type is another way to create an environment that is similar to main, but doesn’t use a signal_set.

+
+
+
+
async::thread my_thread()
+{
+  auto exec = co_await async::this_coro::executor;             (1)
+  asio::steady_timer tim{exec, std::chrono::milliseconds(50)}; (2)
+  co_await tim.async_wait(async::use_op);                      (3)
+  co_return 0;
+}
+
+
+
+ + + + + + + + + + + + + +
1get the executor thread running on
2Use it with an asio object
3co_await an async operation
+
+
+

A thread can be used like a std::thread:

+
+
+
+
int main(int argc, char * argv[])
+{
+  auto thr = my_thread();
+  thr.join();
+  return 0;
+}
+
+
+
+

A thread is also an awaitable (including cancellation).

+
+
+
+
async::main co_main(int argc, char * argv[])
+{
+  auto thr = my_thread();
+  co_await thr;
+  co_return 0;
+}
+
+
+
+ + + + + +
+ + +Destructing a detached thread will cause a hard stop (io_context::stop) and join the thread. +
+
+
+ + + + + +
+ + +Nothing in this library, except for awaiting an async/thread.hpp and async/spawn.hpp, is thread-safe. +If you need to transfer data across threads, you’ll need a thread-safe utility like asio::concurrent_channel. +You cannot share any async primitives between threads, +with the sole exception of being able to spawn a task onto another thread’s executor. +
+
+
+

Executor

+
+

It will also create an asio::io_context to run on, which you can get through this_coro::executor. +It will be assigned to async::this_thread::get_executor().

+
+
+
+

Memory Resource

+
+

It also creates a memory resource that will be used as a default for internal memory allocations. +It will be assigned to the thread_local async::this_thread::get_default_resource().

+
+
+
+

Outline

+
+
+
struct thread
+{
+  // Send a cancellation signal
+  void cancel(asio::cancellation_type type = asio::cancellation_type::all);
+
+  // Add the functions similar to `std::thread`
+  void join();
+  bool joinable() const;
+  void detach();
+  // Allow the thread to be awaited
+  auto operator co_await() & -> detail::thread_awaitable; (1)
+  auto operator co_await() && -> detail::thread_awaitable; (2)
+
+  // Stops the io_context & joins the executor
+  ~thread();
+  /// Move constructible
+  thread(thread &&) noexcept = default;
+
+  using executor_type = executor;
+
+  using id = std::thread::id;
+  id get_id() const noexcept;
+
+  executor_type get_executor() const;
+};
+
+
+
+ + + + + + + + + +
1Supports Interrupt Wait
2Always forward cancel
+
+
+
+

Promise

+
+

The thread promise has the following properties.

+
+ +
+
+
+

async/result.hpp

+
+

Awaitables can be modified to return system::result or +std::tuple instead of using exceptions.

+
+
+
+
// value only
+T res = co_await foo();
+
+// as result
+system::result<T, std::exception_ptr> res = co_await async::as_result(foo());
+
+// as tuple
+std::tuple<std::exception_ptr, T> res = co_await async::as_tuple(foo());
+
+
+
+

Awaitables can also provide custom ways to handle results and tuples, +by providing await_resume overloads using async::as_result_tag and async::as_tuple_tag:

+
+
+
+
your_result_type await_resume(async::as_result_tag);
+your_tuple_type  await_resume(async::as_tuple_tag);
+
+
+
+

This allows an awaitable to provide other error types than std::exception_ptr, +for example system::error_code. This is done by op and channel.

+
+
+
+
// example of an op with result system::error_code, std::size_t
+system::result<std::size_t>                 await_resume(async::as_result_tag);
+std::tuple<system::error_code, std::size_t> await_resume(async::as_tuple_tag);
+
+
+
+ + + + + +
+ + +Awaitables are still allowed to throw exceptions, e.g. for critical exceptions such as OOM. +
+
+
+
+

async/async_for.hpp

+
+

For types like generators, a BOOST_ASYNC_FOR macro is provided to emulate an async for loop.

+
+
+
+
async::generator<int> gen();
+
+async::main co_main(int argc, char * argv[])
+{
+    BOOST_ASYNC_FOR(auto i, gen())
+        printf("Generated value %d\n", i);
+
+    co_return 0;
+}
+
+
+
+

The requirement is that the awaitable used in the for loop has an operator bool to check if it +can be awaited again. This is the case for generator and promise.

+
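
The behaviour is roughly equivalent to writing the loop by hand, as in this sketch:

+
+
auto g = gen();
+while (g)              // operator bool: can it be awaited again?
+{
+  auto i = co_await g; // obtain the next value
+  printf("Generated value %d\n", i);
+}
+
+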
+
+
+

async/error.hpp

+
+

In order to make errors easier to manage, async provides an error_category to be used with +boost::system::error_code.

+
+
+
+
enum class error
+{
+  moved_from,
+  detached,
+  completed_unexpected,
+  wait_not_ready,
+  already_awaited,
+  allocation_failed
+};
+
+system::error_category & async_category();
+system::error_code make_error_code(error e);
+
+
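
A minimal sketch of constructing an error_code from the enum:

+
+
system::error_code ec = async::make_error_code(async::error::detached);
+// the code uses the async category
+bool is_async = (ec.category() == async::async_category());
+
+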
+
+
+

async/config.hpp

+
+

The config header allows configuring some implementation details of boost.async.

+
+
+

executor_type

+
+

The executor type defaults to boost::asio::any_io_executor.

+
+
+

You can set it to a custom executor by defining BOOST_ASYNC_CUSTOM_EXECUTOR +and adding a boost::async::executor type yourself.

+
+
+

Alternatively, BOOST_ASYNC_USE_IO_CONTEXT can be defined +to set the executor to boost::asio::io_context::executor_type.

+
+
+
+

pmr

+
+

Boost.async can be used with different pmr implementations, defaulting to std::pmr.

+
+
+

The following macros can be used to configure it:

+
+
+
  • BOOST_ASYNC_USE_STD_PMR

  • BOOST_ASYNC_USE_BOOST_CONTAINER_PMR

  • BOOST_ASYNC_USE_CUSTOM_PMR
+
+
+

If you define BOOST_ASYNC_USE_CUSTOM_PMR you will need to provide a boost::async::pmr namespace, +that is a drop-in replacement for std::pmr.

+
+
+

Alternatively, the pmr use can be disabled with

+
+
+
  • BOOST_ASYNC_NO_PMR
+
+
+

In this case, async will use a non-pmr monotonic resource for the +synchronization functions (select, gather and join).

+
+
+

use_op uses a small-buffer-optimized resource whose size can be set by defining +BOOST_ASYNC_SBO_BUFFER_SIZE; it defaults to 4096 bytes.

+
+
+
+
+

async/leaf.hpp

+
+

Async provides integration with boost.leaf. +It provides functions similar to leaf that take an awaitable +instead of a function object and return an awaitable.

+
+
+
+
template<awaitable TryAwaitable, typename ... H >
+auto try_catch(TryAwaitable && try_coro, H && ... h );
+
+template<awaitable TryAwaitable, typename ... H >
+auto try_handle_all(TryAwaitable && try_coro, H && ... h );
+
+template<awaitable TryAwaitable, typename ... H >
+auto try_handle_some(TryAwaitable && try_coro, H && ... h );
+
+
+
+

See the leaf documentation for details.

+
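
A brief sketch (do_work and my_error are hypothetical):

+
+
struct my_error { int code; }; // hypothetical error type transported via leaf
+
+async::promise<void> do_work(); // hypothetical awaitable
+
+async::main co_main(int argc, char * argv[])
+{
+  co_await async::try_catch(
+      do_work(),
+      [](my_error e) { printf("error: %d\n", e.code); });
+  co_return 0;
+}
+
+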
+ +
+
+
+ +
+ + + \ No newline at end of file diff --git a/technical_background.html b/technical_background.html new file mode 100644 index 00000000..650b3548 --- /dev/null +++ b/technical_background.html @@ -0,0 +1,896 @@ + + + + + + + + +Documentation boost.async + + + + + + + + +
+
+

Technical Background

+
+
+

Stackless

+
+

C++20 coroutines are stackless, meaning they don’t have their own stack.

+
+
+

A stack in C++ describes the callstack, i.e. all the function frames stacked. +A function frame is the memory a function needs to operate, i.e. a slice of memory +to store its variables and information such as the return address.

+
+
+ + + + + +
+ + +The size of a function frame is known at compile time, but not outside the compile unit containing its definition. +
+
+
+
+
int bar() {return 0;} // the deepest point of the stack
+int foo() {return bar();}
+
+int main()
+{
+    return foo();
+}
+
+
+
+

The call stack in the above example is:

+
+
+
+
main()
+  foo()
+    bar()
+
+
+
+
+Diagram +
+
+
+

Coroutines can be implemented as stackful, which means that they allocate a fixed chunk of memory and stack function frames similar to a thread. +C++20 coroutines are stackless, i.e. they only allocate their own frame and use the caller’s stack on resumption. Using our previous example:

+
+
+
+
fictional_eager_coro_type<int> example()
+{
+    co_yield 0;
+    co_yield 1;
+}
+
+void nested_resume(fictional_eager_coro_type<int>& f)
+{
+    f.resume();
+}
+
+int main()
+{
+    auto f = example();
+    nested_resume(f);
+    f.reenter();
+    return 0;
+}
+
+
+
+

This will yield a call stack similar to this:

+
+
+
+
main()
+  f$example()
+  nested_resume()
+    f$example()
+  f$example()
+
+
+
+
+Diagram +
+
+
+

The same applies if a coroutine gets moved across threads.

+
+
+
+

Lazy & eager

+
+

A coroutine is lazy if it only starts executing its code after it gets resumed, while an eager one will execute right away until its first suspension point (i.e. a co_await, co_yield or co_return expression).

+
+
+
+
lazy_coro co_example()
+{
+    printf("Entered coro\n");
+    co_yield 0;
+    printf("Coro done\n");
+}
+
+int main()
+{
+    printf("enter main\n");
+    auto lazy = co_example();
+    printf("constructed coro\n");
+    lazy.resume();
+    printf("resumed once\n");
+    lazy.resume();
+    printf("resumed twice\n");
+    return 0;
+}
+
+
+
+

Which will produce output like this:

+
+
+
+
enter main
+constructed coro
+Entered coro
+resumed once
+Coro done
+resumed twice
+
+
+
+
+Diagram +
+
+
+

Whereas an eager coro would look like this:

+
+
+
+
eager_coro co_example()
+{
+    printf("Entered coro\n");
+    co_yield 0;
+    printf("Coro done\n");
+}
+
+int main()
+{
+    printf("enter main\n");
+    auto lazy = co_example();
+    printf("constructed coro\n");
+    lazy.resume();
+    printf("resumed once\n");
+    return 0;
+}
+
+
+
+

Which will produce output like this:

+
+
+
+
enter main
+Entered coro
+constructed coro
+Coro done
+resumed once
+
+
+
+
+Diagram +
+
+
+
+
+ +
+ + + \ No newline at end of file diff --git a/tutorial.html b/tutorial.html new file mode 100644 index 00000000..8d654fca --- /dev/null +++ b/tutorial.html @@ -0,0 +1,1613 @@ + + + + + + + + +Documentation boost.async + + + + + + + + +
+
+

Tutorial

+
+
+

delay

+
+

Let’s start with the simplest example possible: a simple delay.

+
+
+
example/delay.cpp
+
+
async::main co_main(int argc, char * argv[]) (1)
+{
+  asio::steady_timer tim{co_await asio::this_coro::executor, (2)
+                         std::chrono::milliseconds(std::stoi(argv[1]))}; (3)
+  co_await tim.async_wait(async::use_op); (4)
+  co_return 0; (5)
+}
+
+
+
+ + + + + + + + + + + + + + + + + + + + + +
1The co_main function defines an implicit main when defined +and is the easiest way to set up an environment to run asynchronous code.
2Take the executor from the current coroutine promise.
3Use an argument to set the timeout
4Perform the wait by using async::use_op.
5Return a value that gets returned from the implicit main.
+
+
+

In this example we use the async/main.hpp header, which provides us with a main coroutine if co_main +is defined as above. This has a few advantages:

+
+
+
  • The environment gets set up correctly (executor & memory)

  • asio is signaled that the context is single threaded

  • an asio::signal_set with SIGINT & SIGTERM is automatically connected to cancellations (i.e. Ctrl+C causes cancellations)
+
+
+

This coroutine then has an executor in its promise (the promise is the C++ name for the coroutine state, +not to be confused with async/promise.hpp), which we can obtain through the dummy-awaitables in +the this_coro namespace.

+
+
+

We can then construct a timer and initiate the async_wait with use_op. +async provides multiple ways to co_await to interact with asio, of which use_op is the easiest.

+
+
+
+

echo server

+
+

We’ll be using the use_op (asio completion) token everywhere, +so we set it as the default completion token, which lets us skip the last parameter.

+
+
+
example/echo_server.cpp declarations
+
+
namespace async = boost::async;
+using boost::asio::ip::tcp;
+using boost::asio::detached;
+using tcp_acceptor = async::use_op_t::as_default_on_t<tcp::acceptor>;
+using tcp_socket   = async::use_op_t::as_default_on_t<tcp::socket>;
+namespace this_coro = boost::async::this_coro;
+
+
+
+

We’re writing the echo function as a promise coroutine. +It’s an eager coroutine and recommended as the default; +in case a lazy coro is needed, task is available.

+
+
+
example/echo_server.cpp echo function
+
+
async::promise<void> echo(tcp_socket socket)
+{
+  try (1)
+  {
+    char data[4096];
+    while (socket.is_open()) (2)
+    {
+      std::size_t n = co_await socket.async_read_some(boost::asio::buffer(data)); (3)
+      co_await async_write(socket, boost::asio::buffer(data, n)); (4)
+    }
+  }
+  catch (std::exception& e)
+  {
+    std::printf("echo: exception: %s\n", e.what());
+  }
+}
+
+
+
+ + + + + + + + + + + + + + + + + +
1When using the use_op completion token, I/O errors are translated into C++ exceptions. Additionally, +if the coroutine gets cancelled (e.g. because the user hit Ctrl-C), +an exception will be raised, too. Under these conditions, we print the error and exit the loop.
2We run the loop until we get cancelled (exception) or the user closes the connection.
3Read as much as is available.
4Write all the read bytes.
+
+
+

Note that promise is eager. Calling echo will immediately execute code until async_read_some +and then return control to the caller.

+
+
+

Next, we also need an acceptor function. Here, we’re using a generator to manage the acceptor state. +This is a coroutine that can be co_awaited multiple times, until a co_return expression is reached.

+
+
+
example/echo_server.cpp listen function
+
+
async::generator<tcp_socket> listen()
+{
+  tcp_acceptor acceptor({co_await async::this_coro::executor}, {tcp::v4(), 55555});
+  for (;;) (1)
+  {
+    tcp_socket sock = co_await acceptor.async_accept(); (2)
+    co_yield std::move(sock); (3)
+  }
+  co_return tcp_socket{acceptor.get_executor()}; (4)
+}
+
+
+
+ + + + + + + + + + + + + + + + + +
1Cancellation will also lead to an exception here being thrown from the co_await
2Asynchronously accept the connection
3Yield it to the awaiting coroutine
4co_return a value for C++ conformance.
+
+
+

With those two functions we can now write the server:

+
+
+
example/echo_server.cpp run_server function
+
+
async::promise<void> run_server(async::wait_group & workers)
+{
+  auto l = listen(); (1)
+  while (true)
+  {
+    if (workers.size() == 10u)
+      co_await workers.wait_one();  (2)
+    else
+      workers.push_back(echo(co_await l)); (3)
+  }
+}
+
+
+
+ + + + + + + + + + + + + +
1Construct the listener generator coroutine. When the object is destroyed, +the coroutine will be cancelled, performing all required cleanup.
2When we have more than 10 workers, we wait for one to finish
3Accept a new connection & launch it.
+
+
+

The wait_group is used to manage the running echo functions. +This class will cancel & await the running echo coroutines.

+
+
+

We do not need to do the same for the listener, because it will just stop on its own, when l gets destroyed. +The destructor of a generator will cancel it.

+
+
+

Since the promise is eager, just calling it is enough to launch. +We then put those promises into a wait_group which will allow us to tear down all the workers on scope exit.

+
+
+
example/echo_server.cpp co_main function
+
+
async::main co_main(int argc, char ** argv)
+{
+  co_await async::with(async::wait_group(), &run_server); (1)
+  co_return 0u;
+}
+
+
+
+ + + + + +
1Run run_server with an async scope.
+
+
+

The with function shown above will run a function with a resource such as wait_group. +On scope exit, with will invoke & co_await an asynchronous teardown function. +This will cause all connections to be properly shut down before co_main exits.

+
+
+
+

price ticker

+
+

To demonstrate channels and other tools, we need a certain complexity. +For that purpose our project is a price ticker that connects to +https://blockchain.info. A user can then connect to localhost +to query a given currency pair, like this:

+
+
+
+
wscat -c localhost:8080/btc/usd
+
+
+
+

First we do the same declarations as echo-server.

+
+
+
example/ticker.cpp declarations
+
+
using executor_type = async::use_op_t::executor_with_default<async::executor>;
+using socket_type   = typename asio::ip::tcp::socket::rebind_executor<executor_type>::other;
+using acceptor_type = typename asio::ip::tcp::acceptor::rebind_executor<executor_type>::other;
+using websocket_type = beast::websocket::stream<asio::ssl::stream<socket_type>>;
+namespace http = beast::http;
+
+
+
+

The next step is to write a function that connects an ssl-stream +to the upstream server:

+
+
+
example/ticker.cpp connect
+
+
async::promise<asio::ssl::stream<socket_type>> connect(
+        std::string host, boost::asio::ssl::context & ctx)
+{
+    asio::ip::tcp::resolver res{async::this_thread::get_executor()};
+    auto ep = co_await res.async_resolve(host, "https", async::use_op); (1)
+
+    asio::ssl::stream<socket_type> sock{async::this_thread::get_executor(), ctx};
+    co_await sock.next_layer().async_connect(*ep.begin()); (2)
+    co_await sock.async_handshake(asio::ssl::stream_base::client); (3)
+
+    co_return sock; (4)
+}
+
+
+
+ + + + + + + + + + + + + + + + + +
1Lookup the host
2Connect to the endpoint
3Do the ssl handshake
4Return the socket to the caller
+
+
+

Next, we’ll need a function to do the websocket upgrade +on an existing ssl-stream.

+
+
+
example/ticker.cpp connect_to_blockchain_info
+
+
async::promise<void> connect_to_blockchain_info(websocket_type & ws)
+{
+ ws.set_option(beast::websocket::stream_base::decorator(
+     [](beast::websocket::request_type& req)
+     {
+       req.set(http::field::user_agent,
+               std::string(BOOST_BEAST_VERSION_STRING) + " async-ticker");
+       req.set(http::field::origin,
+               "https://exchange.blockchain.com"); (1)
+     }));
+
+ co_await ws.async_handshake("ws.blockchain.info", "/mercury-gateway/v1/ws"); (2)
+}
+
+
+
+ + + + + + + + + +
1blockchain.info requires this header to be set.
2Perform the websocket handshake.
+
+
+

Once the websocket is connected, we want to continuously receive json messages, +for which a generator is a good choice.

+
+
+
example/ticker.cpp json_read
+
+
async::generator<json::object> json_reader(websocket_type & ws)
+try
+{
+    beast::flat_buffer buf;
+    while (ws.is_open()) (1)
+    {
+        auto sz = co_await ws.async_read(buf); (2)
+        json::string_view data{static_cast<const char*>(buf.cdata().data()), sz};
+        auto obj = json::parse(data);
+        co_yield obj.as_object(); (3)
+        buf.consume(sz);
+    }
+    co_return {};
+}
+catch (std::exception & e)
+{
+  std::cerr << "Error reading: " << e.what() << std::endl;
+  throw;
+}
+
+
+
+ + + + + + + + + + + + + +
1Keep running as long as the socket is open
2Read a frame from the websocket
3Parse & co_yield it as an object.
+
+
+

This then needs to be connected to the subscribers, for which we’ll utilize channels to pass raw json. +To make lifetime management easy, the subscriber will hold a shared_ptr, and the producer a weak_ptr.

+
+
+
example/ticker.cpp subscription types
+
+
using subscription = std::pair<std::string, std::weak_ptr<async::channel<json::object>>>;
+using subscription_channel = std::weak_ptr<async::channel<json::object>>;
+using subscription_map = boost::unordered_multimap<std::string, subscription_channel>;
+
+
+
+

The main function running the blockchain connector operates on two inputs: +data coming from the websocket and a channel to handle new subscriptions.

+
+
+
example/ticker.cpp run blockchain_info
+
+
async::promise<void> run_blockchain_info(async::channel<subscription> & subc)
+try
+{
+    asio::ssl::context ctx{asio::ssl::context_base::tls_client};
+    websocket_type ws{co_await connect("blockchain.info", ctx)};
+    co_await connect_to_blockchain_info(ws); (1)
+
+    subscription_map subs;
+    std::list<std::string> unconfirmed;
+
+    auto rd = json_reader(ws); (2)
+    while (ws.is_open()) (3)
+    {
+      switch (auto msg = co_await async::select(rd, subc.read()); msg.index()) (4)
+      {
+        case 0: (5)
+          if (auto ms = get<0>(msg);
+              ms.at("event") == "rejected") // invalid sub, cancel however subbed
+            co_await handle_rejections(unconfirmed, subs, ms);
+          else
+            co_await handle_update(unconfirmed, subs, ms, ws);
+        break;
+        case 1: // (6)
+            co_await handle_new_subscription(
+                unconfirmed, subs,
+                std::move(get<1>(msg)), ws);
+        break;
+      }
+    }
+
+    for (auto & [k ,c] : subs)
+    {
+        if (auto ptr = c.lock())
+            ptr->close();
+    }
+}
+catch(std::exception & e)
+{
+  std::cerr << "Exception: " << e.what() << std::endl;
+  throw;
+}
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
1Initialize the connection
2Instantiate the json_reader
3Run as long as the websocket is open
4Select, i.e. wait for either a new json message or subscription
5When it’s a json message, handle an update or a rejection
6Handle new subscription messages
+
+
+

The handle_* functions’ contents are not as important for the async functionality, +so they are skipped in this tutorial.

+
+
+

The handle_new_subscription function sends a message to blockchain.info, +which will send a confirmation or rejection back. +The handle_rejection and handle_update functions take the json values +and forward them to the subscription channel.

+
+
+

On the consumer side, our server will just forward data to the client. +If the client inputs data, we’ll close the websocket immediately. +We’re using as_tuple to ignore potential errors.

+
+
+
example/ticker.cpp read and close
+
+
async::promise<void> read_and_close(beast::websocket::stream<socket_type> & st, beast::flat_buffer buf)
+{
+    system::error_code ec;
+    co_await st.async_read(buf, asio::as_tuple(async::use_op));
+    co_await st.async_close(beast::websocket::close_code::going_away, asio::as_tuple(async::use_op));
+    st.next_layer().close(ec);
+}
+
+
+
+

Next, we run the session that serves the user’s requests.

+
+
+
example/ticker.cpp run_session
+
+
async::promise<void> run_session(beast::websocket::stream<socket_type> st,
+                                 async::channel<subscription> & subc)
+try
+{
+    http::request<http::empty_body> req;
+    beast::flat_buffer buf;
+    co_await http::async_read(st.next_layer(), buf, req); (1)
+    // check the target
+    auto r = urls::parse_uri_reference(req.target());
+    if (r.has_error() || (r->segments().size() != 2u)) (2)
+    {
+        http::response<http::string_body> res{http::status::bad_request, 11};
+        res.body() = r.has_error() ? r.error().message() :
+                    "url needs two segments, e.g. /btc/usd";
+        co_await http::async_write(st.next_layer(), res);
+        st.next_layer().close();
+        co_return ;
+    }
+
+    co_await st.async_accept(req); (3)
+
+    auto sym = std::string(r->segments().front()) + "-" +
+               std::string(r->segments().back());
+    boost::algorithm::to_upper(sym);
+    // close when data gets sent
+    auto p = read_and_close(st, std::move(buf)); (4)
+
+    auto ptr = std::make_shared<async::channel<json::object>>(1u); (5)
+    co_await subc.write(subscription{sym, ptr}); (6)
+
+    while (ptr->is_open() && st.is_open()) (7)
+    {
+      auto bb = json::serialize(co_await ptr->read());
+      co_await st.async_write(asio::buffer(bb));
+    }
+
+    co_await st.async_close(beast::websocket::close_code::going_away,
+                            asio::as_tuple(async::use_op)); (8)
+    st.next_layer().close();
+    co_await p; (9)
+
+}
+catch(std::exception & e)
+{
+    std::cerr << "Session ended with exception: " << e.what() << std::endl;
+}
+
+
+
1. Read the http request, because we want the path
2. Check the path, e.g. /btc/usd.
3. Accept the websocket
4. Start reading & close if the consumer sends something
5. Create the channel to receive updates
6. Send a subscription request to run_blockchain_info
7. While the channel & websocket are open, we're forwarding data.
8. Close the socket & ignore the error
9. The websocket is surely closed by now, so wait for read_and_close to complete.
+
+
+

With run_session and run_blockchain_info written, we can now move on to main:

+
+
+
example/ticker.cpp main
+
+
async::main co_main(int argc, char * argv[])
+{
+    acceptor_type acc{co_await async::this_coro::executor,
+                      asio::ip::tcp::endpoint (asio::ip::tcp::v4(), 8080)};
+    std::cout << "Listening on localhost:8080" << std::endl;
+
+    constexpr int limit = 10; // allow 10 ongoing sessions
+    async::channel<subscription> sub_manager; (1)
+
+    co_await join( (2)
+      run_blockchain_info(sub_manager),
+      async::with( (3)
+        async::wait_group(
+            asio::cancellation_type::all,
+            asio::cancellation_type::all),
+        [&](async::wait_group & sessions) -> async::promise<void>
+        {
+          while (!co_await async::this_coro::cancelled) (4)
+          {
+            if (sessions.size() >= limit) (5)
+              co_await sessions.wait_one();
+
+            auto conn = co_await acc.async_accept(); (6)
+            sessions.push_back( (7)
+                run_session(
+                    beast::websocket::stream<socket_type>{std::move(conn)},
+                    sub_manager));
+          }
+        })
+      );
+
+    co_return 0;
+}
+
+
+
1. Create the channel to manage subscriptions
2. Use join to run both tasks in parallel.
3. Use an async scope to provide a wait_group.
4. Run until cancelled.
5. When we've reached the limit we wait for one task to complete.
6. Wait for a new connection.
7. Insert the session into the wait_group.
+
+
+

main uses join because a failure of one task should cancel the other one, as the sketch below illustrates.

+
+
+
+
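A minimal sketch (not part of the example) of that behaviour: if one child of join fails, the sibling is cancelled and the exception resurfaces at the co_await.

// Hypothetical coroutines, only meant to illustrate join's failure handling.
async::promise<void> might_fail()
{
  throw std::runtime_error("boom");
  co_return;
}

async::promise<void> runs_forever()
{
  asio::steady_timer tim{co_await async::this_coro::executor, std::chrono::hours(1)};
  co_await tim.async_wait(async::use_op); // gets cancelled when might_fail throws
}

// somewhere inside an async coroutine:
try
{
  co_await async::join(might_fail(), runs_forever());
}
catch (std::exception & e)
{
  // join cancelled runs_forever and delivered the exception from might_fail
  std::cerr << "join rethrew: " << e.what() << std::endl;
}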

delay op

+
+

So far we have used use_op, an implicit operation based on asio's completion token mechanism.

+
+
+
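As a reminder, use_op is an ordinary completion token, so any asio-style operation can be awaited with it from inside an async coroutine, e.g. with a steady_timer:

// fragment, to be placed inside any async coroutine
asio::steady_timer tim{co_await async::this_coro::executor,
                       std::chrono::milliseconds(50)};
co_await tim.async_wait(async::use_op); // completes when the timer fires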

We can, however, implement our own ops, which can also utilize the await_ready optimization. To leverage this coroutine feature, async provides an easy way to create a skippable operation:

+
+
+
example/delay_op.cpp
+
+
struct wait_op final : async::op<system::error_code> (1)
+{
+  asio::steady_timer & tim;
+  wait_op(asio::steady_timer & tim) : tim(tim) {}
+  void ready(async::handler<system::error_code> h ) override (2)
+  {
+    if (tim.expiry() < std::chrono::steady_clock::now())
+      h(system::error_code{});
+  }
+  void initiate(async::completion_handler<system::error_code> complete) override (3)
+  {
+    tim.async_wait(std::move(complete));
+  }
+};
+
+
+async::main co_main(int argc, char * argv[])
+{
+  asio::steady_timer tim{co_await asio::this_coro::executor,
+                         std::chrono::milliseconds(std::stoi(argv[1]))};
+  co_await wait_op(tim); (4)
+  co_return 0;
+}
+
+
+
1. Declare the op. We inherit op to make it awaitable.
2. The pre-suspend check is implemented here
3. Do the wait if we need to
4. Use the op just like any other awaitable.
+
+
+

This way we can minimize the number of coroutine suspensions.

+
+
+
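For instance, reusing the wait_op from above: once the timer has expired, a second await completes through ready() without suspending at all (a small fragment inside a coroutine):

// fragment, to be placed inside any async coroutine
asio::steady_timer tim{co_await async::this_coro::executor,
                       std::chrono::milliseconds(10)};
co_await wait_op(tim); // not yet expired: initiate() runs and the coroutine suspends
co_await wait_op(tim); // already expired: ready() completes it, no suspension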

While the above is used with asio, you can also use these handlers with any other callback-based code.

+
+
+
+
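As a sketch of that idea, the op interface can hand its completion handler to a plain callback API. The start_work function below is made up for illustration and only declared, and we assume its callback fires on the coroutine's executor.

// Hypothetical C-style API, NOT part of boost.async or asio:
// invokes cb(user_data, status) once, later, on the same event loop.
void start_work(void (*cb)(void * user_data, int status), void * user_data);

struct work_op final : async::op<int>
{
  void ready(async::handler<int>) override {} // no synchronous fast-path here

  void initiate(async::completion_handler<int> complete) override
  {
    // keep the handler alive on the heap until the callback fires
    auto * h = new async::completion_handler<int>(std::move(complete));
    start_work([](void * user_data, int status)
               {
                 std::unique_ptr<async::completion_handler<int>> handler{
                     static_cast<async::completion_handler<int> *>(user_data)};
                 (*handler)(status); // resumes the awaiting coroutine
               },
               h);
  }
};

// usage inside any async coroutine:
// int status = co_await work_op{};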

Generator with push value

+
+

Coroutines with push values are not as common, but they can simplify certain issues significantly.

+
+
+

Since we already have a json_reader from the previous example, here is how we can write a json_writer that gets values pushed in.

+
+
+

The advantage of using a generator here is its internal state management: the buffer and the serializer stay alive between the values pushed in.

+
+
+
+
async::generator<system::error_code, json::object>
+    json_writer(websocket_type & ws)
+try
+{
+    char buffer[4096];
+    json::serializer ser;
+
+    while (ws.is_open()) (1)
+    {
+        auto val = co_yield system::error_code{}; (2)
+        ser.reset(&val); // feed the freshly pushed object into the serializer
+
+        while (!ser.done())
+        {
+            auto sv = ser.read(buffer);
+            co_await ws.async_write(asio::buffer(sv.data(), sv.size())); (3)
+        }
+
+    }
+    co_return {};
+}
+catch (system::system_error& e)
+{
+    co_return e.code();
+}
+catch (std::exception & e)
+{
+    std::cerr << "Error reading: " << e.what() << std::endl;
+    throw;
+}
+
+
+
1. Keep running as long as the socket is open
2. co_yield the current error and retrieve a new value.
3. Write a frame to the websocket
+
+
+

Now we can use the generator like this:

+
+
+
+
auto g = json_writer(my_ws);
+
+extern std::vector<json::object> to_write;
+
+for (auto && tw : std::move(to_write))
+{
+    if (auto ec = co_await g(std::move(tw)))
+        co_return ec; // propagate the error
+}
+
+
+
+
+

Advanced examples

+
+

More examples are provided in the repository as code only. All examples are listed below.

+
Table 4. All examples

example/http.cpp

An HTTP client that performs a single HTTP GET request.

example/outcome.cpp

Using the boost.outcome coroutine types.

example/python.cpp & example/python.py

Using nanobind to integrate async with python. It uses python's asyncio as executor and allows C++ to co_await python functions and vice versa.

example/signals.cpp

Adopting boost.signals2 into an awaitable type (single threaded).

example/spsc.cpp

Creating a boost.lockfree based & awaitable spsc_queue (multi threaded).

example/thread.cpp

Using worker threads with asio's concurrent_channel.

example/thread_pool.cpp

Using an asio::thread_pool and spawning tasks onto them.

example/delay.cpp

The example used by the delay section

example/delay_op.cpp

The example used by the delay op section

example/echo_server.cpp

The example used by the echo server section

example/ticker.cpp

The example used by the price ticker section

example/channel.cpp

The example used by the channel reference

+
+
+
+ +
+ + + \ No newline at end of file