Merge pull request #1342 from msimberg/examples-80-columns
Format examples shown in documentation to 80 columns to fit better in text
msimberg authored Nov 20, 2024
2 parents f1041b1 + ea95f62 commit 4011d32
Showing 9 changed files with 62 additions and 33 deletions.
12 changes: 12 additions & 0 deletions examples/documentation/.clang-format
@@ -0,0 +1,12 @@
+# Copyright (c) 2024 ETH Zurich
+#
+# SPDX-License-Identifier: BSL-1.0
+# Distributed under the Boost Software License, Version 1.0. (See accompanying
+# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+
+# See root of repository for main configuration. We override ColumnLimit to a
+# smaller value for examples that are shown in documentation to make sure they
+# fit in a typical column of text.
+
+BasedOnStyle: InheritParentConfig
+ColumnLimit: 80
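
For context, clang-format resolves BasedOnStyle: InheritParentConfig by searching the parent directories for the nearest enclosing .clang-format file and applying the keys set in the nested file on top of it. A minimal sketch of how the two files compose, assuming a hypothetical parent configuration (the repository's actual root configuration is not shown in this diff):

# <repository root>/.clang-format -- hypothetical excerpt, for illustration only
BasedOnStyle: LLVM
ColumnLimit: 120

# examples/documentation/.clang-format -- the file added in this commit
BasedOnStyle: InheritParentConfig
ColumnLimit: 80

# Effective options for sources under examples/documentation/: all options
# inherited from the parent file, with ColumnLimit overridden to 80.

Running clang-format --style=file --dump-config from inside examples/documentation should print the merged options, which is a quick way to confirm that the override is picked up.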
10 changes: 6 additions & 4 deletions examples/documentation/drop_operation_state_documentation.cpp
@@ -22,15 +22,17 @@ int main(int argc, char* argv[])
auto sp = std::make_shared<int>(42);
std::weak_ptr<int> sp_weak = sp;

-auto s = ex::just(std::move(sp)) | ex::then([&](auto&&) { assert(sp_weak.use_count() == 1); }) |
-// Even though the shared_ptr is no longer in use, it may be kept alive by the operation state
+auto s = ex::just(std::move(sp)) |
+ex::then([&](auto&&) { assert(sp_weak.use_count() == 1); }) |
+// Even though the shared_ptr is no longer in use, it may be kept alive
+// by the operation state
ex::then([&]() {
assert(sp_weak.use_count() == 1);
return 42;
}) |
ex::drop_operation_state() |
-// Once drop_operation_state has been used, the shared_ptr is guaranteed to be released.
-// Values are passed through the adaptor.
+// Once drop_operation_state has been used, the shared_ptr is guaranteed
+// to be released. Values are passed through the adaptor.
ex::then([&]([[maybe_unused]] int x) {
assert(sp_weak.use_count() == 0);
assert(x == 42);
6 changes: 4 additions & 2 deletions examples/documentation/drop_value_documentation.cpp
@@ -24,8 +24,10 @@ int main(int argc, char* argv[])
pika::start(argc, argv);
ex::thread_pool_scheduler sched{};

-auto s = ex::just(42, custom_type{}, std::tuple("hello")) | ex::drop_value() |
-// No matter what is sent to drop_value, it won't be sent from drop_value
+auto s = ex::just(42, custom_type{}, std::tuple("hello")) |
+ex::drop_value() |
+// No matter what is sent to drop_value, it won't be sent from
+// drop_value
ex::then([] { fmt::print("I got nothing...\n"); });
tt::sync_wait(std::move(s));

19 changes: 12 additions & 7 deletions examples/documentation/hello_world_documentation.cpp
@@ -14,37 +14,42 @@

int main(int argc, char* argv[])
{
-// Most functionality is found in the pika::execution namespace. If pika is built with stdexec,
-// std::execution will also be found in this namespace.
+// Most functionality is found in the pika::execution namespace. If pika is
+// built with stdexec, std::execution will also be found in this namespace.
namespace ex = pika::execution::experimental;
// Some additional utilities are in pika::this_thread.
namespace tt = pika::this_thread::experimental;

// Start the pika runtime.
pika::start(argc, argv);

-// Create a std::execution scheduler that runs work on the default pika thread pool.
+// Create a std::execution scheduler that runs work on the default pika
+// thread pool.
ex::thread_pool_scheduler sched{};

// We can schedule work using sched.
auto snd1 = ex::just(42) | ex::continues_on(sched) | ex::then([](int x) {
fmt::print("Hello from a pika user-level thread (with id {})!\nx = {}\n",
fmt::print(
"Hello from a pika user-level thread (with id {})!\nx = {}\n",
pika::this_thread::get_id(), x);
});

// The work is started once we call sync_wait.
tt::sync_wait(std::move(snd1));

-// We can build arbitrary graphs of work using the split and when_all adaptors.
+// We can build arbitrary graphs of work using the split and when_all
+// adaptors.
auto snd2 = ex::just(3.14) | ex::split();
auto snd3 = ex::continues_on(snd2, sched) |
ex::then([](double pi) { fmt::print("Is this pi: {}?\n", pi); });
-auto snd4 = ex::when_all(std::move(snd2), ex::just(500.3)) | ex::continues_on(sched) |
+auto snd4 = ex::when_all(std::move(snd2), ex::just(500.3)) |
+ex::continues_on(sched) |
ex::then([](double pi, double r) { return pi * r * r; });
auto result = tt::sync_wait(ex::when_all(std::move(snd3), std::move(snd4)));
fmt::print("The result is {}\n", result);

-// Tell the runtime that when there are no more tasks in the queues it is ok to stop.
+// Tell the runtime that when there are no more tasks in the queues it is ok
+// to stop.
pika::finalize();

// Wait for all work to finish and stop the runtime.
3 changes: 2 additions & 1 deletion examples/documentation/init_hpp_documentation.cpp
@@ -18,7 +18,8 @@ int main(int argc, char* argv[])

pika::start(argc, argv);

-// The pika runtime is now active and we can schedule work on the default thread pool
+// The pika runtime is now active and we can schedule work on the default
+// thread pool
auto s = ex::schedule(ex::thread_pool_scheduler{}) |
ex::then([]() { fmt::print("Hello from the pika runtime\n"); });
tt::sync_wait(std::move(s));
7 changes: 4 additions & 3 deletions examples/documentation/require_started_documentation.cpp
@@ -30,14 +30,15 @@ int main(int argc, char* argv[])
}

{
-// The termination is ignored with discard, the sender is from the user's perspective
-// rightfully not used
+// The termination is ignored with discard, the sender is from the
+// user's perspective rightfully not used
auto s = ex::just() | ex::require_started();
s.discard();
}

{
-// The require_started sender terminates on destruction if it has not been used
+// The require_started sender terminates on destruction if it has not
+// been used
auto s = ex::just() | ex::require_started();
}
assert(false);
17 changes: 10 additions & 7 deletions examples/documentation/split_tuple_documentation.cpp
@@ -24,21 +24,24 @@ int main(int argc, char* argv[])

// split_tuple can be used to process the result and its square through
// senders, without having to pass both around together
-auto [snd, snd_squared] = ex::schedule(sched) | ex::then([]() { return 42; }) |
-ex::then([](int x) { return std::tuple(x, x * x); }) | ex::split_tuple();
+auto [snd, snd_squared] = ex::schedule(sched) |
+ex::then([]() { return 42; }) |
+ex::then([](int x) { return std::tuple(x, x * x); }) |
+ex::split_tuple();

-// snd and snd_squared will be ready at the same time, but can be used independently
+// snd and snd_squared will be ready at the same time, but can be used
+// independently
auto snd_print = std::move(snd) | ex::continues_on(sched) |
ex::then([](int x) { fmt::print("x is {}\n", x); });
-auto snd_process =
-std::move(snd_squared) | ex::continues_on(sched) | ex::then([](int x_squared) {
+auto snd_process = std::move(snd_squared) | ex::continues_on(sched) |
+ex::then([](int x_squared) {
fmt::print("Performing expensive operations on x * x\n");
std::this_thread::sleep_for(std::chrono::milliseconds(300));
return x_squared / 2;
});

-auto x_squared_processed =
-tt::sync_wait(ex::when_all(std::move(snd_print), std::move(snd_process)));
+auto x_squared_processed = tt::sync_wait(
+ex::when_all(std::move(snd_print), std::move(snd_process)));
fmt::print("The final result is {}\n", x_squared_processed);

pika::finalize();
14 changes: 8 additions & 6 deletions examples/documentation/unpack_documentation.cpp
@@ -21,22 +21,24 @@ int main(int argc, char* argv[])
pika::start(argc, argv);
ex::thread_pool_scheduler sched{};

-auto tuple_sender = ex::just(std::tuple(std::string("hello!"), 42)) | ex::continues_on(sched);
+auto tuple_sender = ex::just(std::tuple(std::string("hello!"), 42)) |
+ex::continues_on(sched);
auto process_data = [](auto message, auto answer) {
fmt::print("{}\nthe answer is: {}\n", message, answer);
};

-// With the unpack adaptor, process_data does not have to know that the data was originally sent
-// as a tuple
+// With the unpack adaptor, process_data does not have to know that the data
+// was originally sent as a tuple
auto unpack_sender = tuple_sender | ex::unpack() | ex::then(process_data);

-// We can manually recreate the behaviour of the unpack adaptor by using std::apply. This is
-// equivalent to the above.
+// We can manually recreate the behaviour of the unpack adaptor by using
+// std::apply. This is equivalent to the above.
auto apply_sender = tuple_sender | ex::then([&](auto tuple_of_data) {
return std::apply(process_data, std::move(tuple_of_data));
});

-tt::sync_wait(ex::when_all(std::move(unpack_sender), std::move(apply_sender)));
+tt::sync_wait(
+ex::when_all(std::move(unpack_sender), std::move(apply_sender)));

pika::finalize();
pika::stop();
7 changes: 4 additions & 3 deletions examples/documentation/when_all_vector_documentation.cpp
@@ -33,10 +33,11 @@ int main(int argc, char* argv[])
snds.reserve(n);
for (std::size_t i = 0; i < n; ++i)
{
-snds.push_back(ex::just(i) | ex::continues_on(sched) | ex::then(calculate));
+snds.push_back(
+ex::just(i) | ex::continues_on(sched) | ex::then(calculate));
}
-auto snds_print =
-ex::when_all_vector(std::move(snds)) | ex::then([](std::vector<std::size_t> results) {
+auto snds_print = ex::when_all_vector(std::move(snds)) |
+ex::then([](std::vector<std::size_t> results) {
fmt::print("Results are: {}\n", fmt::join(results, ", "));
});
tt::sync_wait(std::move(snds_print));