From 356379cc688826da4ac93808ea6982baaeb7fd6b Mon Sep 17 00:00:00 2001 From: Timo Heister Date: Mon, 3 Jul 2023 00:00:32 -0400 Subject: [PATCH] bundle taskflow 3.6.0 --- bundled/setup_bundled.cmake | 4 +- bundled/taskflow-2.5.0/README.md | 747 ----- .../include/taskflow/core/executor.hpp | 1236 -------- .../include/taskflow/core/flow_builder.hpp | 949 ------ .../include/taskflow/core/graph.hpp | 528 ---- .../include/taskflow/core/observer.hpp | 521 ---- .../include/taskflow/core/taskflow.hpp | 353 --- .../include/taskflow/core/topology.hpp | 93 - .../include/taskflow/core/tsq.hpp | 250 -- .../include/taskflow/cuda/cuda_device.hpp | 136 - .../include/taskflow/cuda/cuda_error.hpp | 31 - .../include/taskflow/cuda/cuda_flow.hpp | 503 ---- .../include/taskflow/cuda/cuda_graph.hpp | 310 -- .../include/taskflow/cuda/cuda_task.hpp | 173 -- .../include/taskflow/declarations.hpp | 36 - .../include/taskflow/nstd/any.hpp | 703 ----- .../include/taskflow/nstd/optional.hpp | 1718 ----------- .../include/taskflow/nstd/variant.hpp | 2631 ----------------- .../include/taskflow/taskflow.hpp | 13 - .../include/taskflow/utility/os.hpp | 27 - .../taskflow/utility/passive_vector.hpp | 213 -- .../include/taskflow/utility/stringify.hpp | 21 - .../include/taskflow/utility/traits.hpp | 308 -- .../LICENSE | 7 +- bundled/taskflow-3.6.0/README.md | 450 +++ .../include/algorithm/critical.hpp | 78 + .../include/algorithm/data_pipeline.hpp | 637 ++++ .../taskflow-3.6.0/include/algorithm/find.hpp | 547 ++++ .../include/algorithm/for_each.hpp | 173 ++ .../include/algorithm/launch.hpp | 58 + .../include/algorithm/partitioner.hpp | 543 ++++ .../include/algorithm/pipeline.hpp | 1663 +++++++++++ .../include/algorithm/reduce.hpp | 295 ++ .../taskflow-3.6.0/include/algorithm/scan.hpp | 614 ++++ .../taskflow-3.6.0/include/algorithm/sort.hpp | 648 ++++ .../include/algorithm/transform.hpp | 199 ++ bundled/taskflow-3.6.0/include/core/async.hpp | 396 +++ .../include/core/async_task.hpp | 125 + .../include/core/declarations.hpp | 60 + .../include/core/environment.hpp | 8 + .../include}/core/error.hpp | 6 +- .../include/core/executor-module-opt.hpp | 2025 +++++++++++++ .../taskflow-3.6.0/include/core/executor.hpp | 2313 +++++++++++++++ .../include/core/flow_builder.hpp | 1352 +++++++++ bundled/taskflow-3.6.0/include/core/graph.hpp | 998 +++++++ .../include}/core/notifier.hpp | 19 +- .../taskflow-3.6.0/include/core/observer.hpp | 1046 +++++++ .../taskflow-3.6.0/include/core/semaphore.hpp | 132 + .../include}/core/task.hpp | 608 ++-- .../taskflow-3.6.0/include/core/taskflow.hpp | 619 ++++ .../taskflow-3.6.0/include/core/topology.hpp | 56 + bundled/taskflow-3.6.0/include/core/tsq.hpp | 441 +++ .../taskflow-3.6.0/include/core/worker.hpp | 260 ++ .../include/cuda/algorithm/find.hpp | 295 ++ .../include/cuda/algorithm/for_each.hpp | 315 ++ .../include/cuda/algorithm/matmul.hpp | 57 + .../include/cuda/algorithm/merge.hpp | 585 ++++ .../include/cuda/algorithm/reduce.hpp | 460 +++ .../include/cuda/algorithm/scan.hpp | 488 +++ .../include/cuda/algorithm/sort.hpp | 506 ++++ .../include/cuda/algorithm/transform.hpp | 282 ++ .../include/cuda/algorithm/transpose.hpp | 41 + .../include/cuda/cuda_capturer.hpp | 724 +++++ .../include/cuda/cuda_device.hpp | 342 +++ .../include/cuda/cuda_error.hpp | 26 + .../include/cuda/cuda_execution_policy.hpp | 155 + .../include/cuda/cuda_graph.hpp | 805 +++++ .../include/cuda/cuda_memory.hpp | 855 ++++++ .../taskflow-3.6.0/include/cuda/cuda_meta.hpp | 452 +++ .../include/cuda/cuda_object.hpp | 287 ++ 
.../include/cuda/cuda_optimizer.hpp | 404 +++ .../include/cuda/cuda_stream.hpp | 226 ++ .../taskflow-3.6.0/include/cuda/cuda_task.hpp | 274 ++ .../taskflow-3.6.0/include/cuda/cudaflow.hpp | 1024 +++++++ .../taskflow-3.6.0/include/dsl/connection.hpp | 53 + bundled/taskflow-3.6.0/include/dsl/dsl.hpp | 13 + .../taskflow-3.6.0/include/dsl/meta_macro.hpp | 72 + .../include/dsl/task_analyzer.hpp | 40 + .../taskflow-3.6.0/include/dsl/task_dsl.hpp | 104 + .../taskflow-3.6.0/include/dsl/task_trait.hpp | 46 + .../include/dsl/tuple_utils.hpp | 43 + .../taskflow-3.6.0/include/dsl/type_list.hpp | 136 + .../include/sycl/algorithm/reduce.hpp | 487 +++ .../include/sycl/algorithm/sycl_for_each.hpp | 88 + .../include/sycl/algorithm/sycl_transform.hpp | 46 + .../include/sycl/sycl_execution_policy.hpp | 70 + .../include/sycl/sycl_graph.hpp | 255 ++ .../taskflow-3.6.0/include/sycl/sycl_meta.hpp | 517 ++++ .../taskflow-3.6.0/include/sycl/sycl_task.hpp | 209 ++ .../taskflow-3.6.0/include/sycl/syclflow.hpp | 684 +++++ bundled/taskflow-3.6.0/include/taskflow.hpp | 69 + .../include/utility/iterator.hpp | 22 + .../taskflow-3.6.0/include/utility/macros.hpp | 17 + .../taskflow-3.6.0/include/utility/math.hpp | 151 + .../include}/utility/object_pool.hpp | 182 +- bundled/taskflow-3.6.0/include/utility/os.hpp | 196 ++ .../include/utility/serializer.hpp | 1135 +++++++ .../include}/utility/singleton.hpp | 4 +- .../include/utility/small_vector.hpp | 1048 +++++++ .../taskflow-3.6.0/include/utility/stream.hpp | 31 + .../taskflow-3.6.0/include/utility/traits.hpp | 299 ++ .../include}/utility/uuid.hpp | 69 +- 102 files changed, 30628 insertions(+), 11941 deletions(-) delete mode 100644 bundled/taskflow-2.5.0/README.md delete mode 100644 bundled/taskflow-2.5.0/include/taskflow/core/executor.hpp delete mode 100644 bundled/taskflow-2.5.0/include/taskflow/core/flow_builder.hpp delete mode 100644 bundled/taskflow-2.5.0/include/taskflow/core/graph.hpp delete mode 100644 bundled/taskflow-2.5.0/include/taskflow/core/observer.hpp delete mode 100644 bundled/taskflow-2.5.0/include/taskflow/core/taskflow.hpp delete mode 100644 bundled/taskflow-2.5.0/include/taskflow/core/topology.hpp delete mode 100644 bundled/taskflow-2.5.0/include/taskflow/core/tsq.hpp delete mode 100644 bundled/taskflow-2.5.0/include/taskflow/cuda/cuda_device.hpp delete mode 100644 bundled/taskflow-2.5.0/include/taskflow/cuda/cuda_error.hpp delete mode 100644 bundled/taskflow-2.5.0/include/taskflow/cuda/cuda_flow.hpp delete mode 100644 bundled/taskflow-2.5.0/include/taskflow/cuda/cuda_graph.hpp delete mode 100644 bundled/taskflow-2.5.0/include/taskflow/cuda/cuda_task.hpp delete mode 100644 bundled/taskflow-2.5.0/include/taskflow/declarations.hpp delete mode 100644 bundled/taskflow-2.5.0/include/taskflow/nstd/any.hpp delete mode 100644 bundled/taskflow-2.5.0/include/taskflow/nstd/optional.hpp delete mode 100644 bundled/taskflow-2.5.0/include/taskflow/nstd/variant.hpp delete mode 100644 bundled/taskflow-2.5.0/include/taskflow/taskflow.hpp delete mode 100644 bundled/taskflow-2.5.0/include/taskflow/utility/os.hpp delete mode 100644 bundled/taskflow-2.5.0/include/taskflow/utility/passive_vector.hpp delete mode 100644 bundled/taskflow-2.5.0/include/taskflow/utility/stringify.hpp delete mode 100644 bundled/taskflow-2.5.0/include/taskflow/utility/traits.hpp rename bundled/{taskflow-2.5.0 => taskflow-3.6.0}/LICENSE (84%) create mode 100644 bundled/taskflow-3.6.0/README.md create mode 100644 bundled/taskflow-3.6.0/include/algorithm/critical.hpp create mode 100644 
bundled/taskflow-3.6.0/include/algorithm/data_pipeline.hpp create mode 100644 bundled/taskflow-3.6.0/include/algorithm/find.hpp create mode 100644 bundled/taskflow-3.6.0/include/algorithm/for_each.hpp create mode 100644 bundled/taskflow-3.6.0/include/algorithm/launch.hpp create mode 100644 bundled/taskflow-3.6.0/include/algorithm/partitioner.hpp create mode 100644 bundled/taskflow-3.6.0/include/algorithm/pipeline.hpp create mode 100644 bundled/taskflow-3.6.0/include/algorithm/reduce.hpp create mode 100644 bundled/taskflow-3.6.0/include/algorithm/scan.hpp create mode 100644 bundled/taskflow-3.6.0/include/algorithm/sort.hpp create mode 100644 bundled/taskflow-3.6.0/include/algorithm/transform.hpp create mode 100644 bundled/taskflow-3.6.0/include/core/async.hpp create mode 100644 bundled/taskflow-3.6.0/include/core/async_task.hpp create mode 100644 bundled/taskflow-3.6.0/include/core/declarations.hpp create mode 100644 bundled/taskflow-3.6.0/include/core/environment.hpp rename bundled/{taskflow-2.5.0/include/taskflow => taskflow-3.6.0/include}/core/error.hpp (84%) create mode 100644 bundled/taskflow-3.6.0/include/core/executor-module-opt.hpp create mode 100644 bundled/taskflow-3.6.0/include/core/executor.hpp create mode 100644 bundled/taskflow-3.6.0/include/core/flow_builder.hpp create mode 100644 bundled/taskflow-3.6.0/include/core/graph.hpp rename bundled/{taskflow-2.5.0/include/taskflow => taskflow-3.6.0/include}/core/notifier.hpp (96%) create mode 100644 bundled/taskflow-3.6.0/include/core/observer.hpp create mode 100644 bundled/taskflow-3.6.0/include/core/semaphore.hpp rename bundled/{taskflow-2.5.0/include/taskflow => taskflow-3.6.0/include}/core/task.hpp (50%) create mode 100644 bundled/taskflow-3.6.0/include/core/taskflow.hpp create mode 100644 bundled/taskflow-3.6.0/include/core/topology.hpp create mode 100644 bundled/taskflow-3.6.0/include/core/tsq.hpp create mode 100644 bundled/taskflow-3.6.0/include/core/worker.hpp create mode 100644 bundled/taskflow-3.6.0/include/cuda/algorithm/find.hpp create mode 100644 bundled/taskflow-3.6.0/include/cuda/algorithm/for_each.hpp create mode 100644 bundled/taskflow-3.6.0/include/cuda/algorithm/matmul.hpp create mode 100644 bundled/taskflow-3.6.0/include/cuda/algorithm/merge.hpp create mode 100644 bundled/taskflow-3.6.0/include/cuda/algorithm/reduce.hpp create mode 100644 bundled/taskflow-3.6.0/include/cuda/algorithm/scan.hpp create mode 100644 bundled/taskflow-3.6.0/include/cuda/algorithm/sort.hpp create mode 100644 bundled/taskflow-3.6.0/include/cuda/algorithm/transform.hpp create mode 100644 bundled/taskflow-3.6.0/include/cuda/algorithm/transpose.hpp create mode 100644 bundled/taskflow-3.6.0/include/cuda/cuda_capturer.hpp create mode 100644 bundled/taskflow-3.6.0/include/cuda/cuda_device.hpp create mode 100644 bundled/taskflow-3.6.0/include/cuda/cuda_error.hpp create mode 100644 bundled/taskflow-3.6.0/include/cuda/cuda_execution_policy.hpp create mode 100644 bundled/taskflow-3.6.0/include/cuda/cuda_graph.hpp create mode 100644 bundled/taskflow-3.6.0/include/cuda/cuda_memory.hpp create mode 100644 bundled/taskflow-3.6.0/include/cuda/cuda_meta.hpp create mode 100644 bundled/taskflow-3.6.0/include/cuda/cuda_object.hpp create mode 100644 bundled/taskflow-3.6.0/include/cuda/cuda_optimizer.hpp create mode 100644 bundled/taskflow-3.6.0/include/cuda/cuda_stream.hpp create mode 100644 bundled/taskflow-3.6.0/include/cuda/cuda_task.hpp create mode 100644 bundled/taskflow-3.6.0/include/cuda/cudaflow.hpp create mode 100644 
bundled/taskflow-3.6.0/include/dsl/connection.hpp create mode 100644 bundled/taskflow-3.6.0/include/dsl/dsl.hpp create mode 100644 bundled/taskflow-3.6.0/include/dsl/meta_macro.hpp create mode 100644 bundled/taskflow-3.6.0/include/dsl/task_analyzer.hpp create mode 100644 bundled/taskflow-3.6.0/include/dsl/task_dsl.hpp create mode 100644 bundled/taskflow-3.6.0/include/dsl/task_trait.hpp create mode 100644 bundled/taskflow-3.6.0/include/dsl/tuple_utils.hpp create mode 100644 bundled/taskflow-3.6.0/include/dsl/type_list.hpp create mode 100644 bundled/taskflow-3.6.0/include/sycl/algorithm/reduce.hpp create mode 100644 bundled/taskflow-3.6.0/include/sycl/algorithm/sycl_for_each.hpp create mode 100644 bundled/taskflow-3.6.0/include/sycl/algorithm/sycl_transform.hpp create mode 100644 bundled/taskflow-3.6.0/include/sycl/sycl_execution_policy.hpp create mode 100644 bundled/taskflow-3.6.0/include/sycl/sycl_graph.hpp create mode 100644 bundled/taskflow-3.6.0/include/sycl/sycl_meta.hpp create mode 100644 bundled/taskflow-3.6.0/include/sycl/sycl_task.hpp create mode 100644 bundled/taskflow-3.6.0/include/sycl/syclflow.hpp create mode 100644 bundled/taskflow-3.6.0/include/taskflow.hpp create mode 100644 bundled/taskflow-3.6.0/include/utility/iterator.hpp create mode 100644 bundled/taskflow-3.6.0/include/utility/macros.hpp create mode 100644 bundled/taskflow-3.6.0/include/utility/math.hpp rename bundled/{taskflow-2.5.0/include/taskflow => taskflow-3.6.0/include}/utility/object_pool.hpp (91%) create mode 100644 bundled/taskflow-3.6.0/include/utility/os.hpp create mode 100644 bundled/taskflow-3.6.0/include/utility/serializer.hpp rename bundled/{taskflow-2.5.0/include/taskflow => taskflow-3.6.0/include}/utility/singleton.hpp (94%) create mode 100644 bundled/taskflow-3.6.0/include/utility/small_vector.hpp create mode 100644 bundled/taskflow-3.6.0/include/utility/stream.hpp create mode 100644 bundled/taskflow-3.6.0/include/utility/traits.hpp rename bundled/{taskflow-2.5.0/include/taskflow => taskflow-3.6.0/include}/utility/uuid.hpp (82%) diff --git a/bundled/setup_bundled.cmake b/bundled/setup_bundled.cmake index 2b0fb9d5f0..c9e9165c21 100644 --- a/bundled/setup_bundled.cmake +++ b/bundled/setup_bundled.cmake @@ -100,10 +100,10 @@ option(DEAL_II_FORCE_BUNDLED_TASKFLOW "Always use the bundled taskflow header library instead of an external one." 
OFF) -set(TASKFLOW_FOLDER "${CMAKE_SOURCE_DIR}/bundled/taskflow-2.5.0") +set(TASKFLOW_FOLDER "${CMAKE_SOURCE_DIR}/bundled/taskflow-3.6.0") macro(feature_taskflow_configure_bundled) - set(TASKFLOW_VERSION "2.5.0") + set(TASKFLOW_VERSION "3.6.0") list(APPEND DEAL_II_BUNDLED_INCLUDE_DIRS ${TASKFLOW_FOLDER}/include) endmacro() diff --git a/bundled/taskflow-2.5.0/README.md b/bundled/taskflow-2.5.0/README.md deleted file mode 100644 index 780f4df9ef..0000000000 --- a/bundled/taskflow-2.5.0/README.md +++ /dev/null @@ -1,747 +0,0 @@ -# Taskflow - -[![Codacy Badge](https://api.codacy.com/project/badge/Grade/3bbdc89f9a7a41eaa17559fab8a64cde)](https://app.codacy.com/gh/taskflow/taskflow?utm_source=github.com&utm_medium=referral&utm_content=taskflow/taskflow&utm_campaign=Badge_Grade_Dashboard) -[![Linux Build Status](https://travis-ci.com/taskflow/taskflow.svg?branch=master)](https://travis-ci.com/taskflow/taskflow) -[![Windows Build status](https://ci.appveyor.com/api/projects/status/rbjl16i6c9ahxr16?svg=true)](https://ci.appveyor.com/project/tsung-wei-huang/taskflow) -[![Wiki](image/api-doc.svg)][wiki] -[![TFProf](image/tfprof.svg)](https://taskflow.github.io/tfprof/) -[![Cite](image/cite-arXiv.svg)](https://arxiv.org/abs/2004.10908v2) - -Taskflow helps you quickly write parallel tasks programs in modern C++ - -:exclamation: Starting from [v2.5.0](https://github.com/taskflow/taskflow/releases/tag/2.5.0), we have renamed cpp-taskflow to ***taskflow*** to broaden its support and future application scopes. The core codebase remains *unchanged*. You may only need to [change the remote URL](https://help.github.com/en/github/using-git/changing-a-remotes-url) to this new repository. Thank your for the support! - -# Why Taskflow? - -Taskflow is faster, more expressive, and easier for drop-in integration -than many of existing task programming frameworks -in handling complex parallel workloads. - -![](image/performance.png) - -Taskflow lets you quickly implement task decomposition strategies -that incorporate both regular and irregular compute patterns, -together with an efficient *work-stealing* scheduler to optimize your multithreaded performance. - -| [Static Tasking](#get-started-with-taskflow) | [Dynamic Tasking](#dynamic-tasking) | -| :------------: | :-------------: | -| ![](image/static_graph.svg) | | - -Taskflow supports conditional tasking for you to make rapid control-flow decisions -across dependent tasks to implement cycles and conditions that were otherwise difficult to do -with existing tools. - -| [Conditional Tasking](#conditional-tasking) | -| :-----------------: | -| ![](image/condition.svg) | - -Taskflow is composable. You can create large parallel graphs through -composition of modular and reusable blocks that are easier to optimize -at an individual scope. - -| [Taskflow Composition](#composable-tasking) | -| :---------------: | -|![](image/framework.svg)| - -Taskflow supports heterogeneous tasking for you to -accelerate a wide range of scientific computing applications -by harnessing the power of CPU-GPU collaborative computing. - -| [Concurrent CPU-GPU Tasking](#concurrent-cpu-gpu-tasking) | -| :-----------------: | -| ![](image/cudaflow.svg) | - - -Taskflow provides visualization and tooling needed for profiling Taskflow programs. - -| [Taskflow Profiler](https://taskflow.github.io/tfprof) | -| :-----------------: | -| ![](image/tfprof.png) | - -We are committed to support trustworthy developments for both academic and industrial research projects -in parallel computing. 
Check out [Who is Using Taskflow](#who-is-using-taskflow) and what our users say: - -+ *"Taskflow is the cleanest Task API I've ever seen." [Damien Hocking @Corelium Inc](http://coreliuminc.com)* -+ *"Taskflow has a very simple and elegant tasking interface. The performance also scales very well." [Glen Fraser][totalgee]* -+ *"Taskflow lets me handle parallel processing in a smart way." [Hayabusa @Learning](https://cpp-learning.com/cpp-taskflow/)* -+ *"Taskflow improves the throughput of our graph engine in just a few hours of coding." [Jean-Michaël @KDAB](https://ossia.io/)* -+ *"Best poster award for open-source parallel programming library." [Cpp Conference 2018][Cpp Conference 2018]* -+ *"Second Prize of Open-source Software Competition." [ACM Multimedia Conference 2019](https://tsung-wei-huang.github.io/img/mm19-ossc-award.jpg)* - -See a quick [presentation][Presentation] and -visit the [documentation][wiki] to learn more about Taskflow. -Technical details can be referred to our [arXiv paper](https://arxiv.org/abs/2004.10908v2). - -# Table of Contents - -* [Get Started with Taskflow](#get-started-with-taskflow) -* [Create a Taskflow Application](#create-a-taskflow-application) - * [Step 1: Create a Taskflow](#step-1-create-a-taskflow) - * [Step 2: Define Task Dependencies](#step-2-define-task-dependencies) - * [Step 3: Execute a Taskflow](#step-3-execute-a-taskflow) -* [Dynamic Tasking](#dynamic-tasking) -* [Conditional Tasking](#conditional-tasking) -* [Composable Tasking](#composable-tasking) -* [Concurrent CPU-GPU Tasking](#concurrent-cpu-gpu-tasking) - * [Step 1: Create a cudaFlow](#step-1-create-a-cudaflow) - * [Step 2: Compile and Execute a cudaFlow](#step-2-compile-and-execute-a-cudaflow) -* [Visualize a Taskflow Graph](#visualize-a-taskflow-graph) -* [API Reference](#api-reference) -* [System Requirements](#system-requirements) -* [Compile Unit Tests, Examples, and Benchmarks](#compile-unit-tests-examples-and-benchmarks) -* [Who is Using Taskflow?](#who-is-using-taskflow) - - -# Get Started with Taskflow - -The following example [simple.cpp](./examples/simple.cpp) shows the basic Taskflow API -you need in most applications. - -```cpp -#include // Taskflow is header-only - -int main(){ - - tf::Executor executor; - tf::Taskflow taskflow; - - auto [A, B, C, D] = taskflow.emplace( - [] () { std::cout << "TaskA\n"; }, // task dependency graph - [] () { std::cout << "TaskB\n"; }, // - [] () { std::cout << "TaskC\n"; }, // +---+ - [] () { std::cout << "TaskD\n"; } // +---->| B |-----+ - ); // | +---+ | - // +---+ +-v-+ - A.precede(B); // A runs before B // | A | | D | - A.precede(C); // A runs before C // +---+ +-^-+ - B.precede(D); // B runs before D // | +---+ | - C.precede(D); // C runs before D // +---->| C |-----+ - // +---+ - executor.run(taskflow).wait(); - - return 0; -} -``` - -Compile and run the code with the following commands: - -```bash -~$ g++ simple.cpp -I path/to/include/taskflow/ -std=c++17 -O2 -lpthread -o simple -~$ ./simple -TaskA -TaskC <-- concurrent with TaskB -TaskB <-- concurrent with TaskC -TaskD -``` - -# Create a Taskflow Application - -Taskflow defines a very expressive API to create task dependency graphs. -Most applications are developed through the following three steps: - -## Step 1: Create a Taskflow - -Create a taskflow object to build a task dependency graph: - -```cpp -tf::Taskflow taskflow; -``` - -A task is a callable object for which [std::invoke][std::invoke] is applicable. 
-Use the method `emplace` to create a task: - -```cpp -tf::Task A = taskflow.emplace([](){ std::cout << "Task A\n"; }); -``` - -## Step 2: Define Task Dependencies - -You can add dependency links between tasks to enforce one task to run before or -after another. - -```cpp -A.precede(B); // A runs before B. -``` - -## Step 3: Execute a Taskflow - -To execute a taskflow, you need to create an *executor*. -An executor manages a set of worker threads to execute a taskflow -through an efficient *work-stealing* algorithm. - -```cpp -tf::Executor executor; -``` - -The executor provides a rich set of methods to run a taskflow. -You can run a taskflow multiple times, or until a stopping criteria is met. -These methods are non-blocking with a [std::future][std::future] return -to let you query the execution status. -Executor is *thread-safe*. - -```cpp -executor.run(taskflow); // runs the taskflow once -executor.run_n(taskflow, 4); // runs the taskflow four times - -// keeps running the taskflow until the predicate becomes true -executor.run_until(taskflow, [counter=4](){ return --counter == 0; } ); -``` - - -You can call `wait_for_all` to block the executor until all associated taskflows complete. - -```cpp -executor.wait_for_all(); // block until all associated tasks finish -``` - -Notice that the executor does not own any taskflow. -It is your responsibility to keep a taskflow alive during its execution, -or it can result in undefined behavior. -In most applications, you need only one executor to run multiple taskflows -each representing a specific part of your parallel decomposition. - -
- -# Dynamic Tasking - -Another powerful feature of Taskflow is *dynamic* tasking. -Dynamic tasks are those tasks created during the execution of a taskflow. -These tasks are spawned by a parent task and are grouped together to a *subflow* graph. -To create a subflow for dynamic tasking, -emplace a callable with one argument of type `tf::Subflow`. - - - -```cpp -// create three regular tasks -tf::Task A = tf.emplace([](){}).name("A"); -tf::Task C = tf.emplace([](){}).name("C"); -tf::Task D = tf.emplace([](){}).name("D"); - -// create a subflow graph (dynamic tasking) -tf::Task B = tf.emplace([] (tf::Subflow& subflow) { - tf::Task B1 = subflow.emplace([](){}).name("B1"); - tf::Task B2 = subflow.emplace([](){}).name("B2"); - tf::Task B3 = subflow.emplace([](){}).name("B3"); - B1.precede(B3); - B2.precede(B3); -}).name("B"); - -A.precede(B); // B runs after A -A.precede(C); // C runs after A -B.precede(D); // D runs after B -C.precede(D); // D runs after C -``` - -By default, a subflow graph joins its parent node. -This ensures a subflow graph finishes before the successors of -its parent task. -You can disable this feature by calling `subflow.detach()`. -For example, detaching the above subflow will result in the following execution flow: - - - -```cpp -// create a "detached" subflow graph (dynamic tasking) -tf::Task B = tf.emplace([] (tf::Subflow& subflow) { - tf::Task B1 = subflow.emplace([](){}).name("B1"); - tf::Task B2 = subflow.emplace([](){}).name("B2"); - tf::Task B3 = subflow.emplace([](){}).name("B3"); - - B1.precede(B3); - B2.precede(B3); - - // detach the subflow to form a parallel execution line - subflow.detach(); -}).name("B"); -``` - -A subflow can be nested or recursive. You can create another subflow from -the execution of a subflow and so on. - -
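As a minimal sketch of such nesting (assuming a `tf::Taskflow taskflow` object; the task and variable names below are only illustrative), the callable of a dynamic task may itself take a `tf::Subflow&` and spawn another subflow:

```cpp
// a subflow task B whose child B1 spawns its own nested subflow when it runs
tf::Task B = taskflow.emplace([] (tf::Subflow& sf) {
  tf::Task B1 = sf.emplace([] (tf::Subflow& nested) {
    tf::Task B1_1 = nested.emplace([](){}).name("B1_1");
    tf::Task B1_2 = nested.emplace([](){}).name("B1_2");
    B1_1.precede(B1_2);    // dependencies work inside the nested subflow, too
  }).name("B1");
}).name("B");
```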
-
-# Conditional Tasking
-
-Taskflow supports *conditional tasking* for users to implement *general* control flow with cycles and conditionals.
-A *condition task* evaluates a set of instructions and returns an integer index
-of the next immediate successor to execute.
-The index is defined with respect to the order of its successor construction.
-
-```cpp
-tf::Task init = tf.emplace([](){ }).name("init");
-tf::Task stop = tf.emplace([](){ }).name("stop");
-
-// creates a condition task that returns 0 or 1
-tf::Task cond = tf.emplace([](){
-  std::cout << "flipping a coin\n";
-  return rand() % 2;
-}).name("cond");
-
-// creates a feedback loop
-init.precede(cond);
-cond.precede(cond, stop);  // cond--0-->cond, cond--1-->stop
-
-executor.run(tf).wait();
-```
-
- - - -# Composable Tasking - -A powerful feature of `tf::Taskflow` is composability. -You can create multiple task graphs from different parts of your workload -and use them to compose a large graph through the `composed_of` method. - - - - -```cpp -tf::Taskflow f1, f2; - -auto [f1A, f1B] = f1.emplace( - []() { std::cout << "Task f1A\n"; }, - []() { std::cout << "Task f1B\n"; } -); -auto [f2A, f2B, f2C] = f2.emplace( - []() { std::cout << "Task f2A\n"; }, - []() { std::cout << "Task f2B\n"; }, - []() { std::cout << "Task f2C\n"; } -); -auto f1_module_task = f2.composed_of(f1); - -f1_module_task.succeed(f2A, f2B) - .precede(f2C); -``` - -Similarly, `composed_of` returns a task handle and you can use -`precede` to create dependencies. -You can compose a taskflow from multiple taskflows and use the result -to compose a larger taskflow and so on. - -
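As a rough sketch of taking this one step further (reusing `f1` and `f2` from above; `f3`, its task, and the executor are illustrative), the composed taskflow can itself be composed into a larger one and then run like any other taskflow:

```cpp
tf::Taskflow f3;

// f3 embeds f2 as a module task, and f2 in turn embeds f1
tf::Task f2_module_task = f3.composed_of(f2);
tf::Task f3_last = f3.emplace([](){ std::cout << "Task f3_last\n"; }).name("f3_last");
f2_module_task.precede(f3_last);

tf::Executor executor;
executor.run(f3).wait();   // runs f3, which runs f2, which runs f1
```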
- -# Concurrent CPU-GPU Tasking - -Taskflow enables concurrent CPU-GPU tasking by leveraging -[Nvidia CUDA Toolkit][cuda-toolkit]. -You can harness the power of CPU-GPU collaborative computing -to implement heterogeneous decomposition algorithms. - -## Step 1: Create a cudaFlow - -A `tf::cudaFlow` is a graph object created at runtime -similar to dynamic tasking. -It manages a task node in a taskflow and associates it -with a [CUDA Graph][cudaGraph]. -To create a cudaFlow, emplace a callable with an argument -of type `tf::cudaFlow`. - - - -```cpp -tf::Taskflow taskflow; -tf::Executor executor; - -const unsigned N = 1<<20; // size of the vector -std::vector hx(N, 1.0f), hy(N, 2.0f); // x and y vectors at host -float *dx{nullptr}, *dy{nullptr}; // x and y vectors at device - -tf::Task allocate_x = taskflow.emplace([&](){ cudaMalloc(&dx, N*sizeof(float));}); -tf::Task allocate_y = taskflow.emplace([&](){ cudaMalloc(&dy, N*sizeof(float));}); -tf::Task cudaflow = taskflow.emplace([&](tf::cudaFlow& cf) { - tf::cudaTask h2d_x = cf.copy(dx, hx.data(), N); // host-to-device x data transfer - tf::cudaTask h2d_y = cf.copy(dy, hy.data(), N); // host-to-device y data transfer - tf::cudaTask d2h_x = cf.copy(hx.data(), dx, N); // device-to-host x data transfer - tf::cudaTask d2h_y = cf.copy(hy.data(), dy, N); // device-to-host y data transfer - // launch saxpy<<<(N+255)/256, 256, 0>>>(N, 2.0f, dx, dy) - tf::cudaTask kernel = cf.kernel((N+255)/256, 256, 0, saxpy, N, 2.0f, dx, dy); - kernel.succeed(h2d_x, h2d_y) - .precede(d2h_x, d2h_y); -}); -cudaflow.succeed(allocate_x, allocate_y); // overlap data allocations - -executor.run(taskflow).wait(); -``` - -Assume our kernel implements the canonical saxpy operation -(single-precision A·X Plus Y) using the CUDA syntax. - - - -```cpp -// saxpy (single-precision A·X Plus Y) kernel -__global__ void saxpy( - int n, float a, float *x, float *y -) { - // get the thread index - int i = blockIdx.x*blockDim.x + threadIdx.x; - - if (i < n) { - y[i] = a*x[i] + y[i]; - } -} -``` - - - -## Step 2: Compile and Execute a cudaFlow - -Name you source with the extension `.cu`, let's say `saxpy.cu`, -and compile it through [nvcc][nvcc]: - -```bash -~$ nvcc saxpy.cu -I path/to/include/taskflow -O2 -o saxpy -~$ ./saxpy -``` - -Our source autonomously enables cudaFlow for compilers that support -CUDA. - -
-
-# Visualize a Taskflow Graph
-
-You can dump a taskflow through a `std::ostream`
-in [GraphViz][GraphViz] format using the method `dump`.
-There are a number of free [GraphViz tools][AwesomeGraphViz] you can find online to visualize your Taskflow graph.
-
-```cpp
-tf::Taskflow taskflow;
-tf::Task A = taskflow.emplace([] () {}).name("A");
-tf::Task B = taskflow.emplace([] () {}).name("B");
-tf::Task C = taskflow.emplace([] () {}).name("C");
-tf::Task D = taskflow.emplace([] () {}).name("D");
-tf::Task E = taskflow.emplace([] () {}).name("E");
-A.precede(B, C, E);
-C.precede(D);
-B.precede(D, E);
-
-taskflow.dump(std::cout);  // dump the graph in DOT to std::cout
-```
-
-When you have tasks that are created at runtime (e.g., subflow, cudaFlow),
-you need to execute the graph first to spawn these tasks
-and then dump the entire graph.
-
-```cpp
-tf::Executor executor;
-tf::Taskflow taskflow;
-
-tf::Task A = taskflow.emplace([](){}).name("A");
-
-// create a subflow of two tasks B1->B2
-tf::Task B = taskflow.emplace([] (tf::Subflow& subflow) {
-  tf::Task B1 = subflow.emplace([](){}).name("B1");
-  tf::Task B2 = subflow.emplace([](){}).name("B2");
-  B1.precede(B2);
-}).name("B");
-
-A.precede(B);
-
-executor.run(taskflow).wait();  // run the taskflow to spawn subflows
-taskflow.dump(std::cout);       // dump the graph including dynamic tasks
-```
-
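Since `dump` accepts any `std::ostream`, a small sketch (using the `taskflow` from the first snippet above) is to write the DOT description to a file and render it offline with a GraphViz tool:

```cpp
#include <fstream>

std::ofstream ofs("taskflow.dot");
taskflow.dump(ofs);   // write the DOT description to taskflow.dot
// render it offline, for example:  dot -Tpng taskflow.dot -o taskflow.png
```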
- - - - -# API Reference - -The official [documentation][wiki] explains a complete list of -Taskflow API. -Here, we highlight commonly used methods. - -## Taskflow API - -The class `tf::Taskflow` is the main place to create a task dependency graph. - -### *emplace/placeholder* - -You can use `emplace` to create a task from a target callable. - -```cpp -tf::Task task = taskflow.emplace([] () { std::cout << "my task\n"; }); -``` - -When a task cannot be determined beforehand, you can create a placeholder and assign the callable later. - -```cpp -tf::Task A = taskflow.emplace([](){}); -tf::Task B = taskflow.placeholder(); -A.precede(B); -B.work([](){ /* do something */ }); -``` - -### *parallel_for* - -The method `parallel_for` creates a subgraph that applies the callable to each item in the given range of a container. - - - -```cpp -auto v = {'A', 'B', 'C', 'D'}; -auto [S, T] = taskflow.parallel_for( - v.begin(), // iterator to the beginning - v.end(), // iterator to the end - [] (int i) { - std::cout << "parallel " << i << '\n'; - } -); -// add dependencies via S and T. -``` - -You can specify a *chunk* size (default one) in the last argument to force a task to include a certain number of items. - - - -```cpp -auto v = {'A', 'B', 'C', 'D'}; -auto [S, T] = taskflow.parallel_for( - v.begin(), // iterator to the beginning - v.end(), // iterator to the end - [] (int i) { - std::cout << "AB and CD run in parallel" << '\n'; - }, - 2 // at least two items at a time -); -``` - -In addition to iterator-based construction, -`parallel_for` has another overload of index-based loop. -The first three argument of this overload indicates -starting index, ending index (exclusive), and step size. - -```cpp -// [0, 11) with a step size of 2 -auto [S, T] = taskflow.parallel_for( - 0, 11, 2, - [] (int i) { - std::cout << "parallel_for on index " << i << std::endl; - }, - 2 // at least two items at a time -); -// will print 0, 2, 4, 6, 8, 10 (three partitions, {0, 2}, {4, 6}, {8, 10}) -``` - -## Task API - -Each time you create a task, the taskflow object adds a node to the present task dependency graph -and return a *task handle* to you. -You can access or modify the attributes of the associated task node. - -### *name* - -The method `name` lets you assign a human-readable string to a task. - -```cpp -A.name("my name is A"); -``` - -### *work* - -The method `work` lets you assign a callable to a task. - -```cpp -A.work([] () { std::cout << "hello world!"; }); -``` - -### *precede/succeed* - -The method `precede/succedd` lets you add a preceding/succeeding link between tasks. - - - -```cpp -// A runs before B, C, D, and E -A.precede(B, C, D, E); -``` - -The method `succeed` is similar to `precede` but operates in the opposite direction. - -### *empty/has_work* - -A task is empty if it is not associated with any graph node. - -```cpp -tf::Task task; // assert(task.empty()); -``` - -A placeholder task is associated with a graph node but has no work assigned yet. - -``` -tf::Task task = taskflow.placeholder(); // assert(!task.has_work()); -``` - -## Executor API - -The class `tf::Executor` is used for executing one or multiple taskflow objects. - -### *run/run_n/run_until* - -The run series are *thread-safe* and *non-blocking* calls to execute a taskflow. -Issuing multiple runs on the same taskflow will automatically synchronize -to a sequential chain of executions. 
-
-```cpp
-executor.run(taskflow);                 // runs a graph once
-executor.run_n(taskflow, 5);            // runs a graph five times
-executor.run_until(taskflow, my_pred);  // keeps running until my_pred becomes true
-executor.wait_for_all();                // blocks until all tasks finish
-```
-
-The first run finishes before the second run, and the second run finishes before the third run.
-
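For example, a minimal sketch of this sequential chaining (assuming the `executor` and `taskflow` above; the callback message is illustrative) uses the returned futures together with the completion-callback overload of `run`:

```cpp
std::future<void> fu1 = executor.run(taskflow);       // first run
std::future<void> fu2 = executor.run_n(taskflow, 2);  // queued after the first run
executor.run(taskflow, [](){ std::cout << "all runs done\n"; });  // callback after this (last) run
executor.wait_for_all();                              // block until every run finishes
```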
-
-# System Requirements
-
-To use the latest [Taskflow](https://github.com/taskflow/taskflow/archive/master.zip), you only need a [C++14][C++14] compiler.
-
-+ GNU C++ Compiler at least v5.0 with -std=c++14
-+ Clang C++ Compiler at least v4.0 with -std=c++14
-+ Microsoft Visual Studio at least v15.7 (MSVC++ 19.14); see the [vcpkg guide](https://github.com/taskflow/taskflow/issues/143)
-+ AppleClang (Xcode) at least version 8
-+ Nvidia CUDA Toolkit and Compiler ([nvcc][nvcc]) at least v10.0 with -std=c++14
-
-Taskflow works on Linux, Windows, and Mac OS X. See the [C++ compiler support](https://en.cppreference.com/w/cpp/compiler_support) status.
-
- -# Compile Unit Tests, Examples, and Benchmarks - -Taskflow uses [CMake](https://cmake.org/) to build examples and unit tests. -We recommend using out-of-source build. - -```bash -~$ cmake --version # must be at least 3.9 or higher -~$ mkdir build -~$ cd build -~$ cmake ../ -~$ make & make test # run all unit tests -``` - -## Examples - -The folder `examples/` contains several examples and is a great place to learn to use Taskflow. - -| Example | Description | -| ------- | ----------- | -| [simple.cpp](./examples/simple.cpp) | uses basic task building blocks to create a trivial taskflow graph | -| [debug.cpp](./examples/debug.cpp)| inspects a taskflow through the dump method | -| [parallel_for.cpp](./examples/parallel_for.cpp)| parallelizes a for loop with unbalanced workload | -| [subflow.cpp](./examples/subflow.cpp)| demonstrates how to create a subflow graph that spawns three dynamic tasks | -| [run_variants.cpp](./examples/run_variants.cpp)| shows multiple ways to run a taskflow graph | -| [composition.cpp](./examples/composition.cpp)| demonstrates the decomposable interface of taskflow | -| [observer.cpp](./examples/observer.cpp)| demonstrates how to monitor the thread activities in scheduling and running tasks | -| [condition.cpp](./examples/condition.cpp) | creates a conditional tasking graph with a feedback loop control flow | -| [cuda/saxpy.cu](./examples/cuda/saxpy.cu) | uses cudaFlow to create a saxpy (single-precision A·X Plus Y) task graph | -| [cuda/matmul.cu](./examples/cuda/matmul.cu) | uses cudaFlow to create a matrix multiplication workload and compares it with a CPU basline | - -## Benchmarks - -Please visit [benchmarks](benchmarks/benchmarks.md) to learn to -compile the benchmarks. - -
- -# Who is Using Taskflow? - -Taskflow is being used in both industry and academic projects to scale up existing workloads -that incorporate complex task dependencies. - -- [OpenTimer][OpenTimer]: A High-performance Timing Analysis Tool for Very Large Scale Integration (VLSI) Systems -- [DtCraft][DtCraft]: A General-purpose Distributed Programming Systems using Data-parallel Streams -- [Firestorm][Firestorm]: Fighting Game Engine with Asynchronous Resource Loaders (developed by [ForgeMistress][ForgeMistress]) -- [Shiva][Shiva]: An extensible engine via an entity component system through scripts, DLLs, and header-only (C++) -- [PID Framework][PID Framework]: A Global Development Methodology Supported by a CMake API and Dedicated C++ Projects -- [NovusCore][NovusCore]: An emulating project for World of Warraft (Wrath of the Lich King 3.3.5a 12340 client build) -- [SA-PCB][SA-PCB]: Annealing-based Printed Circuit Board (PCB) Placement Tool -- [LPMP](https://github.com/LPMP/LPMP): A C++ framework for developing scalable Lagrangian decomposition solvers for discrete optimization problems -- [Heteroflow](https://github.com/Heteroflow/Heteroflow): A Modern C++ Parallel CPU-GPU Task Programming Library -- [OpenPhySyn](https://github.com/The-OpenROAD-Project/OpenPhySyn): A plugin-based physical synthesis optimization kit as part of the OpenRoad flow -- [OSSIA](https://ossia.io/): Open-source Software System for Interactive Applications -- [deal.II](https://github.com/dealii/dealii): A C++ software library to support the creation of finite element code - -[More...](https://github.com/search?q=taskflow&type=Code) - -
- -# Contributors - -Taskflow is being actively developed and contributed by the -[these people](https://github.com/taskflow/taskflow/graphs/contributors). -Meanwhile, we appreciate the support from many organizations for our developments. - - -| [][UofU] | [][UIUC] | [][CSL] | [][NSF] | [][DARPA IDEA] | -| :---: | :---: | :---: | :---: | :---: | - -# License - -Taskflow is licensed under the [MIT License](./LICENSE). - -* * * - -[Tsung-Wei Huang]: https://tsung-wei-huang.github.io/ -[Chun-Xun Lin]: https://github.com/clin99 -[Martin Wong]: https://ece.illinois.edu/directory/profile/mdfwong -[Gitter badge]: ./image/gitter_badge.svg -[GitHub releases]: https://github.com/taskflow/taskflow/releases -[GitHub issues]: https://github.com/taskflow/taskflow/issues -[GitHub insights]: https://github.com/taskflow/taskflow/pulse -[GitHub pull requests]: https://github.com/taskflow/taskflow/pulls -[GitHub contributors]: https://github.com/taskflow/taskflow/graphs/contributors -[GraphViz]: https://www.graphviz.org/ -[AwesomeGraphViz]: https://dreampuf.github.io/GraphvizOnline/ -[OpenMP Tasking]: https://www.openmp.org/spec-html/5.0/openmpsu99.html -[TBB FlowGraph]: https://www.threadingbuildingblocks.org/tutorial-intel-tbb-flow-graph -[OpenTimer]: https://github.com/OpenTimer/OpenTimer -[DtCraft]: https://github.com/tsung-wei-huang/DtCraft -[totalgee]: https://github.com/totalgee -[damienhocking]: https://github.com/damienhocking -[ForgeMistress]: https://github.com/ForgeMistress -[Patrik Huber]: https://github.com/patrikhuber -[DARPA IDEA]: https://www.darpa.mil/news-events/2017-09-13 -[KingDuckZ]: https://github.com/KingDuckZ -[NSF]: https://www.nsf.gov/ -[UIUC]: https://illinois.edu/ -[CSL]: https://csl.illinois.edu/ -[UofU]: https://www.utah.edu/ -[wiki]: https://taskflow.github.io/taskflow/index.html -[release notes]: https://taskflow.github.io/taskflow/Releases.html -[PayMe]: https://www.paypal.me/twhuang/10 -[C++17]: https://en.wikipedia.org/wiki/C%2B%2B17 -[C++14]: https://en.wikipedia.org/wiki/C%2B%2B14 -[email me]: mailto:twh760812@gmail.com -[Cpp Conference 2018]: https://github.com/CppCon/CppCon2018 -[ChromeTracing]: https://www.chromium.org/developers/how-tos/trace-event-profiling-tool -[IPDPS19]: https://tsung-wei-huang.github.io/papers/ipdps19.pdf -[WorkStealing Wiki]: https://en.wikipedia.org/wiki/Work_stealing - -[std::invoke]: https://en.cppreference.com/w/cpp/utility/functional/invoke -[std::future]: https://en.cppreference.com/w/cpp/thread/future - -[cuda-zone]: https://developer.nvidia.com/cuda-zone -[nvcc]: https://developer.nvidia.com/cuda-llvm-compiler -[cuda-toolkit]: https://developer.nvidia.com/cuda-toolkit -[cudaGraph]: https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__GRAPH.html - -[Firestorm]: https://github.com/ForgeMistress/Firestorm -[Shiva]: https://shiva.gitbook.io/project/shiva -[PID Framework]: http://pid.lirmm.net/pid-framework/index.html -[NovusCore]: https://github.com/novuscore/NovusCore -[SA-PCB]: https://github.com/choltz95/SA-PCB - -[Presentation]: https://taskflow.github.io/ -[chrome://tracing]: chrome://tracing - diff --git a/bundled/taskflow-2.5.0/include/taskflow/core/executor.hpp b/bundled/taskflow-2.5.0/include/taskflow/core/executor.hpp deleted file mode 100644 index 8c47499bfb..0000000000 --- a/bundled/taskflow-2.5.0/include/taskflow/core/executor.hpp +++ /dev/null @@ -1,1236 +0,0 @@ -#pragma once - -#include "tsq.hpp" -#include "notifier.hpp" -#include "observer.hpp" -#include "taskflow.hpp" - -namespace tf { - - -/** @class WorkerView - 
-@brief class to access worker information from the observer interface - -*/ -//class WorkerView { -// -// friend class Executor; -// -// public: -// -// -// private: -// -// Worker* _worker; -// -//}; - - -// ---------------------------------------------------------------------------- -// Executor Definition -// ---------------------------------------------------------------------------- - - -/** @class Executor - -@brief execution interface for running a taskflow graph - -An executor object manages a set of worker threads and internalements -an efficient work-stealing scheduling algorithm to run a taskflow. - -*/ -class Executor { - - friend class Subflow; - - struct Worker { - size_t id; - size_t victim; - Domain domain; - Executor* executor; - Notifier::Waiter* waiter; - std::mt19937 rdgen { std::random_device{}() }; - TaskQueue wsq[NUM_DOMAINS]; - }; - - struct PerThread { - Worker* worker {nullptr}; - }; - -#ifdef TF_ENABLE_CUDA - struct cudaDevice { - std::vector streams; - }; -#endif - - public: - -#ifdef TF_ENABLE_CUDA - /** - @brief constructs the executor with N/M cpu/gpu worker threads - */ - explicit Executor( - size_t N = std::thread::hardware_concurrency(), - size_t M = cuda_num_devices() - ); -#else - /** - @brief constructs the executor with N worker threads - */ - explicit Executor(size_t N = std::thread::hardware_concurrency()); -#endif - - /** - @brief destructs the executor - */ - ~Executor(); - - /** - @brief runs the taskflow once - - @param taskflow a tf::Taskflow object - - @return a std::future to access the execution status of the taskflow - */ - std::future run(Taskflow& taskflow); - - /** - @brief runs the taskflow once and invoke a callback upon completion - - @param taskflow a tf::Taskflow object - @param callable a callable object to be invoked after this run - - @return a std::future to access the execution status of the taskflow - */ - template - std::future run(Taskflow& taskflow, C&& callable); - - /** - @brief runs the taskflow for N times - - @param taskflow a tf::Taskflow object - @param N number of runs - - @return a std::future to access the execution status of the taskflow - */ - std::future run_n(Taskflow& taskflow, size_t N); - - /** - @brief runs the taskflow for N times and then invokes a callback - - @param taskflow a tf::Taskflow - @param N number of runs - @param callable a callable object to be invoked after this run - - @return a std::future to access the execution status of the taskflow - */ - template - std::future run_n(Taskflow& taskflow, size_t N, C&& callable); - - /** - @brief runs the taskflow multiple times until the predicate becomes true and - then invokes a callback - - @param taskflow a tf::Taskflow - @param pred a boolean predicate to return true for stop - - @return a std::future to access the execution status of the taskflow - */ - template - std::future run_until(Taskflow& taskflow, P&& pred); - - /** - @brief runs the taskflow multiple times until the predicate becomes true and - then invokes the callback - - @param taskflow a tf::Taskflow - @param pred a boolean predicate to return true for stop - @param callable a callable object to be invoked after this run - - @return a std::future to access the execution status of the taskflow - */ - template - std::future run_until(Taskflow& taskflow, P&& pred, C&& callable); - - /** - @brief wait for all pending graphs to complete - */ - void wait_for_all(); - - /** - @brief queries the number of worker threads (can be zero) - */ - size_t num_workers() const; - - /** - @brief 
queries the number of running topologies at the time of this call - - When a taskflow is submitted to an executor, a topology is created to store - runtime metadata of the running taskflow. - */ - size_t num_topologies() const; - - /** - @brief queries the number of worker domains - - Each domain manages a subset of worker threads to execute domain-specific tasks, - for example, HOST tasks and CUDA tasks. - */ - size_t num_domains() const; - - /** - @brief queries the id of the caller thread in this executor - - Each worker has an unique id from 0 to N-1 exclusive to the associated executor. - If the caller thread does not belong to the executor, -1 is returned. - */ - int this_worker_id() const; - - /** - @brief constructs an observer to inspect the activities of worker threads - - Each executor manage a list of observers in shared ownership with callers. - - @tparam Observer observer type derived from tf::ObserverInterface - @tparam ArgsT... argument parameter pack - - @param args arguments to forward to the constructor of the observer - - @return a shared pointer to the created observer - */ - template - std::shared_ptr make_observer(Args&&... args); - - /** - @brief removes the associated observer - */ - template - void remove_observer(std::shared_ptr observer); - - /** - @brief queries the number of observers - */ - size_t num_observers() const; - - private: - - const size_t _VICTIM_BEG; - const size_t _VICTIM_END; - const size_t _MAX_STEALS; - const size_t _MAX_YIELDS; - - std::condition_variable _topology_cv; - std::mutex _topology_mutex; - std::mutex _wsq_mutex; - - size_t _num_topologies {0}; - - std::vector _workers; - std::vector _threads; - -#ifdef TF_ENABLE_CUDA - std::vector _cuda_devices; -#endif - - Notifier _notifier[NUM_DOMAINS]; - - TaskQueue _wsq[NUM_DOMAINS]; - - size_t _id_offset[NUM_DOMAINS] = {0}; - - std::atomic _num_actives[NUM_DOMAINS]; - std::atomic _num_thieves[NUM_DOMAINS]; - std::atomic _done {0}; - - std::unordered_set> _observers; - - TFProfObserver* _tfprof; - - PerThread& _per_thread() const; - - bool _wait_for_task(Worker&, Node*&); - - void _instantiate_tfprof(); - void _flush_tfprof(); - void _observer_prologue(Worker&, Node*); - void _observer_epilogue(Worker&, Node*); - void _spawn(size_t, Domain); - void _worker_loop(Worker&); - void _exploit_task(Worker&, Node*&); - void _explore_task(Worker&, Node*&); - void _schedule(Node*); - void _schedule(PassiveVector&); - void _invoke(Worker&, Node*); - void _invoke_static_work(Worker&, Node*); - void _invoke_dynamic_work(Worker&, Node*); - void _invoke_dynamic_work_internal(Worker&, Node*, Graph&, bool); - void _invoke_dynamic_work_external(Graph&, Node*); - void _invoke_condition_work(Worker&, Node*); - void _invoke_module_work(Worker&, Node*); - -#ifdef TF_ENABLE_CUDA - void _invoke_cudaflow_work(Worker&, Node*); - void _invoke_cudaflow_work_internal(Worker&, Node*); -#endif - - void _set_up_topology(Topology*); - void _tear_down_topology(Topology*); - void _increment_topology(); - void _decrement_topology(); - void _decrement_topology_and_notify(); -}; - - -#ifdef TF_ENABLE_CUDA -// Constructor -inline Executor::Executor(size_t N, size_t M) : - _VICTIM_BEG {0}, - _VICTIM_END {N + M - 1}, - _MAX_STEALS {(N + M + 1) << 1}, - _MAX_YIELDS {100}, - _workers {N + M}, - _cuda_devices {cuda_num_devices()}, - _notifier {Notifier(N), Notifier(M)} { - - if(N == 0) { - TF_THROW("no cpu workers to execute taskflows"); - } - - if(M == 0) { - TF_THROW("no gpu workers to execute cudaflows"); - } - - for(int i=0; 
i().get(); -} - -// Procedure: _flush_tfprof -inline void Executor::_flush_tfprof() { - if(_tfprof) { - std::ostringstream fpath; - fpath << get_env("TF_ENABLE_PROFILER") << _tfprof->_uuid << ".tfp"; - std::ofstream ofs(fpath.str()); - _tfprof->dump(ofs); - } -} - -// Function: num_workers -inline size_t Executor::num_workers() const { - return _workers.size(); -} - -// Function: num_domains -inline size_t Executor::num_domains() const { - return NUM_DOMAINS; -} - -// Function: num_topologies -inline size_t Executor::num_topologies() const { - return _num_topologies; -} - -// Function: _per_thread -inline Executor::PerThread& Executor::_per_thread() const { - thread_local PerThread pt; - return pt; -} - -// Function: this_worker_id -inline int Executor::this_worker_id() const { - auto worker = _per_thread().worker; - return worker ? static_cast(worker->id) : -1; -} - -// Procedure: _spawn -inline void Executor::_spawn(size_t N, Domain d) { - - auto id = _threads.size(); - - _id_offset[d] = id; - - for(size_t i=0; i void { - - PerThread& pt = _per_thread(); - pt.worker = &w; - - Node* t = nullptr; - - // must use 1 as condition instead of !done - while(1) { - - // execute the tasks. - _exploit_task(w, t); - - // wait for tasks - if(_wait_for_task(w, t) == false) { - break; - } - } - - }, std::ref(_workers[id])); - } - -} - -// Function: _explore_task -inline void Executor::_explore_task(Worker& w, Node*& t) { - - //assert(_workers[w].wsq.empty()); - assert(!t); - - const auto d = w.domain; - - size_t num_steals = 0; - size_t num_yields = 0; - - std::uniform_int_distribution rdvtm(_VICTIM_BEG, _VICTIM_END); - - //while(!_done) { - // - // size_t vtm = rdvtm(w.rdgen); - // - // t = (vtm == w.id) ? _wsq[d].steal() : _workers[vtm].wsq[d].steal(); - - // if(t) { - // break; - // } - - // if(num_steal++ > _MAX_STEALS) { - // std::this_thread::yield(); - // if(num_yields++ > _MAX_YIELDS) { - // break; - // } - // } - //} - - do { - t = (w.id == w.victim) ? _wsq[d].steal() : _workers[w.victim].wsq[d].steal(); - - if(t) { - break; - } - - if(num_steals++ > _MAX_STEALS) { - std::this_thread::yield(); - if(num_yields++ > _MAX_YIELDS) { - break; - } - } - - w.victim = rdvtm(w.rdgen); - } while(!_done); - -} - -// Procedure: _exploit_task -inline void Executor::_exploit_task(Worker& w, Node*& t) { - - if(t) { - - const auto d = w.domain; - - if(_num_actives[d].fetch_add(1) == 0 && _num_thieves[d] == 0) { - _notifier[d].notify(false); - } - - do { - - _invoke(w, t); - - if(t->_parent == nullptr) { - if(t->_topology->_join_counter.fetch_sub(1) == 1) { - _tear_down_topology(t->_topology); - } - } - else { // joined subflow - t->_parent->_join_counter.fetch_sub(1); - } - - t = w.wsq[d].pop(); - - } while(t); - - --_num_actives[d]; - } -} - -// Function: _wait_for_task -inline bool Executor::_wait_for_task(Worker& worker, Node*& t) { - - const auto d = worker.domain; - - wait_for_task: - - assert(!t); - - ++_num_thieves[d]; - - explore_task: - - _explore_task(worker, t); - - if(t) { - if(_num_thieves[d].fetch_sub(1) == 1) { - _notifier[d].notify(false); - } - return true; - } - - _notifier[d].prepare_wait(worker.waiter); - - //if(auto vtm = _find_victim(me); vtm != _workers.size()) { - if(!_wsq[d].empty()) { - - _notifier[d].cancel_wait(worker.waiter); - //t = (vtm == me) ? 
_wsq.steal() : _workers[vtm].wsq.steal(); - - t = _wsq[d].steal(); - if(t) { - if(_num_thieves[d].fetch_sub(1) == 1) { - _notifier[d].notify(false); - } - return true; - } - else { - worker.victim = worker.id; - goto explore_task; - } - } - - if(_done) { - _notifier[d].cancel_wait(worker.waiter); - for(int i=0; i -std::shared_ptr Executor::make_observer(Args&&... args) { - - static_assert( - std::is_base_of::value, - "Observer must be derived from ObserverInterface" - ); - - // use a local variable to mimic the constructor - auto ptr = std::make_shared(std::forward(args)...); - - ptr->set_up(_workers.size()); - - _observers.emplace(std::static_pointer_cast(ptr)); - - return ptr; -} - -// Procedure: remove_observer -template -void Executor::remove_observer(std::shared_ptr ptr) { - - static_assert( - std::is_base_of::value, - "Observer must be derived from ObserverInterface" - ); - - _observers.erase(std::static_pointer_cast(ptr)); -} - -// Function: num_observers -inline size_t Executor::num_observers() const { - return _observers.size(); -} - -// Procedure: _schedule -// The main procedure to schedule a give task node. -// Each task node has two types of tasks - regular and subflow. -inline void Executor::_schedule(Node* node) { - - //assert(_workers.size() != 0); - - const auto d = node->domain(); - - // caller is a worker to this pool - auto worker = _per_thread().worker; - - if(worker != nullptr && worker->executor == this) { - worker->wsq[d].push(node); - if(worker->domain != d) { - if(_num_actives[d] == 0 && _num_thieves[d] == 0) { - _notifier[d].notify(false); - } - } - return; - } - - // other threads - { - std::lock_guard lock(_wsq_mutex); - _wsq[d].push(node); - } - - _notifier[d].notify(false); -} - -// Procedure: _schedule -// The main procedure to schedule a set of task nodes. -// Each task node has two types of tasks - regular and subflow. -inline void Executor::_schedule(PassiveVector& nodes) { - - //assert(_workers.size() != 0); - - // We need to cacth the node count to avoid accessing the nodes - // vector while the parent topology is removed! - const auto num_nodes = nodes.size(); - - if(num_nodes == 0) { - return; - } - - // worker thread - auto worker = _per_thread().worker; - - // task counts - size_t tcount[NUM_DOMAINS] = {0}; - - if(worker != nullptr && worker->executor == this) { - for(size_t i=0; idomain(); - worker->wsq[d].push(nodes[i]); - tcount[d]++; - } - - for(int d=0; ddomain) { - if(_num_actives[d] == 0 && _num_thieves[d] == 0) { - _notifier[d].notify_n(tcount[d]); - } - } - } - - return; - } - - // other threads - { - std::lock_guard lock(_wsq_mutex); - for(size_t k=0; kdomain(); - _wsq[d].push(nodes[k]); - tcount[d]++; - } - } - - for(int d=0; dnum_successors(); - - // acquire the parent flow counter - auto& c = (node->_parent) ? 
node->_parent->_join_counter : - node->_topology->_join_counter; - - // switch is faster than nested if-else due to jump table - switch(node->_handle.index()) { - // static task - case Node::STATIC_WORK:{ - _invoke_static_work(worker, node); - } - break; - - // module task - case Node::MODULE_WORK: { - _invoke_module_work(worker, node); - } - break; - - // dynamic task - case Node::DYNAMIC_WORK: { - _invoke_dynamic_work(worker, node); - } - break; - - // condition task - case Node::CONDITION_WORK: { - _invoke_condition_work(worker, node); - return ; - } // no need to add a break here due to the immediate return - - // cudaflow task -#ifdef TF_ENABLE_CUDA - case Node::CUDAFLOW_WORK: { - _invoke_cudaflow_work(worker, node); - } - break; -#endif - - // monostate - default: - break; - } - - // We MUST recover the dependency since subflow may have - // a condition node to go back (cyclic). - // This must be done before scheduling the successors, otherwise this might cause - // race condition on the _dependents - if(node->_has_state(Node::BRANCH)) { - // If this is a case node, we need to deduct condition predecessors - node->_join_counter = node->num_strong_dependents(); - } - else { - node->_join_counter = node->num_dependents(); - } - - // At this point, the node storage might be destructed. - for(size_t i=0; i_successors[i]->_join_counter) == 0) { - c.fetch_add(1); - _schedule(node->_successors[i]); - } - } -} - -// Procedure: _observer_prologue -inline void Executor::_observer_prologue(Worker& worker, Node* node) { - for(auto& observer : _observers) { - observer->on_entry(worker.id, TaskView(node)); - } -} - -// Procedure: _observer_epilogue -inline void Executor::_observer_epilogue(Worker& worker, Node* node) { - for(auto& observer : _observers) { - observer->on_exit(worker.id, TaskView(node)); - } -} - -// Procedure: _invoke_static_work -inline void Executor::_invoke_static_work(Worker& worker, Node* node) { - _observer_prologue(worker, node); - nstd::get(node->_handle).work(); - _observer_epilogue(worker, node); -} - -// Procedure: _invoke_dynamic_work -inline void Executor::_invoke_dynamic_work(Worker& w, Node* node) { - - _observer_prologue(w, node); - - auto& handle = nstd::get(node->_handle); - - handle.subgraph.clear(); - - Subflow fb(*this, node, handle.subgraph); - - handle.work(fb); - - if(!fb._joined) { - _invoke_dynamic_work_internal(w, node, handle.subgraph, fb._detach); - } - - // TODO - _observer_epilogue(w, node); -} - -// Procedure: _invoke_dynamic_work_external -inline void Executor::_invoke_dynamic_work_external(Graph& g, Node* p) { - - auto worker = _per_thread().worker; - - assert(worker && worker->executor == this); - - _invoke_dynamic_work_internal(*worker, p, g, false); -} - -// Procedure: _invoke_dynamic_work_internal -inline void Executor::_invoke_dynamic_work_internal(Worker& w, Node* p, Graph& g, bool d) { - - if(!g.empty()) { - - PassiveVector src; - - for(auto n : g._nodes) { - - n->_topology = p->_topology; - n->_set_up_join_counter(); - n->_parent = d ? nullptr : p; - - if(n->num_dependents() == 0) { - src.push_back(n); - } - } - - // detach here - if(d) { - p->_topology->_join_counter.fetch_add(src.size()); - _schedule(src); - } - // join here - else { - p->_join_counter.fetch_add(src.size()); - _schedule(src); - Node* t = nullptr; - - do { - t = w.wsq[w.domain].pop(); - - if(t) { - _invoke(w, t); - t->_parent ? 
t->_parent->_join_counter.fetch_sub(1) : - t->_topology->_join_counter.fetch_sub(1); - } - - } while(p->_join_counter != 0); - } - } -} - -// Procedure: _invoke_condition_work -inline void Executor::_invoke_condition_work(Worker& worker, Node* node) { - - _observer_prologue(worker, node); - - if(node->_has_state(Node::BRANCH)) { - node->_join_counter = node->num_strong_dependents(); - } - else { - node->_join_counter = node->num_dependents(); - } - - auto id = nstd::get(node->_handle).work(); - - if(id >= 0 && static_cast(id) < node->num_successors()) { - auto s = node->_successors[id]; - s->_join_counter.store(0); - - node->_parent ? node->_parent->_join_counter.fetch_add(1) : - node->_topology->_join_counter.fetch_add(1); - _schedule(s); - - - //if(s->domain() == worker.domain) { - // _schedule(s, true); - //} - //else { - // node->_parent ? node->_parent->_join_counter.fetch_add(1) : - // node->_topology->_join_counter.fetch_add(1); - // _schedule(s, false); - //} - } - - _observer_epilogue(worker, node); -} - -#ifdef TF_ENABLE_CUDA -// Procedure: _invoke_cudaflow_work -inline void Executor::_invoke_cudaflow_work(Worker& worker, Node* node) { - _observer_prologue(worker, node); - _invoke_cudaflow_work_internal(worker, node); - _observer_epilogue(worker, node); -} - -// Procedure: _invoke_cudaflow_work_internal -inline void Executor::_invoke_cudaflow_work_internal(Worker& w, Node* node) { - - assert(w.domain == node->domain()); - - auto& h = nstd::get(node->_handle); - - h.graph.clear(); - - cudaFlow cf(h.graph, [repeat=1] () mutable { return repeat-- == 0; }); - - h.work(cf); - - if(h.graph.empty()) { - return; - } - - // transforms cudaFlow to a native cudaGraph under the specified device - // and launches the graph through a given or an internal device stream - const int d = cf._device; - - cudaScopedDevice ctx(d); - - auto s = cf._stream ? *(cf._stream) : - _cuda_devices[d].streams[w.id - _id_offset[w.domain]]; - - h.graph._make_native_graph(); - - cudaGraphExec_t exec; - - TF_CHECK_CUDA( - cudaGraphInstantiate(&exec, h.graph._native_handle, nullptr, nullptr, 0), - "failed to create an executable cudaGraph" - ); - - while(!cf._predicate()) { - TF_CHECK_CUDA( - cudaGraphLaunch(exec, s), "failed to launch cudaGraph on stream ", s - ); - - TF_CHECK_CUDA( - cudaStreamSynchronize(s), "failed to synchronize stream ", s - ); - } - - TF_CHECK_CUDA( - cudaGraphExecDestroy(exec), "failed to destroy an executable cudaGraph" - ); -} -#endif - -// Procedure: _invoke_module_work -inline void Executor::_invoke_module_work(Worker& w, Node* node) { - - _observer_prologue(w, node); - - auto module = nstd::get(node->_handle).module; - - _invoke_dynamic_work_internal(w, node, module->_graph, false); - - _observer_epilogue(w, node); -} - -// Function: run -inline std::future Executor::run(Taskflow& f) { - return run_n(f, 1, [](){}); -} - -// Function: run -template -std::future Executor::run(Taskflow& f, C&& c) { - return run_n(f, 1, std::forward(c)); -} - -// Function: run_n -inline std::future Executor::run_n(Taskflow& f, size_t repeat) { - return run_n(f, repeat, [](){}); -} - -// Function: run_n -template -std::future Executor::run_n(Taskflow& f, size_t repeat, C&& c) { - return run_until(f, [repeat]() mutable { return repeat-- == 0; }, std::forward(c)); -} - -// Function: run_until -template -std::future Executor::run_until(Taskflow& f, P&& pred) { - return run_until(f, std::forward
<P>
(pred), [](){}); -} - -// Function: _set_up_topology -inline void Executor::_set_up_topology(Topology* tpg) { - - tpg->_sources.clear(); - - // scan each node in the graph and build up the links - for(auto node : tpg->_taskflow._graph._nodes) { - - node->_topology = tpg; - node->_clear_state(); - - if(node->num_dependents() == 0) { - tpg->_sources.push_back(node); - } - - node->_set_up_join_counter(); - } - - tpg->_join_counter.store(tpg->_sources.size(), std::memory_order_relaxed); -} - -// Function: _tear_down_topology -inline void Executor::_tear_down_topology(Topology* tpg) { - - auto &f = tpg->_taskflow; - - //assert(&tpg == &(f._topologies.front())); - - // case 1: we still need to run the topology again - if(! tpg->_pred() ) { - //tpg->_recover_num_sinks(); - - assert(tpg->_join_counter == 0); - tpg->_join_counter = tpg->_sources.size(); - - _schedule(tpg->_sources); - } - // case 2: the final run of this topology - else { - - if(tpg->_call != nullptr) { - tpg->_call(); - } - - f._mtx.lock(); - - // If there is another run (interleave between lock) - if(f._topologies.size() > 1) { - - assert(tpg->_join_counter == 0); - - // Set the promise - tpg->_promise.set_value(); - f._topologies.pop_front(); - f._mtx.unlock(); - - // decrement the topology but since this is not the last we don't notify - _decrement_topology(); - - tpg = &(f._topologies.front()); - - _set_up_topology(tpg); - _schedule(tpg->_sources); - - //f._topologies.front()._bind(f._graph); - //*tpg = &(f._topologies.front()); - - //assert(f._topologies.front()._join_counter == 0); - - //f._topologies.front()._join_counter = f._topologies.front()._sources.size(); - - //_schedule(f._topologies.front()._sources); - } - else { - assert(f._topologies.size() == 1); - - // Need to back up the promise first here becuz taskflow might be - // destroy before taskflow leaves - auto p {std::move(tpg->_promise)}; - - f._topologies.pop_front(); - - f._mtx.unlock(); - - // We set the promise in the end in case taskflow leaves before taskflow - p.set_value(); - - _decrement_topology_and_notify(); - } - } -} - -// Function: run_until -template -std::future Executor::run_until(Taskflow& f, P&& pred, C&& c) { - - _increment_topology(); - - // Special case of predicate - if(f.empty() || pred()) { - std::promise promise; - promise.set_value(); - _decrement_topology_and_notify(); - return promise.get_future(); - } - - // Multi-threaded execution. - bool run_now {false}; - Topology* tpg; - std::future future; - - { - std::lock_guard lock(f._mtx); - - // create a topology for this run - //tpg = &(f._topologies.emplace_back(f, std::forward
<P>(pred), std::forward<C>(c))); - f._topologies.emplace_back(f, std::forward
<P>
(pred), std::forward(c)); - tpg = &(f._topologies.back()); - future = tpg->_promise.get_future(); - - if(f._topologies.size() == 1) { - run_now = true; - //tpg->_bind(f._graph); - //_schedule(tpg->_sources); - } - } - - // Notice here calling schedule may cause the topology to be removed sonner - // before the function leaves. - if(run_now) { - _set_up_topology(tpg); - _schedule(tpg->_sources); - } - - return future; -} - -// Procedure: _increment_topology -inline void Executor::_increment_topology() { - std::lock_guard lock(_topology_mutex); - ++_num_topologies; -} - -// Procedure: _decrement_topology_and_notify -inline void Executor::_decrement_topology_and_notify() { - std::lock_guard lock(_topology_mutex); - if(--_num_topologies == 0) { - _topology_cv.notify_all(); - } -} - -// Procedure: _decrement_topology -inline void Executor::_decrement_topology() { - std::lock_guard lock(_topology_mutex); - --_num_topologies; -} - -// Procedure: wait_for_all -inline void Executor::wait_for_all() { - std::unique_lock lock(_topology_mutex); - _topology_cv.wait(lock, [&](){ return _num_topologies == 0; }); -} - -// ---------------------------------------------------------------------------- -// Cyclic Dependency -// ---------------------------------------------------------------------------- - -inline void Subflow::join() { - - if(_joined) { - TF_THROW("subflow already joined"); - } - - _executor._invoke_dynamic_work_external(_graph, _parent); - _joined = true; -} - -} // end of namespace tf ----------------------------------------------------- - - - - - - - - - - - diff --git a/bundled/taskflow-2.5.0/include/taskflow/core/flow_builder.hpp b/bundled/taskflow-2.5.0/include/taskflow/core/flow_builder.hpp deleted file mode 100644 index e554730ef0..0000000000 --- a/bundled/taskflow-2.5.0/include/taskflow/core/flow_builder.hpp +++ /dev/null @@ -1,949 +0,0 @@ -#pragma once - -#include "task.hpp" - -namespace tf { - -/** -@class FlowBuilder - -@brief building methods of a task dependency graph - -*/ -class FlowBuilder { - - friend class Executor; - - public: - - /** - @brief creates a static task from a given callable object - - @tparam C callable type - - @param callable a callable object constructible from std::function - - @return Task handle - */ - template - std::enable_if_t, Task> emplace(C&& callable); - - /** - @brief creates a dynamic task from a given callable object - - @tparam C callable type - - @param callable a callable object constructible from std::function - - @return Task handle - */ - template - std::enable_if_t, Task> emplace(C&& callable); - - /** - @brief creates a condition task from a given callable object - - @tparam C callable type - - @param callable a callable object constructible from std::function - - @return Task handle - */ - template - std::enable_if_t, Task> emplace(C&& callable); - -#ifdef TF_ENABLE_CUDA - /** - @brief creates a cudaflow task from a given callable object - - @tparam C callable type - - @param callable a callable object constructible from std::function - - @return Task handle - */ - template - std::enable_if_t, Task> emplace(C&& callable); -#endif - - /** - @brief creates multiple tasks from a list of callable objects - - @tparam C... callable types - - @param callables one or multiple callable objects constructible from each task category - - @return a Task handle - */ - template 1), void>* = nullptr> - auto emplace(C&&... 
callables); - - /** - @brief creates a module task from a taskflow - - @param taskflow a taskflow object for the module - @return a Task handle - */ - Task composed_of(Taskflow& taskflow); - - /** - @brief constructs a task dependency graph of range-based parallel_for - - The task dependency graph applies the callable object - @p callable to each object obtained by dereferencing - every iterator in the range [beg, end). The range - is split into chunks of size @p chunk, where each of them - is processed by one Task. - - The callable needs to accept a single argument, the object in the range. - - @tparam I input iterator type - @tparam C callable type - - @param beg iterator to the beginning (inclusive) - @param end iterator to the end (exclusive) - @param callable a callable object to be applied to - @param chunk size (default 1) - - @return a pair of Task handles to the beginning and the end of the graph - */ - template - std::pair parallel_for(I beg, I end, C&& callable, size_t chunk=1); - - /** - @brief constructs a task dependency graph of integer index-based parallel_for - - The task dependency graph applies a callable object to every index - in the range [beg, end) with a step size chunk by chunk. - - @tparam I integer (arithmetic) index type - @tparam C callable type - - @param beg index of the beginning (inclusive) - @param end index of the end (exclusive) - @param step step size - @param callable a callable object to be applied to - @param chunk items per task - - @return a pair of Task handles to the beginning and the end of the graph - */ - template < - typename I, - typename C, - std::enable_if_t>::value, void>* = nullptr - > - std::pair parallel_for( - I beg, I end, I step, C&& callable, size_t chunk = 1 - ); - - /** - @brief constructs a task dependency graph of floating index-based parallel_for - - The task dependency graph applies a callable object to every index - in the range [beg, end) with a step size chunk by chunk. - - @tparam I floating (arithmetic) index type - @tparam C callable type - - @param beg index of the beginning (inclusive) - @param end index of the end (exclusive) - @param step step size - @param callable a callable object to be applied to - @param chunk items per task - - @return a pair of Task handles to the beginning and the end of the graph - */ - template < - typename I, - typename C, - std::enable_if_t>::value, void>* = nullptr - > - std::pair parallel_for( - I beg, I end, I step, C&& callable, size_t chunk = 1 - ); - - /** - @brief construct a task dependency graph of parallel reduction - - The task dependency graph reduces items in the range [beg, end) to a single result. - - @tparam I input iterator type - @tparam T data type - @tparam B binary operator type - - @param beg iterator to the beginning (inclusive) - @param end iterator to the end (exclusive) - @param result reference variable to store the final result - @param bop binary operator that will be applied in unspecified order to the result - of dereferencing the input iterator - - @return a pair of Task handles to the beginning and the end of the graph - */ - template - std::pair reduce(I beg, I end, T& result, B&& bop); - - /** - @brief constructs a task dependency graph of parallel reduction through @std_min - - The task dependency graph applies a parallel reduction - to find the minimum item in the range [beg, end) through @std_min reduction. 
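// A minimal usage sketch of the reduce_min interface documented above
// (illustrative code, not part of this header; assumes the bundled
// <taskflow/taskflow.hpp> umbrella header):
#include <taskflow/taskflow.hpp>
#include <cstdio>
#include <limits>
#include <vector>

int main() {
  std::vector<int> data {3, 1, 4, 1, 5};
  int minimum = std::numeric_limits<int>::max();  // the seed value joins the reduction

  tf::Taskflow taskflow;
  // the returned pair brackets the reduction graph: first precedes every
  // per-chunk task and second succeeds all of them
  std::pair<tf::Task, tf::Task> st =
    taskflow.reduce_min(data.begin(), data.end(), minimum);

  // read the result only after the reduction has finished
  tf::Task report = taskflow.emplace([&](){ std::printf("min = %d\n", minimum); });
  st.second.precede(report);

  tf::Executor executor;
  executor.run(taskflow).wait();
  return 0;
}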
- - @tparam I input iterator type - @tparam T data type - - @param beg iterator to the beginning (inclusive) - @param end iterator to the end (exclusive) - @param result reference variable to store the final result - - @return a pair of Task handles to the beginning and the end of the graph - */ - template - std::pair reduce_min(I beg, I end, T& result); - - /** - @brief constructs a task dependency graph of parallel reduction through @std_max - - The task dependency graph applies a parallel reduction - to find the maximum item in the range [beg, end) through @std_max reduction. - - @tparam I input iterator type - @tparam T data type - - @param beg iterator to the beginning (inclusive) - @param end iterator to the end (exclusive) - @param result reference variable to store the final result - - @return a pair of Task handles to the beginning and the end of the graph - */ - template - std::pair reduce_max(I beg, I end, T& result); - - /** - @brief constructs a task dependency graph of parallel transformation and reduction - - The task dependency graph transforms each item in the range [beg, end) - into a new data type and then reduce the results. - - @tparam I input iterator type - @tparam T data type - @tparam B binary operator - @tparam U unary operator type - - @param beg iterator to the beginning (inclusive) - @param end iterator to the end (exclusive) - @param result reference variable to store the final result - @param bop binary function object that will be applied in unspecified order - to the results of @c uop; the return type must be @c T - @param uop unary function object that transforms each element - in the input range; the return type must be acceptable as input to @c bop - - @return a pair of Task handles to the beginning and the end of the graph - */ - template - std::pair transform_reduce(I beg, I end, T& result, B&& bop, U&& uop); - - /** - @brief constructs a task dependency graph of parallel transformation and reduction - - The task dependency graph transforms each item in the range [beg, end) - into a new data type and then apply two-layer reductions to derive the result. 
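// Illustrative sketch of the two-layer transform_reduce documented above
// (hypothetical data; the seed value of `total` also enters the final merge):
#include <taskflow/taskflow.hpp>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> words {"alpha", "beta", "gamma"};
  std::size_t total = 0;

  tf::Taskflow taskflow;
  taskflow.transform_reduce(
    words.begin(), words.end(), total,
    [](std::size_t a, std::size_t b) { return a + b; },                    // bop1: merge partial sums
    [](std::size_t acc, const std::string& w) { return acc + w.size(); },  // bop2: fold raw items into a partial
    [](const std::string& w) { return w.size(); });                        // uop: seed a partial from a chunk's first item

  tf::Executor executor;
  executor.run(taskflow).wait();  // total == 14 afterwards
  return 0;
}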
- - @tparam I input iterator type - @tparam T data type - @tparam B binary operator type - @tparam P binary operator type - @tparam U unary operator type - - @param beg iterator to the beginning (inclusive) - @param end iterator to the end (exclusive) - @param result reference variable to store the final result - @param bop1 binary function object that will be applied in the second-layer reduction - to the results of @c bop2 - @param bop2 binary function object that will be applied in the first-layer reduction - to the results of @c uop and the dereferencing of input iterators - @param uop unary function object that will be applied to transform an item to a new - data type that is acceptable as input to @c bop2 - - @return a pair of Task handles to the beginning and the end of the graph - */ - template - std::pair transform_reduce( - I beg, I end, T& result, B&& bop1, P&& bop2, U&& uop - ); - - /** - @brief creates an empty task - - @return a Task handle - */ - Task placeholder(); - - /** - @brief adds a dependency link from task A to task B - - @param A task A - @param B task B - */ - void precede(Task A, Task B); - - /** - @brief adds adjacent dependency links to a linear list of tasks - - @param tasks a vector of tasks - */ - void linearize(std::vector& tasks); - - /** - @brief adds adjacent dependency links to a linear list of tasks - - @param tasks an initializer list of tasks - */ - void linearize(std::initializer_list tasks); - - /** - @brief adds dependency links from one task A to many tasks - - @param A task A - @param others a task set which A precedes - */ - void broadcast(Task A, std::vector& others); - - /** - @brief adds dependency links from one task A to many tasks - - @param A task A - @param others a task set which A precedes - */ - void broadcast(Task A, std::initializer_list others); - - /** - @brief adds dependency links from many tasks to one task A - - @param others a task set to precede A - @param A task A - */ - void succeed(std::vector& others, Task A); - - /** - @brief adds dependency links from many tasks to one task A - - @param others a task set to precede A - @param A task A - */ - void succeed(std::initializer_list others, Task A); - - protected: - - /** - @brief constructs a flow builder with a graph - */ - FlowBuilder(Graph& graph); - - /** - @brief associated graph object - */ - Graph& _graph; - - private: - - template - void _linearize(L&); -}; - -// Constructor -inline FlowBuilder::FlowBuilder(Graph& graph) : - _graph {graph} { -} - -// Function: emplace -template 1), void>*> -auto FlowBuilder::emplace(C&&... 
cs) { - return std::make_tuple(emplace(std::forward(cs))...); -} - -// Function: emplace -// emplaces a static task -template -std::enable_if_t, Task> FlowBuilder::emplace(C&& c) { - auto n = _graph.emplace_back( - nstd::in_place_type_t{}, std::forward(c) - ); - return Task(n); -} - -// Function: emplace -// emplaces a dynamic task -template -std::enable_if_t, Task> FlowBuilder::emplace(C&& c) { - auto n = _graph.emplace_back( - nstd::in_place_type_t{}, std::forward(c) - ); - return Task(n); -} - -// Function: emplace -// emplaces a condition task -template -std::enable_if_t, Task> FlowBuilder::emplace(C&& c) { - auto n = _graph.emplace_back( - nstd::in_place_type_t{}, std::forward(c) - ); - return Task(n); -} - -#ifdef TF_ENABLE_CUDA -// Function: emplace -// emplaces a cudaflow task -template -std::enable_if_t, Task> FlowBuilder::emplace(C&& c) { - auto n = _graph.emplace_back( - nstd::in_place_type_t{}, std::forward(c) - ); - return Task(n); -} -#endif - -// Function: composed_of -inline Task FlowBuilder::composed_of(Taskflow& taskflow) { - auto node = _graph.emplace_back( - nstd::in_place_type_t{}, &taskflow - ); - return Task(node); -} - -// Procedure: precede -inline void FlowBuilder::precede(Task from, Task to) { - from._node->_precede(to._node); -} - -// Procedure: broadcast -inline void FlowBuilder::broadcast(Task from, std::vector& tos) { - for(auto to : tos) { - from.precede(to); - } -} - -// Procedure: broadcast -inline void FlowBuilder::broadcast(Task from, std::initializer_list tos) { - for(auto to : tos) { - from.precede(to); - } -} - -// Function: succeed -inline void FlowBuilder::succeed(std::vector& froms, Task to) { - for(auto from : froms) { - to.succeed(from); - } -} - -// Function: succeed -inline void FlowBuilder::succeed(std::initializer_list froms, Task to) { - for(auto from : froms) { - to.succeed(from); - } -} - -// Function: placeholder -inline Task FlowBuilder::placeholder() { - auto node = _graph.emplace_back(); - return Task(node); -} - -// Function: parallel_for -template -std::pair FlowBuilder::parallel_for( - I beg, I end, C&& c, size_t chunk -){ - - //using category = typename std::iterator_traits::iterator_category; - - auto S = placeholder(); - auto T = placeholder(); - - // default partition equals to the worker count - if(chunk == 0) { - chunk = 1; - } - - size_t remain = std::distance(beg, end); - - while(beg != end) { - - auto e = beg; - - auto x = std::min(remain, chunk); - std::advance(e, x); - remain -= x; - - // Create a task - auto task = emplace([beg, e, c] () mutable { - std::for_each(beg, e, c); - }); - - S.precede(task); - task.precede(T); - - // adjust the pointer - beg = e; - } - - // special case - if(S.num_successors() == 0) { - S.precede(T); - } - - return std::make_pair(S, T); -} - -// Function: parallel_for -template < - typename I, - typename C, - std::enable_if_t>::value, void>* -> -std::pair FlowBuilder::parallel_for(I beg, I end, I s, C&& c, size_t chunk) { - - if((s == 0) || (beg < end && s <= 0) || (beg > end && s >=0) ) { - TF_THROW("invalid range [", beg, ", ", end, ") with step size ", s); - } - - // source and target - auto source = placeholder(); - auto target = placeholder(); - - if(chunk == 0) { - chunk = 1; - } - - // positive case - if(beg < end) { - while(beg != end) { - auto o = static_cast(chunk) * s; - auto e = std::min(beg + o, end); - auto task = emplace([=] () mutable { - for(auto i=beg; i end) { - while(beg != end) { - auto o = static_cast(chunk) * s; - auto e = std::max(beg + o, end); - auto task = emplace([=] 
() mutable { - for(auto i=beg; i>e; i+=s) { - c(i); - } - }); - source.precede(task); - task.precede(target); - beg = e; - } - } - - if(source.num_successors() == 0) { - source.precede(target); - } - - return std::make_pair(source, target); -} - -// Function: parallel_for -template >::value, void>* -> -std::pair FlowBuilder::parallel_for(I beg, I end, I s, C&& c, size_t chunk) { - - if((s == 0) || (beg < end && s <= 0) || (beg > end && s >=0) ) { - TF_THROW("invalid range [", beg, ", ", end, ") with step size ", s); - } - - // source and target - auto source = placeholder(); - auto target = placeholder(); - - if(chunk == 0) { - chunk = 1; - } - - // positive case - if(beg < end) { - size_t N=0; - I b = beg; - for(I e=beg; e end) { - size_t N=0; - I b = beg; - for(I e=beg; e>end; e+=s) { - if(++N == chunk) { - auto task = emplace([=] () mutable { - for(size_t i=0; i -std::pair FlowBuilder::reduce_min(I beg, I end, T& result) { - return reduce(beg, end, result, [] (const auto& l, const auto& r) { - return std::min(l, r); - }); -} - -// Function: reduce_max -// Find the maximum element over a range of items. -template -std::pair FlowBuilder::reduce_max(I beg, I end, T& result) { - return reduce(beg, end, result, [] (const auto& l, const auto& r) { - return std::max(l, r); - }); -} - -// Function: transform_reduce -template -std::pair FlowBuilder::transform_reduce( - I beg, I end, T& result, B&& bop, U&& uop -) { - - //using category = typename std::iterator_traits::iterator_category; - - // Even partition - size_t d = std::distance(beg, end); - size_t w = std::max(unsigned{1}, std::thread::hardware_concurrency()); - size_t g = std::max((d + w - 1) / w, size_t{2}); - - auto source = placeholder(); - auto target = placeholder(); - - //std::vector> futures; - auto g_results = std::make_unique(w); - size_t id {0}; - - size_t remain = d; - - while(beg != end) { - - auto e = beg; - - size_t x = std::min(remain, g); - std::advance(e, x); - remain -= x; - - // Create a task - auto task = emplace([beg, e, bop, uop, res=&(g_results[id])] () mutable { - *res = uop(*beg); - for(++beg; beg != e; ++beg) { - *res = bop(std::move(*res), uop(*beg)); - } - }); - - source.precede(task); - task.precede(target); - - // adjust the pointer - beg = e; - id ++; - } - - // target synchronizer - target.work([&result, bop, res=make_moc(std::move(g_results)), w=id] () { - for(auto i=0u; i -std::pair FlowBuilder::transform_reduce( - I beg, I end, T& result, B&& bop, P&& pop, U&& uop -) { - - //using category = typename std::iterator_traits::iterator_category; - - // Even partition - size_t d = std::distance(beg, end); - size_t w = std::max(unsigned{1}, std::thread::hardware_concurrency()); - size_t g = std::max((d + w - 1) / w, size_t{2}); - - auto source = placeholder(); - auto target = placeholder(); - - auto g_results = std::make_unique(w); - - size_t id {0}; - size_t remain = d; - - while(beg != end) { - - auto e = beg; - - size_t x = std::min(remain, g); - std::advance(e, x); - remain -= x; - - // Create a task - auto task = emplace([beg, e, uop, pop, res= &g_results[id]] () mutable { - *res = uop(*beg); - for(++beg; beg != e; ++beg) { - *res = pop(std::move(*res), *beg); - } - }); - source.precede(task); - task.precede(target); - - // adjust the pointer - beg = e; - id ++; - } - - // target synchronizer - target.work([&result, bop, g_results=make_moc(std::move(g_results)), w=id] () { - for(auto i=0u; i -void FlowBuilder::_linearize(L& keys) { - - auto itr = keys.begin(); - auto end = keys.end(); - - if(itr == end) { 
- return; - } - - auto nxt = itr; - - for(++nxt; nxt != end; ++nxt, ++itr) { - itr->_node->_precede(nxt->_node); - } -} - -// Procedure: linearize -inline void FlowBuilder::linearize(std::vector& keys) { - _linearize(keys); -} - -// Procedure: linearize -inline void FlowBuilder::linearize(std::initializer_list keys) { - _linearize(keys); -} - -// Proceduer: reduce -template -std::pair FlowBuilder::reduce(I beg, I end, T& result, B&& op) { - - //using category = typename std::iterator_traits::iterator_category; - - size_t d = std::distance(beg, end); - size_t w = std::max(unsigned{1}, std::thread::hardware_concurrency()); - size_t g = std::max((d + w - 1) / w, size_t{2}); - - auto source = placeholder(); - auto target = placeholder(); - - //T* g_results = static_cast(malloc(sizeof(T)*w)); - auto g_results = std::make_unique(w); - //std::vector> futures; - - size_t id {0}; - size_t remain = d; - - while(beg != end) { - - auto e = beg; - - size_t x = std::min(remain, g); - std::advance(e, x); - remain -= x; - - // Create a task - //auto [task, future] = emplace([beg, e, op] () mutable { - auto task = emplace([beg, e, op, res = &g_results[id]] () mutable { - *res = *beg; - for(++beg; beg != e; ++beg) { - *res = op(std::move(*res), *beg); - } - //auto init = *beg; - //for(++beg; beg != e; ++beg) { - // init = op(std::move(init), *beg); - //} - //return init; - }); - source.precede(task); - task.precede(target); - //futures.push_back(std::move(future)); - - // adjust the pointer - beg = e; - id ++; - } - - // target synchronizer - //target.work([&result, futures=MoC{std::move(futures)}, op] () { - // for(auto& fu : futures.object) { - // result = op(std::move(result), fu.get()); - // } - //}); - target.work([g_results=make_moc(std::move(g_results)), &result, op, w=id] () { - for(auto i=0u; i - Node* emplace_back(Args&& ...); - - Node* emplace_back(); - - private: - - static ObjectPool& _node_pool(); - - std::vector _nodes; -}; - -// ---------------------------------------------------------------------------- - -// Class: Node -class Node { - - friend class Task; - friend class TaskView; - friend class Topology; - friend class Taskflow; - friend class Executor; - friend class FlowBuilder; - friend class Subflow; - - TF_ENABLE_POOLABLE_ON_THIS; - - // state bit flag - constexpr static int BRANCH = 0x1; - - // static work handle - struct StaticWork { - - template - StaticWork(C&&); - - std::function work; - }; - - // dynamic work handle - struct DynamicWork { - - template - DynamicWork(C&&); - - std::function work; - Graph subgraph; - }; - - // condition work handle - struct ConditionWork { - - template - ConditionWork(C&&); - - std::function work; - }; - - // module work handle - struct ModuleWork { - - template - ModuleWork(T&&); - - Taskflow* module {nullptr}; - }; - - // cudaFlow work handle -#ifdef TF_ENABLE_CUDA - struct cudaFlowWork { - - template - cudaFlowWork(C&& c) : work {std::forward(c)} {} - - std::function work; - - cudaGraph graph; - }; -#endif - - using handle_t = nstd::variant< - nstd::monostate, // placeholder -#ifdef TF_ENABLE_CUDA - cudaFlowWork, // cudaFlow -#endif - StaticWork, // static tasking - DynamicWork, // dynamic tasking - ConditionWork, // conditional tasking - ModuleWork // composable tasking - >; - - public: - - // variant index - constexpr static auto PLACEHOLDER_WORK = get_index_v; - constexpr static auto STATIC_WORK = get_index_v; - constexpr static auto DYNAMIC_WORK = get_index_v; - constexpr static auto CONDITION_WORK = get_index_v; - constexpr static auto 
MODULE_WORK = get_index_v; - -#ifdef TF_ENABLE_CUDA - constexpr static auto CUDAFLOW_WORK = get_index_v; -#endif - - template - Node(Args&&... args); - - ~Node(); - - size_t num_successors() const; - size_t num_dependents() const; - size_t num_strong_dependents() const; - size_t num_weak_dependents() const; - - const std::string& name() const; - - Domain domain() const; - - private: - - std::string _name; - - handle_t _handle; - - PassiveVector _successors; - PassiveVector _dependents; - - Topology* _topology {nullptr}; - - Node* _parent {nullptr}; - - int _state {0}; - - std::atomic _join_counter {0}; - - void _precede(Node*); - void _set_state(int); - void _unset_state(int); - void _clear_state(); - void _set_up_join_counter(); - - bool _has_state(int) const; - -}; - -// ---------------------------------------------------------------------------- -// Definition for Node::StaticWork -// ---------------------------------------------------------------------------- - -// Constructor -template -Node::StaticWork::StaticWork(C&& c) : work {std::forward(c)} { -} - -// ---------------------------------------------------------------------------- -// Definition for Node::DynamicWork -// ---------------------------------------------------------------------------- - -// Constructor -template -Node::DynamicWork::DynamicWork(C&& c) : work {std::forward(c)} { -} - -// ---------------------------------------------------------------------------- -// Definition for Node::ConditionWork -// ---------------------------------------------------------------------------- - -// Constructor -template -Node::ConditionWork::ConditionWork(C&& c) : work {std::forward(c)} { -} - -// ---------------------------------------------------------------------------- -// Definition for Node::ModuleWork -// ---------------------------------------------------------------------------- - -// Constructor -template -Node::ModuleWork::ModuleWork(T&& tf) : module {tf} { -} - -// ---------------------------------------------------------------------------- -// Definition for Node -// ---------------------------------------------------------------------------- - -// Constructor -template -Node::Node(Args&&... 
args): _handle{std::forward(args)...} { -} - -// Destructor -inline Node::~Node() { - // this is to avoid stack overflow - - if(_handle.index() == DYNAMIC_WORK) { - - auto& subgraph = nstd::get(_handle).subgraph; - - std::vector nodes; - - std::move( - subgraph._nodes.begin(), subgraph._nodes.end(), std::back_inserter(nodes) - ); - subgraph._nodes.clear(); - - size_t i = 0; - - while(i < nodes.size()) { - - if(nodes[i]->_handle.index() == DYNAMIC_WORK) { - - auto& sbg = nstd::get(nodes[i]->_handle).subgraph; - std::move( - sbg._nodes.begin(), sbg._nodes.end(), std::back_inserter(nodes) - ); - sbg._nodes.clear(); - } - - ++i; - } - - auto& np = Graph::_node_pool(); - for(i=0; i~Node(); - //np.deallocate(nodes[i]); - np.recycle(nodes[i]); - } - } -} - -// Procedure: _precede -inline void Node::_precede(Node* v) { - _successors.push_back(v); - v->_dependents.push_back(this); -} - -// Function: num_successors -inline size_t Node::num_successors() const { - return _successors.size(); -} - -// Function: dependents -inline size_t Node::num_dependents() const { - return _dependents.size(); -} - -// Function: num_weak_dependents -inline size_t Node::num_weak_dependents() const { - return std::count_if( - _dependents.begin(), - _dependents.end(), - [](Node* node){ return node->_handle.index() == Node::CONDITION_WORK; } - ); -} - -// Function: num_strong_dependents -inline size_t Node::num_strong_dependents() const { - return std::count_if( - _dependents.begin(), - _dependents.end(), - [](Node* node){ return node->_handle.index() != Node::CONDITION_WORK; } - ); -} - -// Function: name -inline const std::string& Node::name() const { - return _name; -} - -// Function: domain -inline Domain Node::domain() const { - - Domain domain; - - switch(_handle.index()) { - - case STATIC_WORK: - case DYNAMIC_WORK: - case CONDITION_WORK: - case MODULE_WORK: - domain = Domain::HOST; - break; - -#ifdef TF_ENABLE_CUDA - case CUDAFLOW_WORK: - domain = Domain::CUDA; - break; -#endif - - default: - domain = Domain::HOST; - break; - } - - return domain; -} - -// -//// Function: dump -//inline std::string Node::dump() const { -// std::ostringstream os; -// dump(os); -// return os.str(); -//} -// -//// Function: dump -//inline void Node::dump(std::ostream& os) const { -// -// os << 'p' << this << "[label=\""; -// if(_name.empty()) os << 'p' << this; -// else os << _name; -// os << "\" "; -// -// // condition node is colored green -// if(_handle.index() == CONDITION_WORK) { -// os << " shape=diamond color=black fillcolor=aquamarine style=filled"; -// } -// -// os << "];\n"; -// -// for(size_t s=0; s<_successors.size(); ++s) { -// if(_handle.index() == CONDITION_WORK) { -// // case edge is dashed -// os << 'p' << this << " -> p" << _successors[s] -// << " [style=dashed label=\"" << s << "\"];\n"; -// } -// else { -// os << 'p' << this << " -> p" << _successors[s] << ";\n"; -// } -// } -// -// // subflow join node -// if(_parent && _successors.size() == 0) { -// os << 'p' << this << " -> p" << _parent << ";\n"; -// } -// -// if(_subgraph && !_subgraph->empty()) { -// -// os << "subgraph cluster_p" << this << " {\nlabel=\"Subflow: "; -// if(_name.empty()) os << 'p' << this; -// else os << _name; -// -// os << "\";\n" << "color=blue\n"; -// -// for(const auto& n : _subgraph->nodes()) { -// n->dump(os); -// } -// os << "}\n"; -// } -//} - -// Procedure: _set_state -inline void Node::_set_state(int flag) { - _state |= flag; -} - -// Procedure: _unset_state -inline void Node::_unset_state(int flag) { - _state &= ~flag; -} - -// 
Procedure: _clear_state -inline void Node::_clear_state() { - _state = 0; -} - -// Procedure: _set_up_join_counter -inline void Node::_set_up_join_counter() { - - int c = 0; - - for(auto p : _dependents) { - if(p->_handle.index() == Node::CONDITION_WORK) { - _set_state(Node::BRANCH); - } - else { - c++; - } - } - - _join_counter.store(c, std::memory_order_relaxed); -} - -// Function: _has_state -inline bool Node::_has_state(int flag) const { - return _state & flag; -} - -// ---------------------------------------------------------------------------- -// Graph definition -// ---------------------------------------------------------------------------- - -// Function: _node_pool -inline ObjectPool& Graph::_node_pool() { - static ObjectPool pool; - return pool; -} - -// Destructor -inline Graph::~Graph() { - auto& np = _node_pool(); - for(auto node : _nodes) { - //node->~Node(); - //np.deallocate(node); - np.recycle(node); - } -} - -// Move constructor -inline Graph::Graph(Graph&& other) : - _nodes {std::move(other._nodes)} { -} - -// Move assignment -inline Graph& Graph::operator = (Graph&& other) { - _nodes = std::move(other._nodes); - return *this; -} - -// Procedure: clear -inline void Graph::clear() { - auto& np = _node_pool(); - for(auto node : _nodes) { - //node->~Node(); - //np.deallocate(node); - np.recycle(node); - } - _nodes.clear(); -} - -// Function: size -// query the size -inline size_t Graph::size() const { - return _nodes.size(); -} - -// Function: empty -// query the emptiness -inline bool Graph::empty() const { - return _nodes.empty(); -} - -// Function: emplace_back -// create a node from a give argument; constructor is called if necessary -template -Node* Graph::emplace_back(ArgsT&&... args) { - //auto node = _node_pool().allocate(); - //new (node) Node(std::forward(args)...); - //_nodes.push_back(node); - _nodes.push_back(_node_pool().animate(std::forward(args)...)); - return _nodes.back(); -} - -// Function: emplace_back -// create a node from a give argument; constructor is called if necessary -inline Node* Graph::emplace_back() { - //auto node = _node_pool().allocate(); - //new (node) Node(); - //_nodes.push_back(node); - _nodes.push_back(_node_pool().animate()); - return _nodes.back(); -} - - -} // end of namespace tf. --------------------------------------------------- - - - - - diff --git a/bundled/taskflow-2.5.0/include/taskflow/core/observer.hpp b/bundled/taskflow-2.5.0/include/taskflow/core/observer.hpp deleted file mode 100644 index be73a0f658..0000000000 --- a/bundled/taskflow-2.5.0/include/taskflow/core/observer.hpp +++ /dev/null @@ -1,521 +0,0 @@ -// 2020/04/30 - midified by Tsung-Wei Huang -// - adding TaskflowBoard support -// -// 2019/07/31 - modified by Tsung-Wei Huang -// - fixed the missing comma in outputing JSON -// -// 2019/06/13 - modified by Tsung-Wei Huang -// - added TaskView interface -// -// 2019/04/17 - created by Tsung-Wei Huang - -#pragma once - -#include "task.hpp" - -namespace tf { - -/** -@class: ObserverInterface - -@brief The interface class for creating an executor observer. - -The tf::ExecutorObserver class let users define methods to monitor the behaviors -of an executor. -This is particularly useful when you want to inspect the performance of an executor. 
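// A minimal sketch of a user-defined observer implementing the three hooks
// declared below (hypothetical class; attaching it assumes the executor's
// usual make_observer factory, which lives outside this header):
#include <taskflow/taskflow.hpp>
#include <atomic>
#include <cstdio>

struct CountingObserver : public tf::ObserverInterface {
  std::atomic<size_t> begun {0}, ended {0};
  void set_up(size_t num_workers) override { std::printf("%zu workers\n", num_workers); }
  void on_entry(size_t, tf::TaskView) override { ++begun; }
  void on_exit (size_t, tf::TaskView) override { ++ended; }
};

// Attaching and inspecting it might look like this (illustrative only):
//   tf::Executor executor;
//   auto observer = executor.make_observer<CountingObserver>();
//   executor.run(taskflow).wait();
//   // observer->begun == observer->ended once the run has completed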
-*/ -class ObserverInterface { - - friend class Executor; - - public: - - /** - @brief virtual destructor - */ - virtual ~ObserverInterface() = default; - - /** - @brief constructor-like method to call when the executor observer is fully created - @param num_workers the number of the worker threads in the executor - */ - virtual void set_up(size_t num_workers) = 0; - - /** - @brief method to call before a worker thread executes a closure - @param worker_id the id of this worker thread - @param task_view a constant wrapper object to the task - */ - virtual void on_entry(size_t worker_id, TaskView task_view) = 0; - - /** - @brief method to call after a worker thread executed a closure - @param worker_id the id of this worker thread - @param task_view a constant wrapper object to the task - */ - virtual void on_exit(size_t worker_id, TaskView task_view) = 0; -}; - -// ---------------------------------------------------------------------------- -// ChromeTracingObserver definition -// ---------------------------------------------------------------------------- - -/** -@class: ChromeTracingObserver - -@brief observer designed based on chrome tracing format - -*/ -class ChromeTracingObserver : public ObserverInterface { - - friend class Executor; - - // data structure to record each task execution - struct Segment { - - std::string name; - - std::chrono::time_point beg; - std::chrono::time_point end; - - Segment( - const std::string& n, - std::chrono::time_point b - ); - - Segment( - const std::string& n, - std::chrono::time_point b, - std::chrono::time_point e - ); - }; - - // data structure to store the entire execution timeline - struct Timeline { - std::chrono::time_point origin; - std::vector> segments; - std::vector>> stacks; - }; - - public: - - /** - @brief dump the timelines in JSON format to an ostream - @param ostream the target std::ostream to dump - */ - inline void dump(std::ostream& ostream) const; - - /** - @brief dump the timelines in JSON to a std::string - @return a JSON string - */ - inline std::string dump() const; - - /** - @brief clear the timeline data - */ - inline void clear(); - - /** - @brief get the number of total tasks in the observer - @return number of total tasks - */ - inline size_t num_tasks() const; - - private: - - inline void set_up(size_t num_workers) override final; - inline void on_entry(size_t worker_id, TaskView task_view) override final; - inline void on_exit(size_t worker_id, TaskView task_view) override final; - - Timeline _timeline; -}; - -// constructor -inline ChromeTracingObserver::Segment::Segment( - const std::string& n, - std::chrono::time_point b -) : - name {n}, beg {b} { -} - -// constructor -inline ChromeTracingObserver::Segment::Segment( - const std::string& n, - std::chrono::time_point b, - std::chrono::time_point e -) : - name {n}, beg {b}, end {e} { -} - -// Procedure: set_up -inline void ChromeTracingObserver::set_up(size_t num_workers) { - _timeline.segments.resize(num_workers); - _timeline.stacks.resize(num_workers); - - for(size_t w=0; w 0) { - break; - } - } - - os << '['; - - for(size_t w=first; w<_timeline.segments.size(); w++) { - - if(w != first && _timeline.segments[w].size() > 0) { - os << ','; - } - - for(size_t i=0; i<_timeline.segments[w].size(); i++) { - - os << '{' - << "\"cat\":\"ChromeTracingObserver\","; - - // name field - os << "\"name\":\""; - if(_timeline.segments[w][i].name.empty()) { - os << w << '_' << i; - } - else { - os << _timeline.segments[w][i].name; - } - os << "\","; - - // segment field - os << 
"\"ph\":\"X\"," - << "\"pid\":1," - << "\"tid\":" << w << ',' - << "\"ts\":" << std::chrono::duration_cast( - _timeline.segments[w][i].beg - _timeline.origin - ).count() << ',' - << "\"dur\":" << std::chrono::duration_cast( - _timeline.segments[w][i].end - _timeline.segments[w][i].beg - ).count(); - - if(i != _timeline.segments[w].size() - 1) { - os << "},"; - } - else { - os << '}'; - } - } - } - os << "]\n"; -} - -// Function: dump -inline std::string ChromeTracingObserver::dump() const { - std::ostringstream oss; - dump(oss); - return oss.str(); -} - -// Function: num_tasks -inline size_t ChromeTracingObserver::num_tasks() const { - return std::accumulate( - _timeline.segments.begin(), _timeline.segments.end(), size_t{0}, - [](size_t sum, const auto& exe){ - return sum + exe.size(); - } - ); -} - -// ---------------------------------------------------------------------------- -// TFProfObserver definition -// ---------------------------------------------------------------------------- - -/** -@class: TFProfObserver - -@brief observer designed based on taskflow board format - -*/ -class TFProfObserver : public ObserverInterface { - - friend class Executor; - - // data structure to record each task execution - struct Segment { - - std::string name; - TaskType type; - - std::chrono::time_point beg; - std::chrono::time_point end; - - Segment( - const std::string& n, - TaskType t, - std::chrono::time_point b - ); - - Segment( - const std::string& n, - TaskType t, - std::chrono::time_point b, - std::chrono::time_point e - ); - }; - - // data structure to store the entire execution timeline - struct Timeline { - std::chrono::time_point origin; - std::vector> segments; - std::vector>> stacks; - }; - - public: - - /** - @brief dump the timelines in JSON format to an ostream - @param ostream the target std::ostream to dump - */ - inline void dump(std::ostream& ostream) const; - - /** - @brief dump the timelines in JSON to a std::string - @return a JSON string - */ - inline std::string dump() const; - - /** - @brief clear the timeline data - */ - inline void clear(); - - /** - @brief get the number of total tasks in the observer - @return number of total tasks - */ - inline size_t num_tasks() const; - - private: - - inline void set_up(size_t num_workers) override final; - inline void on_entry(size_t worker_id, TaskView task_view) override final; - inline void on_exit(size_t worker_id, TaskView task_view) override final; - - Timeline _timeline; - - UUID _uuid; -}; - -// constructor -inline TFProfObserver::Segment::Segment( - const std::string& n, - TaskType t, - std::chrono::time_point b -) : - name {n}, type {t}, beg {b} { -} - -// constructor -inline TFProfObserver::Segment::Segment( - const std::string& n, - TaskType t, - std::chrono::time_point b, - std::chrono::time_point e -) : - name {n}, type {t}, beg {b}, end {e} { -} - -// Procedure: set_up -inline void TFProfObserver::set_up(size_t num_workers) { - - _timeline.segments.resize(num_workers); - _timeline.stacks.resize(num_workers); - - for(size_t w=0; w 0) { - break; - } - } - - // not timeline data to dump - if(first == _timeline.segments.size()) { - os << "{}\n"; - return; - } - - os << "{\"executor\":\"" << _uuid << "\",\"data\":["; - - for(size_t w=first; w<_timeline.segments.size(); w++) { - - if(_timeline.segments[w].empty()) { - continue; - } - - if(w != first) { - os << ','; - } - - os << "{\"worker\":\"worker " << w << "\",\"data\":["; - for(size_t i=0; i<_timeline.segments[w].size(); ++i) { - - const auto& s = 
_timeline.segments[w][i]; - - if(i) os << ','; - - // span - os << "{\"span\":[" - << std::chrono::duration_cast( - s.beg - _timeline.origin - ).count() << "," - << std::chrono::duration_cast( - s.end - _timeline.origin - ).count() << "],"; - - // name - os << "\"name\":\""; - if(s.name.empty()) { - os << w << '_' << i; - } - else { - os << s.name; - } - os << "\","; - - // category "type": "Condition Task", - os << "\"type\":\"" << task_type_to_string(s.type) << "\""; - - os << "}"; - } - os << "]}"; - } - - os << "]}\n"; -} - -// Function: dump -inline std::string TFProfObserver::dump() const { - std::ostringstream oss; - dump(oss); - return oss.str(); -} - -// Function: num_tasks -inline size_t TFProfObserver::num_tasks() const { - return std::accumulate( - _timeline.segments.begin(), _timeline.segments.end(), size_t{0}, - [](size_t sum, const auto& exe){ - return sum + exe.size(); - } - ); -} - -// ---------------------------------------------------------------------------- -// Identifier for Each Built-in Observer -// ---------------------------------------------------------------------------- - -/** @enum ObserverType - -built-in observer types - -*/ -enum ObserverType { - TFPROF = 1, - CHROME = 2 -}; - -/** -@brief convert an observer type to a human-readable string -*/ -inline const char* observer_type_to_string(ObserverType type) { - const char* val; - switch(type) { - case TFPROF: val = "TFProf"; break; - case CHROME: val = "Chrome"; break; - default: val = "undefined"; break; - } - return val; -} - -// ---------------------------------------------------------------------------- -// Legacy Alias -// ---------------------------------------------------------------------------- -using ExecutorObserverInterface = ObserverInterface; -using ExecutorObserver = ChromeTracingObserver; - - -} // end of namespace tf ----------------------------------------------------- - - diff --git a/bundled/taskflow-2.5.0/include/taskflow/core/taskflow.hpp b/bundled/taskflow-2.5.0/include/taskflow/core/taskflow.hpp deleted file mode 100644 index f6123eff5d..0000000000 --- a/bundled/taskflow-2.5.0/include/taskflow/core/taskflow.hpp +++ /dev/null @@ -1,353 +0,0 @@ -#pragma once - -#include - -#include "flow_builder.hpp" -#include "topology.hpp" - -namespace tf { - -// ---------------------------------------------------------------------------- - -/** -@class Taskflow - -@brief main entry to create a task dependency graph - -*/ -class Taskflow : public FlowBuilder { - - friend class Topology; - friend class Executor; - friend class FlowBuilder; - - struct Dumper { - std::stack stack; - std::unordered_set visited; - }; - - public: - - /** - @brief constructs a taskflow with a given name - */ - Taskflow(const std::string& name); - - /** - @brief constructs a taskflow - */ - Taskflow(); - - /** - @brief destroy the taskflow (virtual call) - */ - virtual ~Taskflow(); - - /** - @brief dumps the taskflow to a std::ostream in DOT format - - @param ostream a std::ostream target - */ - void dump(std::ostream& ostream) const; - - /** - @brief dumps the taskflow in DOT format to a std::string - */ - std::string dump() const; - - /** - @brief queries the number of tasks in the taskflow - */ - size_t num_tasks() const; - - /** - @brief queries the emptiness of the taskflow - */ - bool empty() const; - - /** - @brief sets the name of the taskflow - - @return @c *this - */ - void name(const std::string&); - - /** - @brief queries the name of the taskflow - */ - const std::string& name() const ; - - /** - @brief clears the 
associated task dependency graph - */ - void clear(); - - /** - @brief applies an visitor callable to each task in the taskflow - */ - template - void for_each_task(V&& visitor) const; - - private: - - std::string _name; - - Graph _graph; - - std::mutex _mtx; - - std::list _topologies; - - void _dump(std::ostream&, const Taskflow*) const; - void _dump(std::ostream&, const Node*, Dumper&) const; - void _dump(std::ostream&, const Graph&, Dumper&) const; -}; - -// Constructor -inline Taskflow::Taskflow(const std::string& name) : - FlowBuilder {_graph}, - _name {name} { -} - -// Constructor -inline Taskflow::Taskflow() : FlowBuilder{_graph} { -} - -// Destructor -inline Taskflow::~Taskflow() { - assert(_topologies.empty()); -} - -// Procedure: -inline void Taskflow::clear() { - _graph.clear(); -} - -// Function: num_noces -inline size_t Taskflow::num_tasks() const { - return _graph.size(); -} - -// Function: empty -inline bool Taskflow::empty() const { - return _graph.empty(); -} - -// Function: name -inline void Taskflow::name(const std::string &name) { - _name = name; -} - -// Function: name -inline const std::string& Taskflow::name() const { - return _name; -} - -// Function: for_each_task -template -void Taskflow::for_each_task(V&& visitor) const { - for(size_t i=0; i<_graph._nodes.size(); ++i) { - visitor(Task(_graph._nodes[i])); - } -} - -// Procedure: dump -inline std::string Taskflow::dump() const { - std::ostringstream oss; - dump(oss); - return oss.str(); -} - -// Function: dump -inline void Taskflow::dump(std::ostream& os) const { - os << "digraph Taskflow {\n"; - _dump(os, this); - os << "}\n"; -} - -// Procedure: _dump -inline void Taskflow::_dump(std::ostream& os, const Taskflow* top) const { - - Dumper dumper; - - dumper.stack.push(top); - dumper.visited.insert(top); - - while(!dumper.stack.empty()) { - - auto f = dumper.stack.top(); - dumper.stack.pop(); - - os << "subgraph cluster_p" << f << " {\nlabel=\"Taskflow: "; - if(f->_name.empty()) os << 'p' << f; - else os << f->_name; - os << "\";\n"; - _dump(os, f->_graph, dumper); - os << "}\n"; - } -} - -// Procedure: _dump -inline void Taskflow::_dump( - std::ostream& os, const Node* node, Dumper& dumper -) const { - - os << 'p' << node << "[label=\""; - if(node->_name.empty()) os << 'p' << node; - else os << node->_name; - os << "\" "; - - // shape for node - switch(node->_handle.index()) { - - case Node::CONDITION_WORK: - os << "shape=diamond color=black fillcolor=aquamarine style=filled"; - break; - -#ifdef TF_ENABLE_CUDA - case Node::CUDAFLOW_WORK: - os << "shape=folder fillcolor=cyan style=filled"; - break; -#endif - - default: - break; - } - - os << "];\n"; - - for(size_t s=0; s_successors.size(); ++s) { - if(node->_handle.index() == Node::CONDITION_WORK) { - // case edge is dashed - os << 'p' << node << " -> p" << node->_successors[s] - << " [style=dashed label=\"" << s << "\"];\n"; - } - else { - os << 'p' << node << " -> p" << node->_successors[s] << ";\n"; - } - } - - // subflow join node - if(node->_parent && node->_successors.size() == 0) { - os << 'p' << node << " -> p" << node->_parent << ";\n"; - } - - switch(node->_handle.index()) { - - case Node::DYNAMIC_WORK: { - auto& sbg = nstd::get(node->_handle).subgraph; - if(!sbg.empty()) { - os << "subgraph cluster_p" << node << " {\nlabel=\"Subflow: "; - if(node->_name.empty()) os << 'p' << node; - else os << node->_name; - - os << "\";\n" << "color=blue\n"; - _dump(os, sbg, dumper); - os << "}\n"; - } - } - break; - -#ifdef TF_ENABLE_CUDA - case Node::CUDAFLOW_WORK: { - 
auto& cfg = nstd::get(node->_handle).graph; - if(!cfg.empty()) { - os << "subgraph cluster_p" << node << " {\nlabel=\"cudaFlow: "; - if(node->_name.empty()) os << 'p' << node; - else os << node->_name; - - os << "\";\n" << "color=\"purple\"\n"; - - for(const auto& v : cfg._nodes) { - - os << 'p' << v.get() << "[label=\""; - if(v->_name.empty()) { - os << 'p' << v.get() << "\""; - } - else { - os << v->_name << "\""; - } - - switch(v->_handle.index()) { - case cudaNode::NOOP: - break; - - case cudaNode::COPY: - //os << " shape=\"cds\""; - break; - - case cudaNode::KERNEL: - os << " style=\"filled\"" - << " color=\"white\" fillcolor=\"black\"" - << " fontcolor=\"white\"" - << " shape=\"box3d\""; - break; - - default: - break; - } - - os << "];\n"; - for(const auto s : v->_successors) { - os << 'p' << v.get() << " -> " << 'p' << s << ";\n"; - } - - if(v->_successors.size() == 0) { - os << 'p' << v.get() << " -> p" << node << ";\n"; - } - - } - os << "}\n"; - } - } - break; -#endif - - default: - break; - } -} - -// Procedure: _dump -inline void Taskflow::_dump( - std::ostream& os, const Graph& graph, Dumper& dumper -) const { - - for(const auto& n : graph._nodes) { - - // regular task - if(n->_handle.index() != Node::MODULE_WORK) { - _dump(os, n, dumper); - } - // module task - else { - - auto module = nstd::get(n->_handle).module; - - os << 'p' << n << "[shape=box3d, color=blue, label=\""; - if(n->_name.empty()) os << n; - else os << n->_name; - os << " [Taskflow: "; - if(module->_name.empty()) os << 'p' << module; - else os << module->_name; - os << "]\"];\n"; - - if(dumper.visited.find(module) == dumper.visited.end()) { - dumper.visited.insert(module); - dumper.stack.push(module); - } - - for(const auto s : n->_successors) { - os << 'p' << n << "->" << 'p' << s << ";\n"; - } - } - } -} - -// ---------------------------------------------------------------------------- -// Backward compatibility -// ---------------------------------------------------------------------------- -using Framework = Taskflow; - -} // end of namespace tf. --------------------------------------------------- - diff --git a/bundled/taskflow-2.5.0/include/taskflow/core/topology.hpp b/bundled/taskflow-2.5.0/include/taskflow/core/topology.hpp deleted file mode 100644 index 97780d9cda..0000000000 --- a/bundled/taskflow-2.5.0/include/taskflow/core/topology.hpp +++ /dev/null @@ -1,93 +0,0 @@ -#pragma once - -//#include "taskflow.hpp" - -namespace tf { - -// ---------------------------------------------------------------------------- - -// class: Topology -class Topology { - - friend class Taskflow; - friend class Executor; - - public: - - template - Topology(Taskflow&, P&&, C&&); - - private: - - Taskflow& _taskflow; - - std::promise _promise; - - PassiveVector _sources; - - std::function _pred; - std::function _call; - - std::atomic _join_counter {0}; -}; - -// Constructor -template -inline Topology::Topology(Taskflow& tf, P&& p, C&& c): - _taskflow(tf), - _pred {std::forward
<P>
(p)}, - _call {std::forward(c)} { -} - -// Procedure: _bind -// Re-builds the source links and the sink number for this topology. -//inline void Topology::_bind(Graph& g) { -// -// _sources.clear(); -// -// //PassiveVector condition_nodes; -// -// // scan each node in the graph and build up the links -// for(auto& node : g.nodes()) { -// -// node->_topology = this; -// node->_clear_state(); -// node->_set_up_join_counter(); -// -// if(node->num_dependents() == 0) { -// _sources.push_back(node.get()); -// } -// -// //int join_counter = 0; -// //for(auto p : node->_dependents) { -// // if(p->_work.index() == Node::CONDITION_WORK) { -// // node->_set_state(Node::BRANCH); -// // } -// // else { -// // join_counter++; -// // } -// //} -// -// //node->_join_counter.store(join_counter, std::memory_order_relaxed); -// -// //// TODO: Merge with the loop below? -// //if(node->_work.index() == Node::CONDITION_WORK) { -// // condition_nodes.push_back(node.get()); -// //} -// -// //// Reset each node's num_dependents -// //node->_join_counter.store(node->_dependents.size(), std::memory_order_relaxed); -// } -// -// // We need to deduct the condition predecessors in impure case nodes -// //for(auto& n: condition_nodes) { -// // for(auto& s: n->_successors) { -// // s->_join_counter.fetch_sub(1, std::memory_order_relaxed); -// // s->set_branch(); -// // } -// //} -//} - - - -} // end of namespace tf. ---------------------------------------------------- diff --git a/bundled/taskflow-2.5.0/include/taskflow/core/tsq.hpp b/bundled/taskflow-2.5.0/include/taskflow/core/tsq.hpp deleted file mode 100644 index 23ea34d21b..0000000000 --- a/bundled/taskflow-2.5.0/include/taskflow/core/tsq.hpp +++ /dev/null @@ -1,250 +0,0 @@ -// 2020/02/24 - created by twhuang -// - specialized work stealing queue for pointer - -#pragma once - -#include -#include -#include -#include -#include -#include - -namespace tf { - -/** -@class: TaskQueue - -@tparam T data type (must be a pointer) - -@brief Lock-free unbounded single-producer multiple-consumer queue. - -This class implements the work stealing queue described in the paper, -"Correct and Efficient Work-Stealing for Weak Memory Models," -available at https://www.di.ens.fr/~zappa/readings/ppopp13.pdf. - -Only the queue owner can perform pop and push operations, -while others can steal data from the queue. 
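// Sketch of the ownership rules stated above (hypothetical Job type; the
// include path assumes the bundled layout of this header):
#include <taskflow/core/tsq.hpp>

struct Job { int id; };

void owner_and_thief() {
  tf::TaskQueue<Job*> queue;     // default capacity 1024; must be a power of two

  Job job {42};
  queue.push(&job);              // owner thread only; may grow the queue
  Job* mine = queue.pop();       // owner thread only; nullptr if the queue is empty

  Job* stolen = queue.steal();   // any thread; may return nullptr even on a non-empty queue
  (void)mine; (void)stolen;
}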
-*/ -template -class TaskQueue { - - static_assert(std::is_pointer::value, "T must be a pointer type"); - - struct Array { - - int64_t C; - int64_t M; - std::atomic* S; - - explicit Array(int64_t c) : - C {c}, - M {c-1}, - S {new std::atomic[static_cast(C)]} { - } - - ~Array() { - delete [] S; - } - - int64_t capacity() const noexcept { - return C; - } - - template - void push(int64_t i, O&& o) noexcept { - S[i & M].store(std::forward(o), std::memory_order_relaxed); - } - - T pop(int64_t i) noexcept { - return S[i & M].load(std::memory_order_relaxed); - } - - Array* resize(int64_t b, int64_t t) { - Array* ptr = new Array {2*C}; - for(int64_t i=t; i!=b; ++i) { - ptr->push(i, pop(i)); - } - return ptr; - } - - }; - - std::atomic _top; - std::atomic _bottom; - std::atomic _array; - std::vector _garbage; - - public: - - /** - @brief constructs the queue with a given capacity - - @param capacity the capacity of the queue (must be power of 2) - */ - explicit TaskQueue(int64_t capacity = 1024); - - /** - @brief destructs the queue - */ - ~TaskQueue(); - - /** - @brief queries if the queue is empty at the time of this call - */ - bool empty() const noexcept; - - /** - @brief queries the number of items at the time of this call - */ - size_t size() const noexcept; - - /** - @brief queries the capacity of the queue - */ - int64_t capacity() const noexcept; - - /** - @brief inserts an item to the queue - - Only the owner thread can insert an item to the queue. - The operation can trigger the queue to resize its capacity - if more space is required. - - @tparam O data type - - @param item the item to perfect-forward to the queue - */ - void push(T item); - - /** - @brief pops out an item from the queue - - Only the owner thread can pop out an item from the queue. - The return can be a nullptr if this operation failed (empty queue). - */ - T pop(); - - /** - @brief steals an item from the queue - - Any threads can try to steal an item from the queue. - The return can be a nullptr if this operation failed (not necessary empty). - */ - T steal(); -}; - -// Constructor -template -TaskQueue::TaskQueue(int64_t c) { - assert(c && (!(c & (c-1)))); - _top.store(0, std::memory_order_relaxed); - _bottom.store(0, std::memory_order_relaxed); - _array.store(new Array{c}, std::memory_order_relaxed); - _garbage.reserve(32); -} - -// Destructor -template -TaskQueue::~TaskQueue() { - for(auto a : _garbage) { - delete a; - } - delete _array.load(); -} - -// Function: empty -template -bool TaskQueue::empty() const noexcept { - int64_t b = _bottom.load(std::memory_order_relaxed); - int64_t t = _top.load(std::memory_order_relaxed); - return b <= t; -} - -// Function: size -template -size_t TaskQueue::size() const noexcept { - int64_t b = _bottom.load(std::memory_order_relaxed); - int64_t t = _top.load(std::memory_order_relaxed); - return static_cast(b >= t ? 
b - t : 0); -} - -// Function: push -template -void TaskQueue::push(T o) { - int64_t b = _bottom.load(std::memory_order_relaxed); - int64_t t = _top.load(std::memory_order_acquire); - Array* a = _array.load(std::memory_order_relaxed); - - // queue is full - if(a->capacity() - 1 < (b - t)) { - Array* tmp = a->resize(b, t); - _garbage.push_back(a); - std::swap(a, tmp); - _array.store(a, std::memory_order_relaxed); - } - - a->push(b, o); - std::atomic_thread_fence(std::memory_order_release); - _bottom.store(b + 1, std::memory_order_relaxed); -} - -// Function: pop -template -T TaskQueue::pop() { - int64_t b = _bottom.load(std::memory_order_relaxed) - 1; - Array* a = _array.load(std::memory_order_relaxed); - _bottom.store(b, std::memory_order_relaxed); - std::atomic_thread_fence(std::memory_order_seq_cst); - int64_t t = _top.load(std::memory_order_relaxed); - - T item {nullptr}; - - if(t <= b) { - item = a->pop(b); - if(t == b) { - // the last item just got stolen - if(!_top.compare_exchange_strong(t, t+1, - std::memory_order_seq_cst, - std::memory_order_relaxed)) { - item = nullptr; - } - _bottom.store(b + 1, std::memory_order_relaxed); - } - } - else { - _bottom.store(b + 1, std::memory_order_relaxed); - } - - return item; -} - -// Function: steal -template -T TaskQueue::steal() { - int64_t t = _top.load(std::memory_order_acquire); - std::atomic_thread_fence(std::memory_order_seq_cst); - int64_t b = _bottom.load(std::memory_order_acquire); - - T item {nullptr}; - - if(t < b) { - Array* a = _array.load(std::memory_order_consume); - item = a->pop(t); - if(!_top.compare_exchange_strong(t, t+1, - std::memory_order_seq_cst, - std::memory_order_relaxed)) { - return nullptr; - } - } - - return item; -} - -// Function: capacity -template -int64_t TaskQueue::capacity() const noexcept { - return _array.load(std::memory_order_relaxed)->capacity(); -} - -} // end of namespace tf ----------------------------------------------------- diff --git a/bundled/taskflow-2.5.0/include/taskflow/cuda/cuda_device.hpp b/bundled/taskflow-2.5.0/include/taskflow/cuda/cuda_device.hpp deleted file mode 100644 index 7b9371a97c..0000000000 --- a/bundled/taskflow-2.5.0/include/taskflow/cuda/cuda_device.hpp +++ /dev/null @@ -1,136 +0,0 @@ -#pragma once - -#include "cuda_error.hpp" - -namespace tf { - -/** -@brief queries the number of available devices -*/ -inline size_t cuda_num_devices() { - int N = 0; - TF_CHECK_CUDA(cudaGetDeviceCount(&N), "failed to get device count"); - return static_cast(N); -} - -/** -@brief gets the current device associated with the caller thread -*/ -inline int cuda_get_device() { - int id; - TF_CHECK_CUDA(cudaGetDevice(&id), "failed to get current device id"); - return id; -} - -/** -@brief switches to a given device context -*/ -inline void cuda_set_device(int id) { - TF_CHECK_CUDA(cudaSetDevice(id), "failed to switch to device ", id); -} - -/** -@brief obtains the device property -*/ -inline void cuda_get_device_property(int i, cudaDeviceProp& p) { - TF_CHECK_CUDA( - cudaGetDeviceProperties(&p, i), "failed to get property of device ", i - ); -} - -/** -@brief obtains the device property -*/ -inline cudaDeviceProp cuda_get_device_property(int i) { - cudaDeviceProp p; - TF_CHECK_CUDA( - cudaGetDeviceProperties(&p, i), "failed to get property of device ", i - ); - return p; -} - -/** -@brief cuda_dump_device_property -*/ -inline void cuda_dump_device_property(std::ostream& os, const cudaDeviceProp& p) { - - os << "Major revision number: " << p.major << '\n' - << "Minor revision number: " << p.minor 
<< '\n' - << "Name: " << p.name << '\n' - << "Total global memory: " << p.totalGlobalMem << '\n' - << "Total shared memory per block: " << p.sharedMemPerBlock << '\n' - << "Total registers per block: " << p.regsPerBlock << '\n' - << "Warp size: " << p.warpSize << '\n' - << "Maximum memory pitch: " << p.memPitch << '\n' - << "Maximum threads per block: " << p.maxThreadsPerBlock << '\n'; - - os << "Maximum dimension of block: "; - for (int i = 0; i < 3; ++i) { - if(i) os << 'x'; - os << p.maxThreadsDim[i]; - } - os << '\n'; - - os << "Maximum dimenstion of grid: "; - for (int i = 0; i < 3; ++i) { - if(i) os << 'x'; - os << p.maxGridSize[i];; - } - os << '\n'; - - os << "Clock rate: " << p.clockRate << '\n' - << "Total constant memory: " << p.totalConstMem << '\n' - << "Texture alignment: " << p.textureAlignment << '\n' - << "Concurrent copy and execution: " << p.deviceOverlap << '\n' - << "Number of multiprocessors: " << p.multiProcessorCount << '\n' - << "Kernel execution timeout: " << p.kernelExecTimeoutEnabled << '\n' - << "GPU sharing Host Memory: " << p.integrated << '\n' - << "Host page-locked mem mapping: " << p.canMapHostMemory << '\n' - << "Alignment for Surfaces: " << p.surfaceAlignment << '\n' - << "Device has ECC support: " << p.ECCEnabled << '\n' - << "Unified Addressing (UVA): " << p.unifiedAddressing << '\n'; -} - -// ---------------------------------------------------------------------------- -// Class definitions -// ---------------------------------------------------------------------------- - -/** @class cudaScopedDevice - -@brief RAII-style device context switch - -*/ -class cudaScopedDevice { - - public: - - cudaScopedDevice(int); - ~cudaScopedDevice(); - - private: - - int _p; -}; - -// Constructor -inline cudaScopedDevice::cudaScopedDevice(int dev) { - TF_CHECK_CUDA(cudaGetDevice(&_p), "failed to get current device scope"); - if(_p == dev) { - _p = -1; - } - else { - TF_CHECK_CUDA(cudaSetDevice(dev), "failed to scope on device ", dev); - } -} - -// Destructor -inline cudaScopedDevice::~cudaScopedDevice() { - if(_p != -1) { - cudaSetDevice(_p); - //TF_CHECK_CUDA(cudaSetDevice(_p), "failed to scope back to device ", _p); - } -} - -} // end of namespace cuda --------------------------------------------------- - - diff --git a/bundled/taskflow-2.5.0/include/taskflow/cuda/cuda_error.hpp b/bundled/taskflow-2.5.0/include/taskflow/cuda/cuda_error.hpp deleted file mode 100644 index a2b679571c..0000000000 --- a/bundled/taskflow-2.5.0/include/taskflow/cuda/cuda_error.hpp +++ /dev/null @@ -1,31 +0,0 @@ -#pragma once - -#include -#include -#include -#include - -#include "../utility/stringify.hpp" - -#define TF_CUDA_REMOVE_FIRST_HELPER(N, ...) __VA_ARGS__ -#define TF_CUDA_REMOVE_FIRST(...) TF_CUDA_REMOVE_FIRST_HELPER(__VA_ARGS__) -#define TF_CUDA_GET_FIRST_HELPER(N, ...) N -#define TF_CUDA_GET_FIRST(...) TF_CUDA_GET_FIRST_HELPER(__VA_ARGS__) - -#define TF_CHECK_CUDA(...) \ -if(TF_CUDA_GET_FIRST(__VA_ARGS__) != cudaSuccess) { \ - std::ostringstream oss; \ - auto ev = TF_CUDA_GET_FIRST(__VA_ARGS__); \ - auto unknown_str = "unknown error"; \ - auto unknown_name = "cudaErrorUnknown"; \ - auto error_str = ::cudaGetErrorString(ev); \ - auto error_name = ::cudaGetErrorName(ev); \ - oss << "[" << __FILE__ << ":" << __LINE__ << "] " \ - << (error_str ? error_str : unknown_str) \ - << " (" \ - << (error_name ? 
error_name : unknown_name) \ - << ") - "; \ - tf::ostreamize(oss, TF_CUDA_REMOVE_FIRST(__VA_ARGS__)); \ - throw std::runtime_error(oss.str()); \ -} - diff --git a/bundled/taskflow-2.5.0/include/taskflow/cuda/cuda_flow.hpp b/bundled/taskflow-2.5.0/include/taskflow/cuda/cuda_flow.hpp deleted file mode 100644 index efe770a014..0000000000 --- a/bundled/taskflow-2.5.0/include/taskflow/cuda/cuda_flow.hpp +++ /dev/null @@ -1,503 +0,0 @@ -#pragma once - -#include "cuda_task.hpp" - -namespace tf { - -/** -@class cudaFlow - -@brief methods for building a CUDA task dependency graph. - -A cudaFlow is a high-level interface to manipulate GPU tasks using -the task dependency graph model. -The class provides a set of methods for creating and launch different tasks -on one or multiple CUDA devices, -for instance, kernel tasks, data transfer tasks, and memory operation tasks. -*/ -class cudaFlow { - - friend class Executor; - - public: - - /** - @brief constructs a cudaFlow builder object - - @tparam P predicate type - - @param graph a cudaGraph to manipulate - @param p predicate which return @c true if the launching should be contined - */ - template - cudaFlow(cudaGraph& graph, P&& p); - - /** - @brief queries the emptiness of the graph - */ - bool empty() const; - - /** - @brief creates a no-operation task - - An empty node performs no operation during execution, - but can be used for transitive ordering. - For example, a phased execution graph with 2 groups of n nodes - with a barrier between them can be represented using an empty node - and 2*n dependency edges, - rather than no empty node and n^2 dependency edges. - */ - cudaTask noop(); - - // CUDA seems pretty restrictive about calling host in a cudaGraph. - // We disable this function and wait for future stability. - // - //@brief creates a host execution task - // - //@tparam C callable type - // - //@param c a callable object constructible from std::function. - - //A host can only execute CPU-specific functions and cannot do any CUDA calls - //(e.g., cudaMalloc). - // - //template - //cudaTask host(C&& c); - - /** - @brief creates a kernel task - - @tparam F kernel function type - @tparam ArgsT kernel function parameters type - - @param g configured grid - @param b configured block - @param s configured shared memory - @param f kernel function - @param args arguments to forward to the kernel function by copy - - @return cudaTask handle - */ - template - cudaTask kernel(dim3 g, dim3 b, size_t s, F&& f, ArgsT&&... args); - - /** - @brief creates a kernel task on a device - - @tparam F kernel function type - @tparam ArgsT kernel function parameters type - - @param d device identifier to luanch the kernel - @param g configured grid - @param b configured block - @param s configured shared memory - @param f kernel function - @param args arguments to forward to the kernel function by copy - - @return cudaTask handle - */ - template - cudaTask kernel_on(int d, dim3 g, dim3 b, size_t s, F&& f, ArgsT&&... args); - - /** - @brief creates a memset task - - @param dst pointer to the destination device memory area - @param v value to set for each byte of specified memory - @param count size in bytes to set - - A memset task fills the first @c count bytes of device memory area - pointed by @c dst with the byte value @c v. 
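To make the flow-builder interface concrete, here is a small sketch composing a memset task and a kernel task. The saxpy kernel, the device pointers dx/dy, and the Taskflow/Executor scaffolding are illustrative placeholders, not part of this header; in taskflow 2.x a cudaFlow is typically obtained through Taskflow::emplace as shown:

__global__ void saxpy(int n, float a, const float* x, float* y);   // hypothetical kernel

void run_saxpy(int n, float* dx, float* dy) {     // dx, dy: device buffers of n floats
  tf::Taskflow taskflow;
  tf::Executor executor;

  taskflow.emplace([=](tf::cudaFlow& cf) {
    tf::cudaTask clear = cf.memset(dy, 0, n * sizeof(float));    // byte-wise clear of the output buffer
    tf::cudaTask run   = cf.kernel((n + 255) / 256, 256, 0,      // grid, block, shared memory
                                   saxpy, n, 2.0f, dx, dy);
    clear.precede(run);                                          // memset finishes before the kernel starts
  });

  executor.run(taskflow).wait();
}
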
- */ - cudaTask memset(void* dst, int v, size_t count); - - /** - @brief creates a memcpy task - - @param tgt pointer to the target memory block - @param src pointer to the source memory block - @param bytes bytes to copy - - @return cudaTask handle - - A memcpy task transfers @c bytes of data from a course location - to a target location. Direction can be arbitrary among CPUs and GPUs. - */ - cudaTask memcpy(void* tgt, const void* src, size_t bytes); - - /** - @brief creates a zero task that zeroes a typed memory block - - @tparam T element type (size of @c T must be either 1, 2, or 4) - @param dst pointer to the destination device memory area - @param count number of elements - - A zero task zeroes the first @c count elements of type @c T - in a device memory area pointed by @c dst. - */ - template - std::enable_if_t< - is_pod_v && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), - cudaTask - > - zero(T* dst, size_t count); - - /** - @brief creates a fill task that fills a typed memory block with a value - - @tparam T element type (size of @c T must be either 1, 2, or 4) - @param dst pointer to the destination device memory area - @param value value to fill for each element of type @c T - @param count number of elements - - A fill task fills the first @c count elements of type @c T with @c value - in a device memory area pointed by @c dst. - The value to fill is interpreted in type @c T rather than byte. - */ - template - std::enable_if_t< - is_pod_v && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), - cudaTask - > - fill(T* dst, T value, size_t count); - - /** - @brief creates a copy task - - @tparam T element type (non-void) - - @param tgt pointer to the target memory block - @param src pointer to the source memory block - @param num number of elements to copy - - @return cudaTask handle - - A copy task transfers num*sizeof(T) bytes of data from a source location - to a target location. Direction can be arbitrary among CPUs and GPUs. - */ - template < - typename T, - std::enable_if_t::value, void>* = nullptr - > - cudaTask copy(T* tgt, const T* src, size_t num); - - /** - @brief assigns a device to launch the cudaFlow - - @param device target device identifier - */ - void device(int device); - - /** - @brief queries the device associated with the cudaFlow - */ - int device() const; - - /** - @brief assigns a stream to launch the cudaFlow - - @param stream target stream identifier - */ - void stream(cudaStream_t stream); - - /** - @brief assigns a predicate to loop the cudaFlow until the predicate is satisfied - - @tparam P predicate type - @param p predicate which return @c true if the launching should be contined - - The execution of cudaFlow is equivalent to: while(!predicate()) { run cudaflow; } - */ - template - void predicate(P&& p); - - /** - @brief repeats the execution of the cudaFlow by @c n times - */ - void repeat(size_t n); - - private: - - cudaGraph& _graph; - - int _device {0}; - - nstd::optional _stream; - - std::function _predicate; -}; - -// Constructor -template -cudaFlow::cudaFlow(cudaGraph& g, P&& p) : - _graph {g}, - _predicate {std::forward
<P>
(p)} { -} - -// Procedure: predicate -template -void cudaFlow::predicate(P&& pred) { - _predicate = std::forward
<P>
(pred); -} - -// Procedure: repeat -inline void cudaFlow::repeat(size_t n) { - _predicate = [n] () mutable { return n-- == 0; }; -} - -// Function: empty -inline bool cudaFlow::empty() const { - return _graph._nodes.empty(); -} - -// Procedure: device -inline void cudaFlow::device(int d) { - _device = d; -} - -// Function: device -inline int cudaFlow::device() const { - return _device; -} - -// Procedure: stream -inline void cudaFlow::stream(cudaStream_t s) { - _stream = s; -} - -// Function: noop -inline cudaTask cudaFlow::noop() { - auto node = _graph.emplace_back(nstd::in_place_type_t{}, - [](cudaGraph_t& graph, cudaGraphNode_t& node){ - TF_CHECK_CUDA( - ::cudaGraphAddEmptyNode(&node, graph, nullptr, 0), - "failed to create a no-operation (empty) node" - ); - } - ); - return cudaTask(node); -} - -//// Function: host -//template -//cudaTask cudaFlow::host(C&& c) { -// auto node = _graph.emplace_back(nstd::in_place_type_t{}, -// [c=std::forward(c)](cudaGraph_t& graph, cudaGraphNode_t& node) mutable { -// cudaHostNodeParams p; -// p.fn = [] (void* data) { (*static_cast(data))(); }; -// p.userData = &c; -// TF_CHECK_CUDA( -// ::cudaGraphAddHostNode(&node, graph, nullptr, 0, &p), -// "failed to create a host node" -// ); -// } -// ); -// return cudaTask(node); -//} - -// Function: kernel -template -cudaTask cudaFlow::kernel( - dim3 g, dim3 b, size_t s, F&& f, ArgsT&&... args -) { - - using traits = function_traits; - - static_assert(traits::arity == sizeof...(ArgsT), "arity mismatches"); - - auto node = _graph.emplace_back(nstd::in_place_type_t{}, - [g, b, s, f=(void*)f, args...] (cudaGraph_t& graph, cudaGraphNode_t& node) { - - cudaKernelNodeParams p; - void* arguments[sizeof...(ArgsT)] = { (void*)(&args)... }; - p.func = f; - p.gridDim = g; - p.blockDim = b; - p.sharedMemBytes = s; - p.kernelParams = arguments; - p.extra = nullptr; - - TF_CHECK_CUDA( - ::cudaGraphAddKernelNode(&node, graph, nullptr, 0, &p), - "failed to create a cudaGraph node in kernel task" - ); - } - ); - - return cudaTask(node); -} - -// Function: kernel -template -cudaTask cudaFlow::kernel_on( - int d, dim3 g, dim3 b, size_t s, F&& f, ArgsT&&... args -) { - - using traits = function_traits; - - static_assert(traits::arity == sizeof...(ArgsT), "arity mismatches"); - - auto node = _graph.emplace_back(nstd::in_place_type_t{}, - [d, g, b, s, f=(void*)f, args...] (cudaGraph_t& graph, cudaGraphNode_t& node) { - - cudaKernelNodeParams p; - void* arguments[sizeof...(ArgsT)] = { (void*)(&args)... 
}; - p.func = f; - p.gridDim = g; - p.blockDim = b; - p.sharedMemBytes = s; - p.kernelParams = arguments; - p.extra = nullptr; - - cudaScopedDevice ctx(d); - TF_CHECK_CUDA( - ::cudaGraphAddKernelNode(&node, graph, nullptr, 0, &p), - "failed to create a cudaGraph node in kernel_on task" - ); - } - ); - - return cudaTask(node); -} - -// Function: zero -template -std::enable_if_t< - is_pod_v && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), - cudaTask -> -cudaFlow::zero(T* dst, size_t count) { - auto node = _graph.emplace_back(nstd::in_place_type_t{}, - [dst, count] (cudaGraph_t& graph, cudaGraphNode_t& node) { - cudaMemsetParams p; - p.dst = dst; - p.value = 0; - p.pitch = 0; - p.elementSize = sizeof(T); // either 1, 2, or 4 - p.width = count; - p.height = 1; - TF_CHECK_CUDA( - cudaGraphAddMemsetNode(&node, graph, nullptr, 0, &p), - "failed to create a cudaGraph node in zero task" - ); - } - ); - return cudaTask(node); -} - -// Function: fill -template -std::enable_if_t< - is_pod_v && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), - cudaTask -> -cudaFlow::fill(T* dst, T value, size_t count) { - auto node = _graph.emplace_back(nstd::in_place_type_t{}, - [dst, value, count] (cudaGraph_t& graph, cudaGraphNode_t& node) { - cudaMemsetParams p; - p.dst = dst; - - // perform bit-wise copy - p.value = 0; // crucial - static_assert(sizeof(T) <= sizeof(p.value), "internal error"); - std::memcpy(&p.value, &value, sizeof(T)); - - p.pitch = 0; - p.elementSize = sizeof(T); // either 1, 2, or 4 - p.width = count; - p.height = 1; - TF_CHECK_CUDA( - cudaGraphAddMemsetNode(&node, graph, nullptr, 0, &p), - "failed to create a cudaGraph node in fill task" - ); - } - ); - return cudaTask(node); -} - -// Function: copy -template < - typename T, - std::enable_if_t::value, void>* -> -cudaTask cudaFlow::copy(T* tgt, const T* src, size_t num) { - - using U = std::decay_t; - - auto node = _graph.emplace_back(nstd::in_place_type_t{}, - [tgt, src, num] (cudaGraph_t& graph, cudaGraphNode_t& node) { - - cudaMemcpy3DParms p; - p.srcArray = nullptr; - p.srcPos = ::make_cudaPos(0, 0, 0); - p.srcPtr = ::make_cudaPitchedPtr(const_cast(src), num*sizeof(U), num, 1); - p.dstArray = nullptr; - p.dstPos = ::make_cudaPos(0, 0, 0); - p.dstPtr = ::make_cudaPitchedPtr(tgt, num*sizeof(U), num, 1); - p.extent = ::make_cudaExtent(num*sizeof(U), 1, 1); - p.kind = cudaMemcpyDefault; - - TF_CHECK_CUDA( - cudaGraphAddMemcpyNode(&node, graph, nullptr, 0, &p), - "failed to create a cudaGraph node in copy task" - ); - } - ); - - return cudaTask(node); -} - -// Function: memset -inline cudaTask cudaFlow::memset(void* dst, int ch, size_t count) { - - auto node = _graph.emplace_back(nstd::in_place_type_t{}, - [dst, ch, count] (cudaGraph_t& graph, cudaGraphNode_t& node) { - cudaMemsetParams p; - p.dst = dst; - p.value = ch; - p.pitch = 0; - //p.elementSize = (count & 1) == 0 ? ((count & 3) == 0 ? 4 : 2) : 1; - //p.width = (count & 1) == 0 ? ((count & 3) == 0 ? 
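The difference between the byte-wise memset task and the typed zero/fill tasks is easiest to see side by side; a small sketch, assuming d_int and d_float are device buffers of n elements each and the surrounding Taskflow objects are placeholders:

void clear_and_fill(int n, int* d_int, float* d_float) {
  tf::Taskflow taskflow;
  tf::Executor executor;

  taskflow.emplace([=](tf::cudaFlow& cf) {
    tf::cudaTask z = cf.zero(d_int, n);                          // n ints set to 0; elementSize is sizeof(int)
    tf::cudaTask f = cf.fill(d_float, 1.0f, n);                  // n floats set to 1.0f; the value is copied bit-wise
    tf::cudaTask m = cf.memset(d_float, 0, n * sizeof(float));   // byte-wise; only uniform byte patterns make sense here
    z.precede(f);
    f.precede(m);
  });

  executor.run(taskflow).wait();
}
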
count >> 2 : count >> 1) : count; - p.elementSize = 1; // either 1, 2, or 4 - p.width = count; - p.height = 1; - TF_CHECK_CUDA( - cudaGraphAddMemsetNode(&node, graph, nullptr, 0, &p), - "failed to create a cudaGraph node in memset task" - ); - } - ); - - return cudaTask(node); -} - -// Function: memcpy -inline cudaTask cudaFlow::memcpy(void* tgt, const void* src, size_t bytes) { - auto node = _graph.emplace_back(nstd::in_place_type_t{}, - [tgt, src, bytes] (cudaGraph_t& graph, cudaGraphNode_t& node) { - // Parameters in cudaPitchedPtr - // d - Pointer to allocated memory - // p - Pitch of allocated memory in bytes - // xsz - Logical width of allocation in elements - // ysz - Logical height of allocation in elements - cudaMemcpy3DParms p; - p.srcArray = nullptr; - p.srcPos = ::make_cudaPos(0, 0, 0); - p.srcPtr = ::make_cudaPitchedPtr(const_cast(src), bytes, bytes, 1); - p.dstArray = nullptr; - p.dstPos = ::make_cudaPos(0, 0, 0); - p.dstPtr = ::make_cudaPitchedPtr(tgt, bytes, bytes, 1); - p.extent = ::make_cudaExtent(bytes, 1, 1); - p.kind = cudaMemcpyDefault; - TF_CHECK_CUDA( - cudaGraphAddMemcpyNode(&node, graph, nullptr, 0, &p), - "failed to create a cudaGraph node in memcpy task" - ); - } - ); - return cudaTask(node); -} - -} // end of namespace tf ----------------------------------------------------- - - diff --git a/bundled/taskflow-2.5.0/include/taskflow/cuda/cuda_graph.hpp b/bundled/taskflow-2.5.0/include/taskflow/cuda/cuda_graph.hpp deleted file mode 100644 index cb42617711..0000000000 --- a/bundled/taskflow-2.5.0/include/taskflow/cuda/cuda_graph.hpp +++ /dev/null @@ -1,310 +0,0 @@ -#pragma once - -#include "cuda_device.hpp" - -#include "../utility/object_pool.hpp" -#include "../utility/traits.hpp" -#include "../utility/passive_vector.hpp" -#include "../nstd/variant.hpp" -#include "../nstd/optional.hpp" - -namespace tf { - -// ---------------------------------------------------------------------------- -// cudaNode class -// ---------------------------------------------------------------------------- - -// class: cudaNode -class cudaNode { - - friend class cudaFlow; - friend class cudaGraph; - friend class cudaTask; - - friend class Taskflow; - friend class Executor; - - - // Noop handle - struct Noop { - - template - Noop(C&&); - - std::function work; - }; - - //// Host handle - //struct Host { - - // template - // Host(C&&); - // - // std::function work; - //}; - - // Memset handle - struct Memset { - - template - Memset(C&&); - - std::function work; - }; - - // Copy handle - struct Copy { - - template - Copy(C&&); - - std::function work; - }; - - // Kernel handle - struct Kernel { - - template - Kernel(C&&); - - std::function work; - }; - - using handle_t = nstd::variant< - nstd::monostate, - Noop, - //Host, - Memset, - Copy, - Kernel - >; - - // variant index - constexpr static auto NOOP = get_index_v; - //constexpr static auto HOST = get_index_v; - constexpr static auto MEMSET = get_index_v; - constexpr static auto COPY = get_index_v; - constexpr static auto KERNEL = get_index_v; - - public: - - template - cudaNode(ArgsT&&...); - - private: - - std::string _name; - - handle_t _handle; - - cudaGraphNode_t _native_handle {nullptr}; - - PassiveVector _successors; - - void _precede(cudaNode*); -}; - -// ---------------------------------------------------------------------------- -// cudaGraph class -// ---------------------------------------------------------------------------- - -// class: cudaGraph -class cudaGraph { - - friend class cudaFlow; - friend class cudaNode; - friend 
class cudaTask; - - friend class Taskflow; - friend class Executor; - - public: - - ~cudaGraph(); - - template - cudaNode* emplace_back(ArgsT&&...); - - cudaGraph_t native_handle(); - - void clear(); - - bool empty() const; - - private: - - cudaGraph_t _native_handle {nullptr}; - - std::vector> _nodes; - - void _make_native_graph(); -}; - -// ---------------------------------------------------------------------------- -// cudaNode definitions -// ---------------------------------------------------------------------------- - -//// Host handle constructor -//template -//cudaNode::Host::Host(C&& c) : work {std::forward(c)} { -//} - -// Noop handle constructor -template -cudaNode::Noop::Noop(C&& c) : work {std::forward(c)} { -} - -// Memset handle constructor -template -cudaNode::Memset::Memset(C&& c) : work {std::forward(c)} { -} - -// Copy handle constructor -template -cudaNode::Copy::Copy(C&& c) : work {std::forward(c)} { -} - -// Kernel handle constructor -template -cudaNode::Kernel::Kernel(C&& c) : work {std::forward(c)} { -} - -// Constructor -template -cudaNode::cudaNode(ArgsT&&... args) : _handle {std::forward(args)...} { -} - -// Procedure: _precede -inline void cudaNode::_precede(cudaNode* v) { - _successors.push_back(v); -} - -// ---------------------------------------------------------------------------- -// cudaGraph definitions -// ---------------------------------------------------------------------------- - -// Destructor -inline cudaGraph::~cudaGraph() { - if(_native_handle) { - cudaGraphDestroy(_native_handle); - } -} - -// Function: empty -inline bool cudaGraph::empty() const { - return _nodes.empty(); -} - -// Procedure: clear -inline void cudaGraph::clear() { - - _nodes.clear(); - - if(_native_handle) { - TF_CHECK_CUDA( - cudaGraphDestroy(_native_handle), "failed to destroy a cudaGraph on clear" - ); - _native_handle = nullptr; - } -} - -// Function: emplace_back -template -cudaNode* cudaGraph::emplace_back(ArgsT&&... 
args) { - auto node = std::make_unique(std::forward(args)...); - _nodes.emplace_back(std::move(node)); - return _nodes.back().get(); -} - -// Function: native_handle -inline cudaGraph_t cudaGraph::native_handle() { - return _native_handle; -} - -// Procedure: _make_native_graph -inline void cudaGraph::_make_native_graph() { - - //// TODO: must be nullptr - //if(_native_handle) { - // TF_CHECK_CUDA( - // cudaGraphDestroy(_native_handle), "failed to destroy the previous cudaGraph" - // ); - // _native_handle = nullptr; - //} - // - //cudaScopedDevice ctx {d}; - assert(_native_handle == nullptr); - - TF_CHECK_CUDA( - cudaGraphCreate(&_native_handle, 0), "failed to create a cudaGraph" - ); - - // create nodes - for(auto& node : _nodes) { - switch(node->_handle.index()) { - case cudaNode::NOOP: - nstd::get(node->_handle).work( - _native_handle, node->_native_handle - ); - break; - - //case cudaNode::HOST: - // nstd::get(node->_handle).work( - // _native_handle, node->_native_handle - // ); - //break; - - case cudaNode::MEMSET: - nstd::get(node->_handle).work( - _native_handle, node->_native_handle - ); - break; - - case cudaNode::COPY: - nstd::get(node->_handle).work( - _native_handle, node->_native_handle - ); - break; - - case cudaNode::KERNEL: - nstd::get(node->_handle).work( - _native_handle, node->_native_handle - ); - break; - } - } - - // create edges - for(auto& node : _nodes) { - for(auto succ : node->_successors){ - TF_CHECK_CUDA( - ::cudaGraphAddDependencies( - _native_handle, &(node->_native_handle), &(succ->_native_handle), 1 - ), - "failed to add a preceding link" - ); - } - } - -} - - -//inline void cudaGraph::run() { -// cudaGraphExec_t graphExec; -// TF_CHECK_CUDA( -// cudaGraphInstantiate(&graphExec, _handle, nullptr, nullptr, 0), -// "failed to create an executable cudaGraph" -// ); -// TF_CHECK_CUDA(cudaGraphLaunch(graphExec, 0), "failed to launch cudaGraph") -// TF_CHECK_CUDA(cudaStreamSynchronize(0), "failed to sync cudaStream"); -// TF_CHECK_CUDA( -// cudaGraphExecDestroy(graphExec), "failed to destroy an executable cudaGraph" -// ); -//} - - - - - -} // end of namespace tf ----------------------------------------------------- - diff --git a/bundled/taskflow-2.5.0/include/taskflow/cuda/cuda_task.hpp b/bundled/taskflow-2.5.0/include/taskflow/cuda/cuda_task.hpp deleted file mode 100644 index 670765f786..0000000000 --- a/bundled/taskflow-2.5.0/include/taskflow/cuda/cuda_task.hpp +++ /dev/null @@ -1,173 +0,0 @@ -#pragma once - -#include "cuda_graph.hpp" - -namespace tf { - -/** -@class cudaTask - -@brief handle to a node in a cudaGraph -*/ -class cudaTask { - - friend class cudaFlow; - - public: - - /** - @brief constructs an empty cudaTask - */ - cudaTask() = default; - - /** - @brief copy-constructs a cudaTask - */ - cudaTask(const cudaTask&) = default; - - /** - @brief copy-assigns a cudaTask - */ - cudaTask& operator = (const cudaTask&) = default; - - /** - @brief adds precedence links from this to other tasks - - @tparam Ts... parameter pack - - @param tasks one or multiple tasks - - @return @c *this - */ - template - cudaTask& precede(Ts&&... tasks); - - /** - @brief adds precedence links from other tasks to this - - @tparam Ts... parameter pack - - @param tasks one or multiple tasks - - @return @c *this - */ - template - cudaTask& succeed(Ts&&... 
tasks); - - /** - @brief assigns a name to the task - - @param name a @std_string acceptable string - - @return @c *this - */ - cudaTask& name(const std::string& name); - - /** - @brief queries the name of the task - */ - const std::string& name() const; - - /** - @brief queries the number of successors - */ - size_t num_successors() const; - - /** - @brief queries if the task is associated with a cudaNode - */ - bool empty() const; - - private: - - cudaTask(cudaNode*); - - cudaNode* _node {nullptr}; - - /// @private - template - void _precede(T&&); - - /// @private - template - void _precede(T&&, Ts&&...); - - /// @private - template - void _succeed(T&&); - - // @private - template - void _succeed(T&&, Ts&&...); -}; - -// Constructor -inline cudaTask::cudaTask(cudaNode* node) : _node {node} { -} - -// Function: precede -template -cudaTask& cudaTask::precede(Ts&&... tasks) { - _precede(std::forward(tasks)...); - return *this; -} - -/// @private -// Procedure: precede -template -void cudaTask::_precede(T&& other) { - _node->_precede(other._node); -} - -/// @private -// Procedure: _precede -template -void cudaTask::_precede(T&& task, Ts&&... others) { - _precede(std::forward(task)); - _precede(std::forward(others)...); -} - -// Function: succeed -template -cudaTask& cudaTask::succeed(Ts&&... tasks) { - _succeed(std::forward(tasks)...); - return *this; -} - -/// @private -// Procedure: _succeed -template -void cudaTask::_succeed(T&& other) { - other._node->_precede(_node); -} - -/// @private -// Procedure: _succeed -template -void cudaTask::_succeed(T&& task, Ts&&... others) { - _succeed(std::forward(task)); - _succeed(std::forward(others)...); -} - -// Function: empty -inline bool cudaTask::empty() const { - return _node == nullptr; -} - -// Function: name -inline cudaTask& cudaTask::name(const std::string& name) { - _node->_name = name; - return *this; -} - -// Function: name -inline const std::string& cudaTask::name() const { - return _node->_name; -} - -// Function: num_successors -inline size_t cudaTask::num_successors() const { - return _node->_successors.size(); -} - -} // end of namespace tf ----------------------------------------------------- diff --git a/bundled/taskflow-2.5.0/include/taskflow/declarations.hpp b/bundled/taskflow-2.5.0/include/taskflow/declarations.hpp deleted file mode 100644 index 3047b2f9ef..0000000000 --- a/bundled/taskflow-2.5.0/include/taskflow/declarations.hpp +++ /dev/null @@ -1,36 +0,0 @@ -#pragma once - -namespace tf { - -// ---------------------------------------------------------------------------- -// forward declarations -// ---------------------------------------------------------------------------- - -// taskflow -class Node; -class Graph; -class FlowBuilder; -class Subflow; -class Task; -class TaskView; -class Taskflow; -class Topology; -class Executor; -class WorkerView; -class ObserverInterface; -class ChromeTracingObserver; -class TFProfObserver; - -// cudaflow -class cudaNode; -class cudaGraph; -class cudaTask; -class cudaFlow; - - - -} // end of namespace tf ----------------------------------------------------- - - - - diff --git a/bundled/taskflow-2.5.0/include/taskflow/nstd/any.hpp b/bundled/taskflow-2.5.0/include/taskflow/nstd/any.hpp deleted file mode 100644 index a27000fe0a..0000000000 --- a/bundled/taskflow-2.5.0/include/taskflow/nstd/any.hpp +++ /dev/null @@ -1,703 +0,0 @@ -// -// Copyright (c) 2016-2018 Martin Moene -// -// https://github.com/martinmoene/any-lite -// -// Distributed under the Boost Software License, Version 1.0. 
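The any-lite header that follows mirrors the C++17 std::any interface under the nstd namespace (selecting std::any when available and otherwise the bundled fallback). A minimal usage sketch, with the include path and the any_demo wrapper being illustrative:

#include <string>
#include "taskflow/nstd/any.hpp"        // bundled header being removed in this hunk

void any_demo() {
  nstd::any a = 42;                                   // stores an int
  int v = nstd::any_cast<int>(a);                     // typed copy; throws nstd::bad_any_cast on a type mismatch
  a = std::string("hello");                           // re-assignment replaces the held type
  if (auto* s = nstd::any_cast<std::string>(&a)) {    // pointer overload: returns nullptr instead of throwing
    s->append(" world");
  }
  (void)v;
}
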
-// (See accompanying file LICENSE.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - -#pragma once - -#ifndef NSTD_ANY_LITE_HPP -#define NSTD_ANY_LITE_HPP - -#define any_lite_MAJOR 0 -#define any_lite_MINOR 2 -#define any_lite_PATCH 0 - -#define any_lite_VERSION any_STRINGIFY(any_lite_MAJOR) "." any_STRINGIFY(any_lite_MINOR) "." any_STRINGIFY(any_lite_PATCH) - -#define any_STRINGIFY( x ) any_STRINGIFY_( x ) -#define any_STRINGIFY_( x ) #x - -// any-lite configuration: - -#define any_ANY_DEFAULT 0 -#define any_ANY_NSTD 1 -#define any_ANY_STD 2 - -#if !defined( any_CONFIG_SELECT_ANY ) -# define any_CONFIG_SELECT_ANY ( any_HAVE_STD_ANY ? any_ANY_STD : any_ANY_NSTD ) -#endif - -// Control presence of exception handling (try and auto discover): - -#ifndef any_CONFIG_NO_EXCEPTIONS -# if defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND) -# define any_CONFIG_NO_EXCEPTIONS 0 -# else -# define any_CONFIG_NO_EXCEPTIONS 1 -# endif -#endif - -// C++ language version detection (C++20 is speculative): -// Note: VC14.0/1900 (VS2015) lacks too much from C++14. - -#ifndef any_CPLUSPLUS -# if defined(_MSVC_LANG ) && !defined(__clang__) -# define any_CPLUSPLUS (_MSC_VER == 1900 ? 201103L : _MSVC_LANG ) -# else -# define any_CPLUSPLUS __cplusplus -# endif -#endif - -#define any_CPP98_OR_GREATER ( any_CPLUSPLUS >= 199711L ) -#define any_CPP11_OR_GREATER ( any_CPLUSPLUS >= 201103L ) -#define any_CPP14_OR_GREATER ( any_CPLUSPLUS >= 201402L ) -#define any_CPP17_OR_GREATER ( any_CPLUSPLUS >= 201703L ) -#define any_CPP20_OR_GREATER ( any_CPLUSPLUS >= 202000L ) - -// Use C++17 std::any if available and requested: - -#if any_CPP17_OR_GREATER && defined(__has_include ) -# if __has_include( ) -# define any_HAVE_STD_ANY 1 -# else -# define any_HAVE_STD_ANY 0 -# endif -#else -# define any_HAVE_STD_ANY 0 -#endif - -#define any_USES_STD_ANY ( (any_CONFIG_SELECT_ANY == any_ANY_STD) || ((any_CONFIG_SELECT_ANY == any_ANY_DEFAULT) && any_HAVE_STD_ANY) ) - -// -// in_place: code duplicated in any-lite, expected-lite, optional-lite, value-ptr-lite, variant-lite: -// - -#ifndef nstd_lite_HAVE_IN_PLACE_TYPES -#define nstd_lite_HAVE_IN_PLACE_TYPES 1 - -// C++17 std::in_place in : - -#if any_CPP17_OR_GREATER - -#include - -namespace nstd { - -using std::in_place; -using std::in_place_type; -using std::in_place_index; -using std::in_place_t; -using std::in_place_type_t; -using std::in_place_index_t; - -#define nstd_lite_in_place_t( T) std::in_place_t -#define nstd_lite_in_place_type_t( T) std::in_place_type_t -#define nstd_lite_in_place_index_t(K) std::in_place_index_t - -#define nstd_lite_in_place( T) std::in_place_t{} -#define nstd_lite_in_place_type( T) std::in_place_type_t{} -#define nstd_lite_in_place_index(K) std::in_place_index_t{} - -} // namespace nstd - -#else // any_CPP17_OR_GREATER - -#include - -namespace nstd { -namespace detail { - -template< class T > -struct in_place_type_tag {}; - -template< std::size_t K > -struct in_place_index_tag {}; - -} // namespace detail - -struct in_place_t {}; - -template< class T > -inline in_place_t in_place( detail::in_place_type_tag = detail::in_place_type_tag() ) -{ - return in_place_t(); -} - -template< std::size_t K > -inline in_place_t in_place( detail::in_place_index_tag = detail::in_place_index_tag() ) -{ - return in_place_t(); -} - -template< class T > -inline in_place_t in_place_type( detail::in_place_type_tag = detail::in_place_type_tag() ) -{ - return in_place_t(); -} - -template< std::size_t K > -inline in_place_t in_place_index( 
detail::in_place_index_tag = detail::in_place_index_tag() ) -{ - return in_place_t(); -} - -// mimic templated typedef: - -#define nstd_lite_in_place_t( T) nstd::in_place_t(&)( nstd::detail::in_place_type_tag ) -#define nstd_lite_in_place_type_t( T) nstd::in_place_t(&)( nstd::detail::in_place_type_tag ) -#define nstd_lite_in_place_index_t(K) nstd::in_place_t(&)( nstd::detail::in_place_index_tag ) - -#define nstd_lite_in_place( T) nstd::in_place_type -#define nstd_lite_in_place_type( T) nstd::in_place_type -#define nstd_lite_in_place_index(K) nstd::in_place_index - -} // namespace nstd - -#endif // any_CPP17_OR_GREATER -#endif // nstd_lite_HAVE_IN_PLACE_TYPES - -// -// Using std::any: -// - -#if any_USES_STD_ANY - -#include -#include - -namespace nstd { - - using std::any; - using std::any_cast; - using std::make_any; - using std::swap; - using std::bad_any_cast; -} - -#else // any_USES_STD_ANY - -#include - -// Compiler versions: -// -// MSVC++ 6.0 _MSC_VER == 1200 any_COMPILER_MSVC_VERSION == 60 (Visual Studio 6.0) -// MSVC++ 7.0 _MSC_VER == 1300 any_COMPILER_MSVC_VERSION == 70 (Visual Studio .NET 2002) -// MSVC++ 7.1 _MSC_VER == 1310 any_COMPILER_MSVC_VERSION == 71 (Visual Studio .NET 2003) -// MSVC++ 8.0 _MSC_VER == 1400 any_COMPILER_MSVC_VERSION == 80 (Visual Studio 2005) -// MSVC++ 9.0 _MSC_VER == 1500 any_COMPILER_MSVC_VERSION == 90 (Visual Studio 2008) -// MSVC++ 10.0 _MSC_VER == 1600 any_COMPILER_MSVC_VERSION == 100 (Visual Studio 2010) -// MSVC++ 11.0 _MSC_VER == 1700 any_COMPILER_MSVC_VERSION == 110 (Visual Studio 2012) -// MSVC++ 12.0 _MSC_VER == 1800 any_COMPILER_MSVC_VERSION == 120 (Visual Studio 2013) -// MSVC++ 14.0 _MSC_VER == 1900 any_COMPILER_MSVC_VERSION == 140 (Visual Studio 2015) -// MSVC++ 14.1 _MSC_VER >= 1910 any_COMPILER_MSVC_VERSION == 141 (Visual Studio 2017) -// MSVC++ 14.2 _MSC_VER >= 1920 any_COMPILER_MSVC_VERSION == 142 (Visual Studio 2019) - -#if defined(_MSC_VER ) && !defined(__clang__) -# define any_COMPILER_MSVC_VER (_MSC_VER ) -# define any_COMPILER_MSVC_VERSION (_MSC_VER / 10 - 10 * ( 5 + (_MSC_VER < 1900 ) ) ) -#else -# define any_COMPILER_MSVC_VER 0 -# define any_COMPILER_MSVC_VERSION 0 -#endif - -#define any_COMPILER_VERSION( major, minor, patch ) ( 10 * ( 10 * (major) + (minor) ) + (patch) ) - -#if defined(__clang__) -# define any_COMPILER_CLANG_VERSION any_COMPILER_VERSION(__clang_major__, __clang_minor__, __clang_patchlevel__) -#else -# define any_COMPILER_CLANG_VERSION 0 -#endif - -#if defined(__GNUC__) && !defined(__clang__) -# define any_COMPILER_GNUC_VERSION any_COMPILER_VERSION(__GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__) -#else -# define any_COMPILER_GNUC_VERSION 0 -#endif - -// half-open range [lo..hi): -//#define any_BETWEEN( v, lo, hi ) ( (lo) <= (v) && (v) < (hi) ) - -// Presence of language and library features: - -#define any_HAVE( feature ) ( any_HAVE_##feature ) - -#ifdef _HAS_CPP0X -# define any_HAS_CPP0X _HAS_CPP0X -#else -# define any_HAS_CPP0X 0 -#endif - -#define any_CPP11_90 (any_CPP11_OR_GREATER || any_COMPILER_MSVC_VER >= 1500) -#define any_CPP11_100 (any_CPP11_OR_GREATER || any_COMPILER_MSVC_VER >= 1600) -#define any_CPP11_120 (any_CPP11_OR_GREATER || any_COMPILER_MSVC_VER >= 1800) -#define any_CPP11_140 (any_CPP11_OR_GREATER || any_COMPILER_MSVC_VER >= 1900) - -#define any_CPP14_000 (any_CPP14_OR_GREATER) -#define any_CPP17_000 (any_CPP17_OR_GREATER) - -// Presence of C++11 language features: - -#define any_HAVE_CONSTEXPR_11 any_CPP11_140 -#define any_HAVE_DEFAULT_FUNCTION_TEMPLATE_ARG \ - any_CPP11_120 -#define 
any_HAVE_INITIALIZER_LIST any_CPP11_120 -#define any_HAVE_NOEXCEPT any_CPP11_140 -#define any_HAVE_NULLPTR any_CPP11_100 -#define any_HAVE_TYPE_TRAITS any_CPP11_90 -#define any_HAVE_STATIC_ASSERT any_CPP11_100 -#define any_HAVE_ADD_CONST any_CPP11_90 -#define any_HAVE_REMOVE_REFERENCE any_CPP11_90 - -#define any_HAVE_TR1_ADD_CONST (!! any_COMPILER_GNUC_VERSION ) -#define any_HAVE_TR1_REMOVE_REFERENCE (!! any_COMPILER_GNUC_VERSION ) -#define any_HAVE_TR1_TYPE_TRAITS (!! any_COMPILER_GNUC_VERSION ) - -// Presence of C++14 language features: - -#define any_HAVE_CONSTEXPR_14 any_CPP14_000 - -// Presence of C++17 language features: - -#define any_HAVE_NODISCARD any_CPP17_000 - -// Presence of C++ language features: - -#if any_HAVE_CONSTEXPR_11 -# define any_constexpr constexpr -#else -# define any_constexpr /*constexpr*/ -#endif - -#if any_HAVE_CONSTEXPR_14 -# define any_constexpr14 constexpr -#else -# define any_constexpr14 /*constexpr*/ -#endif - -#if any_HAVE_NOEXCEPT -# define any_noexcept noexcept -#else -# define any_noexcept /*noexcept*/ -#endif - -#if any_HAVE_NULLPTR -# define any_nullptr nullptr -#else -# define any_nullptr NULL -#endif - -#if any_HAVE_NODISCARD -# define any_nodiscard [[nodiscard]] -#else -# define any_nodiscard /*[[nodiscard]]*/ -#endif - -// additional includes: - -#if any_CONFIG_NO_EXCEPTIONS -# include -#else -# include -#endif - -#if ! any_HAVE_NULLPTR -# include -#endif - -#if any_HAVE_INITIALIZER_LIST -# include -#endif - -#if any_HAVE_TYPE_TRAITS -# include -#elif any_HAVE_TR1_TYPE_TRAITS -# include -#endif - -// Method enabling - -#if any_CPP11_OR_GREATER - -#define any_REQUIRES_0(...) \ - template< bool B = (__VA_ARGS__), typename std::enable_if::type = 0 > - -#define any_REQUIRES_T(...) \ - , typename = typename std::enable_if< (__VA_ARGS__), nstd::any_lite::detail::enabler >::type - -#define any_REQUIRES_R(R, ...) \ - typename std::enable_if<__VA_ARGS__, R>::type - -#define any_REQUIRES_A(...) \ - , typename std::enable_if<__VA_ARGS__, void*>::type = nullptr - -#endif - -// -// any: -// - -namespace nstd { namespace any_lite { - -// C++11 emulation: - -namespace std11 { - -#if any_HAVE_ADD_CONST - -using std::add_const; - -#elif any_HAVE_TR1_ADD_CONST - -using std::tr1::add_const; - -#else - -template< class T > struct add_const { typedef const T type; }; - -#endif // any_HAVE_ADD_CONST - -#if any_HAVE_REMOVE_REFERENCE - -using std::remove_reference; - -#elif any_HAVE_TR1_REMOVE_REFERENCE - -using std::tr1::remove_reference; - -#else - -template< class T > struct remove_reference { typedef T type; }; -template< class T > struct remove_reference { typedef T type; }; - -#endif // any_HAVE_REMOVE_REFERENCE - -} // namespace std11 - -namespace detail { - -// for any_REQUIRES_T - -/*enum*/ class enabler{}; - -} // namespace detail - -#if ! any_CONFIG_NO_EXCEPTIONS - -class bad_any_cast : public std::bad_cast -{ -public: -#if any_CPP11_OR_GREATER - virtual const char* what() const any_noexcept -#else - virtual const char* what() const throw() -#endif - { - return "any-lite: bad any_cast"; - } -}; - -#endif // any_CONFIG_NO_EXCEPTIONS - -class any -{ -public: - any_constexpr any() any_noexcept - : content( any_nullptr ) - {} - - any( any const & other ) - : content( other.content ? other.content->clone() : any_nullptr ) - {} - -#if any_CPP11_OR_GREATER - - any( any && other ) any_noexcept - : content( std::move( other.content ) ) - { - other.content = any_nullptr; - } - - template< - class ValueType, class T = typename std::decay::type - any_REQUIRES_T( ! 
std::is_same::value ) - > - any( ValueType && value ) any_noexcept - : content( new holder( std::forward( value ) ) ) - {} - - template< - class T, class... Args - any_REQUIRES_T( std::is_constructible::value ) - > - explicit any( nstd_lite_in_place_type_t(T), Args&&... args ) - : content( new holder( T( std::forward(args)... ) ) ) - {} - - template< - class T, class U, class... Args - any_REQUIRES_T( std::is_constructible&, Args&&...>::value ) - > - explicit any( nstd_lite_in_place_type_t(T), std::initializer_list il, Args&&... args ) - : content( new holder( T( il, std::forward(args)... ) ) ) - {} - -#else - - template< class ValueType > - any( ValueType const & value ) - : content( new holder( value ) ) - {} - -#endif // any_CPP11_OR_GREATER - - ~any() - { - reset(); - } - - any & operator=( any const & other ) - { - any( other ).swap( *this ); - return *this; - } - -#if any_CPP11_OR_GREATER - - any & operator=( any && other ) any_noexcept - { - any( std::move( other ) ).swap( *this ); - return *this; - } - - template< - class ValueType, class T = typename std::decay::type - any_REQUIRES_T( ! std::is_same::value ) - > - any & operator=( ValueType && value ) - { - any( std::move( value ) ).swap( *this ); - return *this; - } - - template< class T, class... Args > - void emplace( Args && ... args ) - { - any( T( std::forward(args)... ) ).swap( *this ); - } - - template< - class T, class U, class... Args - any_REQUIRES_T( std::is_constructible&, Args&&...>::value ) - > - void emplace( std::initializer_list il, Args&&... args ) - { - any( T( il, std::forward(args)... ) ).swap( *this ); - } - -#else - - template< class ValueType > - any & operator=( ValueType const & value ) - { - any( value ).swap( *this ); - return *this; - } - -#endif // any_CPP11_OR_GREATER - - void reset() any_noexcept - { - delete content; content = any_nullptr; - } - - void swap( any & other ) any_noexcept - { - std::swap( content, other.content ); - } - - bool has_value() const any_noexcept - { - return content != any_nullptr; - } - - const std::type_info & type() const any_noexcept - { - return has_value() ? 
content->type() : typeid( void ); - } - - // - // non-standard: - // - - template< class ValueType > - const ValueType * to_ptr() const - { - return &( static_cast *>( content )->held ); - } - - template< class ValueType > - ValueType * to_ptr() - { - return &( static_cast *>( content )->held ); - } - -private: - class placeholder - { - public: - virtual ~placeholder() - { - } - - virtual std::type_info const & type() const = 0; - - virtual placeholder * clone() const = 0; - }; - - template< typename ValueType > - class holder : public placeholder - { - public: - holder( ValueType const & value ) - : held( value ) - {} - -#if any_CPP11_OR_GREATER - holder( ValueType && value ) - : held( std::move( value ) ) - {} -#endif - - virtual std::type_info const & type() const - { - return typeid( ValueType ); - } - - virtual placeholder * clone() const - { - return new holder( held ); - } - - ValueType held; - }; - - placeholder * content; -}; - -inline void swap( any & x, any & y ) any_noexcept -{ - x.swap( y ); -} - -#if any_CPP11_OR_GREATER - -template< class T, class ...Args > -inline any make_any( Args&& ...args ) -{ - return any( nstd_lite_in_place_type(T), std::forward(args)...); -} - -template< class T, class U, class ...Args > -inline any make_any( std::initializer_list il, Args&& ...args ) -{ - return any( nstd_lite_in_place_type(T), il, std::forward(args)...); -} - -#endif // any_CPP11_OR_GREATER - -template< - class ValueType -#if any_HAVE_DEFAULT_FUNCTION_TEMPLATE_ARG -// any_REQUIRES_T(...) Allow for VC120 (VS2013): - , typename = typename std::enable_if< (std::is_reference::value || std::is_copy_constructible::value), nstd::any_lite::detail::enabler >::type -#endif -> -any_nodiscard inline ValueType any_cast( any const & operand ) -{ - const ValueType * result = any_cast< typename std11::add_const< typename std11::remove_reference::type >::type >( &operand ); - -#if any_CONFIG_NO_EXCEPTIONS - assert( result ); -#else - if ( ! result ) - { - throw bad_any_cast(); - } -#endif - - return *result; -} - -template< - class ValueType -#if any_HAVE_DEFAULT_FUNCTION_TEMPLATE_ARG -// any_REQUIRES_T(...) Allow for VC120 (VS2013): - , typename = typename std::enable_if< (std::is_reference::value || std::is_copy_constructible::value), nstd::any_lite::detail::enabler >::type -#endif -> -any_nodiscard inline ValueType any_cast( any & operand ) -{ - const ValueType * result = any_cast< typename std11::remove_reference::type >( &operand ); - -#if any_CONFIG_NO_EXCEPTIONS - assert( result ); -#else - if ( ! result ) - { - throw bad_any_cast(); - } -#endif - - return *result; -} - -#if any_CPP11_OR_GREATER - -template< - class ValueType -#if any_HAVE_DEFAULT_FUNCTION_TEMPLATE_ARG - any_REQUIRES_T( std::is_reference::value || std::is_copy_constructible::value ) -#endif -> -any_nodiscard inline ValueType any_cast( any && operand ) -{ - const ValueType * result = any_cast< typename std11::remove_reference::type >( &operand ); - -#if any_CONFIG_NO_EXCEPTIONS - assert( result ); -#else - if ( ! result ) - { - throw bad_any_cast(); - } -#endif - - return *result; -} - -#endif // any_CPP11_OR_GREATER - -template< class ValueType > -any_nodiscard inline ValueType const * any_cast( any const * operand ) any_noexcept -{ - return operand != any_nullptr && operand->type() == typeid(ValueType) ? operand->to_ptr() : any_nullptr; -} - -template -any_nodiscard inline ValueType * any_cast( any * operand ) any_noexcept -{ - return operand != any_nullptr && operand->type() == typeid(ValueType) ? 
operand->to_ptr() : any_nullptr; -} - -} // namespace any_lite - -using namespace any_lite; - -} // namespace nstd - -#endif // any_USES_STD_ANY - -#endif // NSTD_ANY_LITE_HPP diff --git a/bundled/taskflow-2.5.0/include/taskflow/nstd/optional.hpp b/bundled/taskflow-2.5.0/include/taskflow/nstd/optional.hpp deleted file mode 100644 index 578ea03d93..0000000000 --- a/bundled/taskflow-2.5.0/include/taskflow/nstd/optional.hpp +++ /dev/null @@ -1,1718 +0,0 @@ -// -// Copyright (c) 2014-2018 Martin Moene -// -// https://github.com/martinmoene/optional-lite -// -// Distributed under the Boost Software License, Version 1.0. -// (See accompanying file LICENSE.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - -#pragma once - -#define optional_lite_MAJOR 3 -#define optional_lite_MINOR 2 -#define optional_lite_PATCH 0 - -#define optional_lite_VERSION optional_STRINGIFY(optional_lite_MAJOR) "." optional_STRINGIFY(optional_lite_MINOR) "." optional_STRINGIFY(optional_lite_PATCH) - -#define optional_STRINGIFY( x ) optional_STRINGIFY_( x ) -#define optional_STRINGIFY_( x ) #x - -// optional-lite configuration: - -#define optional_OPTIONAL_DEFAULT 0 -#define optional_OPTIONAL_NONSTD 1 -#define optional_OPTIONAL_STD 2 - -#if !defined( optional_CONFIG_SELECT_OPTIONAL ) -# define optional_CONFIG_SELECT_OPTIONAL ( optional_HAVE_STD_OPTIONAL ? optional_OPTIONAL_STD : optional_OPTIONAL_NONSTD ) -#endif - -// Control presence of exception handling (try and auto discover): - -#ifndef optional_CONFIG_NO_EXCEPTIONS -# if defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND) -# define optional_CONFIG_NO_EXCEPTIONS 0 -# else -# define optional_CONFIG_NO_EXCEPTIONS 1 -# endif -#endif - -// C++ language version detection (C++20 is speculative): -// Note: VC14.0/1900 (VS2015) lacks too much from C++14. - -#ifndef optional_CPLUSPLUS -# if defined(_MSVC_LANG ) && !defined(__clang__) -# define optional_CPLUSPLUS (_MSC_VER == 1900 ? 201103L : _MSVC_LANG ) -# else -# define optional_CPLUSPLUS __cplusplus -# endif -#endif - -#define optional_CPP98_OR_GREATER ( optional_CPLUSPLUS >= 199711L ) -#define optional_CPP11_OR_GREATER ( optional_CPLUSPLUS >= 201103L ) -#define optional_CPP11_OR_GREATER_ ( optional_CPLUSPLUS >= 201103L ) -#define optional_CPP14_OR_GREATER ( optional_CPLUSPLUS >= 201402L ) -#define optional_CPP17_OR_GREATER ( optional_CPLUSPLUS >= 201703L ) -#define optional_CPP20_OR_GREATER ( optional_CPLUSPLUS >= 202000L ) - -// C++ language version (represent 98 as 3): - -#define optional_CPLUSPLUS_V ( optional_CPLUSPLUS / 100 - (optional_CPLUSPLUS > 200000 ? 
2000 : 1994) ) - -// Use C++17 std::optional if available and requested: - -#if optional_CPP17_OR_GREATER && defined(__has_include ) -# if __has_include( ) -# define optional_HAVE_STD_OPTIONAL 1 -# else -# define optional_HAVE_STD_OPTIONAL 0 -# endif -#else -# define optional_HAVE_STD_OPTIONAL 0 -#endif - -#define optional_USES_STD_OPTIONAL ( (optional_CONFIG_SELECT_OPTIONAL == optional_OPTIONAL_STD) || ((optional_CONFIG_SELECT_OPTIONAL == optional_OPTIONAL_DEFAULT) && optional_HAVE_STD_OPTIONAL) ) - -// -// in_place: code duplicated in any-lite, expected-lite, optional-lite, value-ptr-lite, variant-lite: -// - -#ifndef nonstd_lite_HAVE_IN_PLACE_TYPES -#define nonstd_lite_HAVE_IN_PLACE_TYPES 1 - -// C++17 std::in_place in : - -#if optional_CPP17_OR_GREATER - -#include - -namespace nonstd { - -using std::in_place; -using std::in_place_type; -using std::in_place_index; -using std::in_place_t; -using std::in_place_type_t; -using std::in_place_index_t; - -#define nonstd_lite_in_place_t( T) std::in_place_t -#define nonstd_lite_in_place_type_t( T) std::in_place_type_t -#define nonstd_lite_in_place_index_t(K) std::in_place_index_t - -#define nonstd_lite_in_place( T) std::in_place_t{} -#define nonstd_lite_in_place_type( T) std::in_place_type_t{} -#define nonstd_lite_in_place_index(K) std::in_place_index_t{} - -} // namespace nonstd - -#else // optional_CPP17_OR_GREATER - -#include - -namespace nonstd { -namespace detail { - -template< class T > -struct in_place_type_tag {}; - -template< std::size_t K > -struct in_place_index_tag {}; - -} // namespace detail - -struct in_place_t {}; - -template< class T > -inline in_place_t in_place( detail::in_place_type_tag /*unused*/ = detail::in_place_type_tag() ) -{ - return in_place_t(); -} - -template< std::size_t K > -inline in_place_t in_place( detail::in_place_index_tag /*unused*/ = detail::in_place_index_tag() ) -{ - return in_place_t(); -} - -template< class T > -inline in_place_t in_place_type( detail::in_place_type_tag /*unused*/ = detail::in_place_type_tag() ) -{ - return in_place_t(); -} - -template< std::size_t K > -inline in_place_t in_place_index( detail::in_place_index_tag /*unused*/ = detail::in_place_index_tag() ) -{ - return in_place_t(); -} - -// mimic templated typedef: - -#define nonstd_lite_in_place_t( T) nonstd::in_place_t(&)( nonstd::detail::in_place_type_tag ) -#define nonstd_lite_in_place_type_t( T) nonstd::in_place_t(&)( nonstd::detail::in_place_type_tag ) -#define nonstd_lite_in_place_index_t(K) nonstd::in_place_t(&)( nonstd::detail::in_place_index_tag ) - -#define nonstd_lite_in_place( T) nonstd::in_place_type -#define nonstd_lite_in_place_type( T) nonstd::in_place_type -#define nonstd_lite_in_place_index(K) nonstd::in_place_index - -} // namespace nonstd - -#endif // optional_CPP17_OR_GREATER -#endif // nonstd_lite_HAVE_IN_PLACE_TYPES - -// -// Using std::optional: -// - -#if optional_USES_STD_OPTIONAL - -#include - -namespace nonstd { - - using std::optional; - using std::bad_optional_access; - using std::hash; - - using std::nullopt; - using std::nullopt_t; - - using std::operator==; - using std::operator!=; - using std::operator<; - using std::operator<=; - using std::operator>; - using std::operator>=; - using std::make_optional; - using std::swap; -} - -#else // optional_USES_STD_OPTIONAL - -#include -#include - -// optional-lite alignment configuration: - -#ifndef optional_CONFIG_MAX_ALIGN_HACK -# define optional_CONFIG_MAX_ALIGN_HACK 0 -#endif - -#ifndef optional_CONFIG_ALIGN_AS -// no default, used in #if defined() -#endif 
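Like any-lite, optional-lite selects std::optional when available and otherwise falls back to the bundled implementation; either way the nonstd::optional surface follows the standard interface. A minimal sketch, with the include path and the parse_port example being illustrative:

#include <string>
#include "taskflow/nstd/optional.hpp"   // bundled header being removed in this hunk

nonstd::optional<int> parse_port(const std::string& s) {
  if (s.empty()) return nonstd::nullopt;   // disengaged optional
  return std::stoi(s);                     // engaged optional holding the parsed value
}

void optional_demo() {
  int port = parse_port("8080").value_or(80);   // falls back to 80 if parse_port returned nullopt
  (void)port;
}
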
- -#ifndef optional_CONFIG_ALIGN_AS_FALLBACK -# define optional_CONFIG_ALIGN_AS_FALLBACK double -#endif - -// Compiler warning suppression: - -#if defined(__clang__) -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wundef" -#elif defined(__GNUC__) -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Wundef" -#elif defined(_MSC_VER ) -# pragma warning( push ) -#endif - -// half-open range [lo..hi): -#define optional_BETWEEN( v, lo, hi ) ( (lo) <= (v) && (v) < (hi) ) - -// Compiler versions: -// -// MSVC++ 6.0 _MSC_VER == 1200 optional_COMPILER_MSVC_VERSION == 60 (Visual Studio 6.0) -// MSVC++ 7.0 _MSC_VER == 1300 optional_COMPILER_MSVC_VERSION == 70 (Visual Studio .NET 2002) -// MSVC++ 7.1 _MSC_VER == 1310 optional_COMPILER_MSVC_VERSION == 71 (Visual Studio .NET 2003) -// MSVC++ 8.0 _MSC_VER == 1400 optional_COMPILER_MSVC_VERSION == 80 (Visual Studio 2005) -// MSVC++ 9.0 _MSC_VER == 1500 optional_COMPILER_MSVC_VERSION == 90 (Visual Studio 2008) -// MSVC++ 10.0 _MSC_VER == 1600 optional_COMPILER_MSVC_VERSION == 100 (Visual Studio 2010) -// MSVC++ 11.0 _MSC_VER == 1700 optional_COMPILER_MSVC_VERSION == 110 (Visual Studio 2012) -// MSVC++ 12.0 _MSC_VER == 1800 optional_COMPILER_MSVC_VERSION == 120 (Visual Studio 2013) -// MSVC++ 14.0 _MSC_VER == 1900 optional_COMPILER_MSVC_VERSION == 140 (Visual Studio 2015) -// MSVC++ 14.1 _MSC_VER >= 1910 optional_COMPILER_MSVC_VERSION == 141 (Visual Studio 2017) -// MSVC++ 14.2 _MSC_VER >= 1920 optional_COMPILER_MSVC_VERSION == 142 (Visual Studio 2019) - -#if defined(_MSC_VER ) && !defined(__clang__) -# define optional_COMPILER_MSVC_VER (_MSC_VER ) -# define optional_COMPILER_MSVC_VERSION (_MSC_VER / 10 - 10 * ( 5 + (_MSC_VER < 1900 ) ) ) -#else -# define optional_COMPILER_MSVC_VER 0 -# define optional_COMPILER_MSVC_VERSION 0 -#endif - -#define optional_COMPILER_VERSION( major, minor, patch ) ( 10 * (10 * (major) + (minor) ) + (patch) ) - -#if defined(__GNUC__) && !defined(__clang__) -# define optional_COMPILER_GNUC_VERSION optional_COMPILER_VERSION(__GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__) -#else -# define optional_COMPILER_GNUC_VERSION 0 -#endif - -#if defined(__clang__) -# define optional_COMPILER_CLANG_VERSION optional_COMPILER_VERSION(__clang_major__, __clang_minor__, __clang_patchlevel__) -#else -# define optional_COMPILER_CLANG_VERSION 0 -#endif - -#if optional_BETWEEN(optional_COMPILER_MSVC_VERSION, 70, 140 ) -# pragma warning( disable: 4345 ) // initialization behavior changed -#endif - -#if optional_BETWEEN(optional_COMPILER_MSVC_VERSION, 70, 150 ) -# pragma warning( disable: 4814 ) // in C++14 'constexpr' will not imply 'const' -#endif - -// Presence of language and library features: - -#define optional_HAVE(FEATURE) ( optional_HAVE_##FEATURE ) - -#ifdef _HAS_CPP0X -# define optional_HAS_CPP0X _HAS_CPP0X -#else -# define optional_HAS_CPP0X 0 -#endif - -// Unless defined otherwise below, consider VC14 as C++11 for optional-lite: - -#if optional_COMPILER_MSVC_VER >= 1900 -# undef optional_CPP11_OR_GREATER -# define optional_CPP11_OR_GREATER 1 -#endif - -#define optional_CPP11_90 (optional_CPP11_OR_GREATER_ || optional_COMPILER_MSVC_VER >= 1500) -#define optional_CPP11_100 (optional_CPP11_OR_GREATER_ || optional_COMPILER_MSVC_VER >= 1600) -#define optional_CPP11_110 (optional_CPP11_OR_GREATER_ || optional_COMPILER_MSVC_VER >= 1700) -#define optional_CPP11_120 (optional_CPP11_OR_GREATER_ || optional_COMPILER_MSVC_VER >= 1800) -#define optional_CPP11_140 (optional_CPP11_OR_GREATER_ || optional_COMPILER_MSVC_VER >= 
1900) -#define optional_CPP11_141 (optional_CPP11_OR_GREATER_ || optional_COMPILER_MSVC_VER >= 1910) - -#define optional_CPP11_140_490 ((optional_CPP11_OR_GREATER_ && optional_COMPILER_GNUC_VERSION >= 490) || (optional_COMPILER_MSVC_VER >= 1910)) - -#define optional_CPP14_000 (optional_CPP14_OR_GREATER) -#define optional_CPP17_000 (optional_CPP17_OR_GREATER) - -// Presence of C++11 language features: - -#define optional_HAVE_CONSTEXPR_11 optional_CPP11_140 -#define optional_HAVE_IS_DEFAULT optional_CPP11_140 -#define optional_HAVE_NOEXCEPT optional_CPP11_140 -#define optional_HAVE_NULLPTR optional_CPP11_100 -#define optional_HAVE_REF_QUALIFIER optional_CPP11_140_490 -#define optional_HAVE_INITIALIZER_LIST optional_CPP11_140 - -// Presence of C++14 language features: - -#define optional_HAVE_CONSTEXPR_14 optional_CPP14_000 - -// Presence of C++17 language features: - -#define optional_HAVE_NODISCARD optional_CPP17_000 - -// Presence of C++ library features: - -#define optional_HAVE_CONDITIONAL optional_CPP11_120 -#define optional_HAVE_REMOVE_CV optional_CPP11_120 -#define optional_HAVE_TYPE_TRAITS optional_CPP11_90 - -#define optional_HAVE_TR1_TYPE_TRAITS (!! optional_COMPILER_GNUC_VERSION ) -#define optional_HAVE_TR1_ADD_POINTER (!! optional_COMPILER_GNUC_VERSION ) - -// C++ feature usage: - -#if optional_HAVE( CONSTEXPR_11 ) -# define optional_constexpr constexpr -#else -# define optional_constexpr /*constexpr*/ -#endif - -#if optional_HAVE( IS_DEFAULT ) -# define optional_is_default = default; -#else -# define optional_is_default {} -#endif - -#if optional_HAVE( CONSTEXPR_14 ) -# define optional_constexpr14 constexpr -#else -# define optional_constexpr14 /*constexpr*/ -#endif - -#if optional_HAVE( NODISCARD ) -# define optional_nodiscard [[nodiscard]] -#else -# define optional_nodiscard /*[[nodiscard]]*/ -#endif - -#if optional_HAVE( NOEXCEPT ) -# define optional_noexcept noexcept -#else -# define optional_noexcept /*noexcept*/ -#endif - -#if optional_HAVE( NULLPTR ) -# define optional_nullptr nullptr -#else -# define optional_nullptr NULL -#endif - -#if optional_HAVE( REF_QUALIFIER ) -// NOLINTNEXTLINE( bugprone-macro-parentheses ) -# define optional_ref_qual & -# define optional_refref_qual && -#else -# define optional_ref_qual /*&*/ -# define optional_refref_qual /*&&*/ -#endif - -// additional includes: - -#if optional_CONFIG_NO_EXCEPTIONS -// already included: -#else -# include -#endif - -#if optional_CPP11_OR_GREATER -# include -#endif - -#if optional_HAVE( INITIALIZER_LIST ) -# include -#endif - -#if optional_HAVE( TYPE_TRAITS ) -# include -#elif optional_HAVE( TR1_TYPE_TRAITS ) -# include -#endif - -// Method enabling - -#if optional_CPP11_OR_GREATER - -#define optional_REQUIRES_0(...) \ - template< bool B = (__VA_ARGS__), typename std::enable_if::type = 0 > - -#define optional_REQUIRES_T(...) \ - , typename std::enable_if< (__VA_ARGS__), int >::type = 0 - -#define optional_REQUIRES_R(R, ...) \ - typename std::enable_if< (__VA_ARGS__), R>::type - -#define optional_REQUIRES_A(...) 
\ - , typename std::enable_if< (__VA_ARGS__), void*>::type = nullptr - -#endif - -// -// optional: -// - -namespace nonstd { namespace optional_lite { - -namespace std11 { - -#if optional_CPP11_OR_GREATER - using std::move; -#else - template< typename T > T & move( T & t ) { return t; } -#endif - -#if optional_HAVE( CONDITIONAL ) - using std::conditional; -#else - template< bool B, typename T, typename F > struct conditional { typedef T type; }; - template< typename T, typename F > struct conditional { typedef F type; }; -#endif // optional_HAVE_CONDITIONAL - -// gcc < 5: -#if optional_CPP11_OR_GREATER -#if optional_BETWEEN( optional_COMPILER_GNUC_VERSION, 1, 500 ) - template< typename T > struct is_trivially_copy_constructible : std::true_type{}; - template< typename T > struct is_trivially_move_constructible : std::true_type{}; -#else - using std::is_trivially_copy_constructible; - using std::is_trivially_move_constructible; -#endif -#endif -} // namespace std11 - -#if optional_CPP11_OR_GREATER - -/// type traits C++17: - -namespace std17 { - -#if optional_CPP17_OR_GREATER - -using std::is_swappable; -using std::is_nothrow_swappable; - -#elif optional_CPP11_OR_GREATER - -namespace detail { - -using std::swap; - -struct is_swappable -{ - template< typename T, typename = decltype( swap( std::declval(), std::declval() ) ) > - static std::true_type test( int /*unused*/ ); - - template< typename > - static std::false_type test(...); -}; - -struct is_nothrow_swappable -{ - // wrap noexcept(expr) in separate function as work-around for VC140 (VS2015): - - template< typename T > - static constexpr bool satisfies() - { - return noexcept( swap( std::declval(), std::declval() ) ); - } - - template< typename T > - static auto test( int /*unused*/ ) -> std::integral_constant()>{} - - template< typename > - static auto test(...) 
-> std::false_type; -}; - -} // namespace detail - -// is [nothow] swappable: - -template< typename T > -struct is_swappable : decltype( detail::is_swappable::test(0) ){}; - -template< typename T > -struct is_nothrow_swappable : decltype( detail::is_nothrow_swappable::test(0) ){}; - -#endif // optional_CPP17_OR_GREATER - -} // namespace std17 - -/// type traits C++20: - -namespace std20 { - -template< typename T > -struct remove_cvref -{ - typedef typename std::remove_cv< typename std::remove_reference::type >::type type; -}; - -} // namespace std20 - -#endif // optional_CPP11_OR_GREATER - -/// class optional - -template< typename T > -class optional; - -namespace detail { - -// C++11 emulation: - -struct nulltype{}; - -template< typename Head, typename Tail > -struct typelist -{ - typedef Head head; - typedef Tail tail; -}; - -#if optional_CONFIG_MAX_ALIGN_HACK - -// Max align, use most restricted type for alignment: - -#define optional_UNIQUE( name ) optional_UNIQUE2( name, __LINE__ ) -#define optional_UNIQUE2( name, line ) optional_UNIQUE3( name, line ) -#define optional_UNIQUE3( name, line ) name ## line - -#define optional_ALIGN_TYPE( type ) \ - type optional_UNIQUE( _t ); struct_t< type > optional_UNIQUE( _st ) - -template< typename T > -struct struct_t { T _; }; - -union max_align_t -{ - optional_ALIGN_TYPE( char ); - optional_ALIGN_TYPE( short int ); - optional_ALIGN_TYPE( int ); - optional_ALIGN_TYPE( long int ); - optional_ALIGN_TYPE( float ); - optional_ALIGN_TYPE( double ); - optional_ALIGN_TYPE( long double ); - optional_ALIGN_TYPE( char * ); - optional_ALIGN_TYPE( short int * ); - optional_ALIGN_TYPE( int * ); - optional_ALIGN_TYPE( long int * ); - optional_ALIGN_TYPE( float * ); - optional_ALIGN_TYPE( double * ); - optional_ALIGN_TYPE( long double * ); - optional_ALIGN_TYPE( void * ); - -#ifdef HAVE_LONG_LONG - optional_ALIGN_TYPE( long long ); -#endif - - struct Unknown; - - Unknown ( * optional_UNIQUE(_) )( Unknown ); - Unknown * Unknown::* optional_UNIQUE(_); - Unknown ( Unknown::* optional_UNIQUE(_) )( Unknown ); - - struct_t< Unknown ( * )( Unknown) > optional_UNIQUE(_); - struct_t< Unknown * Unknown::* > optional_UNIQUE(_); - struct_t< Unknown ( Unknown::* )(Unknown) > optional_UNIQUE(_); -}; - -#undef optional_UNIQUE -#undef optional_UNIQUE2 -#undef optional_UNIQUE3 - -#undef optional_ALIGN_TYPE - -#elif defined( optional_CONFIG_ALIGN_AS ) // optional_CONFIG_MAX_ALIGN_HACK - -// Use user-specified type for alignment: - -#define optional_ALIGN_AS( unused ) \ - optional_CONFIG_ALIGN_AS - -#else // optional_CONFIG_MAX_ALIGN_HACK - -// Determine POD type to use for alignment: - -#define optional_ALIGN_AS( to_align ) \ - typename type_of_size< alignment_types, alignment_of< to_align >::value >::type - -template< typename T > -struct alignment_of; - -template< typename T > -struct alignment_of_hack -{ - char c; - T t; - alignment_of_hack(); -}; - -template< size_t A, size_t S > -struct alignment_logic -{ - enum { value = A < S ? 
A : S }; -}; - -template< typename T > -struct alignment_of -{ - enum { value = alignment_logic< - sizeof( alignment_of_hack ) - sizeof(T), sizeof(T) >::value }; -}; - -template< typename List, size_t N > -struct type_of_size -{ - typedef typename std11::conditional< - N == sizeof( typename List::head ), - typename List::head, - typename type_of_size::type >::type type; -}; - -template< size_t N > -struct type_of_size< nulltype, N > -{ - typedef optional_CONFIG_ALIGN_AS_FALLBACK type; -}; - -template< typename T> -struct struct_t { T _; }; - -#define optional_ALIGN_TYPE( type ) \ - typelist< type , typelist< struct_t< type > - -struct Unknown; - -typedef - optional_ALIGN_TYPE( char ), - optional_ALIGN_TYPE( short ), - optional_ALIGN_TYPE( int ), - optional_ALIGN_TYPE( long ), - optional_ALIGN_TYPE( float ), - optional_ALIGN_TYPE( double ), - optional_ALIGN_TYPE( long double ), - - optional_ALIGN_TYPE( char *), - optional_ALIGN_TYPE( short * ), - optional_ALIGN_TYPE( int * ), - optional_ALIGN_TYPE( long * ), - optional_ALIGN_TYPE( float * ), - optional_ALIGN_TYPE( double * ), - optional_ALIGN_TYPE( long double * ), - - optional_ALIGN_TYPE( Unknown ( * )( Unknown ) ), - optional_ALIGN_TYPE( Unknown * Unknown::* ), - optional_ALIGN_TYPE( Unknown ( Unknown::* )( Unknown ) ), - - nulltype - > > > > > > > > > > > > > > - > > > > > > > > > > > > > > - > > > > > > - alignment_types; - -#undef optional_ALIGN_TYPE - -#endif // optional_CONFIG_MAX_ALIGN_HACK - -/// C++03 constructed union to hold value. - -template< typename T > -union storage_t -{ -//private: -// template< typename > friend class optional; - - typedef T value_type; - - storage_t() optional_is_default - - explicit storage_t( value_type const & v ) - { - construct_value( v ); - } - - void construct_value( value_type const & v ) - { - ::new( value_ptr() ) value_type( v ); - } - -#if optional_CPP11_OR_GREATER - - explicit storage_t( value_type && v ) - { - construct_value( std::move( v ) ); - } - - void construct_value( value_type && v ) - { - ::new( value_ptr() ) value_type( std::move( v ) ); - } - - template< class... Args > - void emplace( Args&&... args ) - { - ::new( value_ptr() ) value_type( std::forward(args)... ); - } - - template< class U, class... Args > - void emplace( std::initializer_list il, Args&&... args ) - { - ::new( value_ptr() ) value_type( il, std::forward(args)... 
); - } - -#endif - - void destruct_value() - { - value_ptr()->~T(); - } - - optional_nodiscard value_type const * value_ptr() const - { - return as(); - } - - value_type * value_ptr() - { - return as(); - } - - optional_nodiscard value_type const & value() const optional_ref_qual - { - return * value_ptr(); - } - - value_type & value() optional_ref_qual - { - return * value_ptr(); - } - -#if optional_HAVE( REF_QUALIFIER ) - - optional_nodiscard value_type const && value() const optional_refref_qual - { - return std::move( value() ); - } - - value_type && value() optional_refref_qual - { - return std::move( value() ); - } - -#endif - -#if optional_CPP11_OR_GREATER - - using aligned_storage_t = typename std::aligned_storage< sizeof(value_type), alignof(value_type) >::type; - aligned_storage_t data; - -#elif optional_CONFIG_MAX_ALIGN_HACK - - typedef struct { unsigned char data[ sizeof(value_type) ]; } aligned_storage_t; - - max_align_t hack; - aligned_storage_t data; - -#else - typedef optional_ALIGN_AS(value_type) align_as_type; - - typedef struct { align_as_type data[ 1 + ( sizeof(value_type) - 1 ) / sizeof(align_as_type) ]; } aligned_storage_t; - aligned_storage_t data; - -# undef optional_ALIGN_AS - -#endif // optional_CONFIG_MAX_ALIGN_HACK - - optional_nodiscard void * ptr() optional_noexcept - { - return &data; - } - - optional_nodiscard void const * ptr() const optional_noexcept - { - return &data; - } - - template - optional_nodiscard U * as() - { - return reinterpret_cast( ptr() ); - } - - template - optional_nodiscard U const * as() const - { - return reinterpret_cast( ptr() ); - } -}; - -} // namespace detail - -/// disengaged state tag - -struct nullopt_t -{ - struct init{}; - explicit optional_constexpr nullopt_t( init /*unused*/ ) optional_noexcept {} -}; - -#if optional_HAVE( CONSTEXPR_11 ) -constexpr nullopt_t nullopt{ nullopt_t::init{} }; -#else -// extra parenthesis to prevent the most vexing parse: -const nullopt_t nullopt(( nullopt_t::init() )); -#endif - -/// optional access error - -#if ! 
optional_CONFIG_NO_EXCEPTIONS - -class bad_optional_access : public std::logic_error -{ -public: - explicit bad_optional_access() - : logic_error( "bad optional access" ) {} -}; - -#endif //optional_CONFIG_NO_EXCEPTIONS - -/// optional - -template< typename T> -class optional -{ -private: - template< typename > friend class optional; - - typedef void (optional::*safe_bool)() const; - -public: - typedef T value_type; - - // x.x.3.1, constructors - - // 1a - default construct - optional_constexpr optional() optional_noexcept - : has_value_( false ) - , contained() - {} - - // 1b - construct explicitly empty - // NOLINTNEXTLINE( google-explicit-constructor, hicpp-explicit-conversions ) - optional_constexpr optional( nullopt_t /*unused*/ ) optional_noexcept - : has_value_( false ) - , contained() - {} - - // 2 - copy-construct -#if optional_CPP11_OR_GREATER - // template< typename U = T - // optional_REQUIRES_T( - // std::is_copy_constructible::value - // || std11::is_trivially_copy_constructible::value - // ) - // > -#endif - optional_constexpr14 optional( optional const & other ) - : has_value_( other.has_value() ) - { - if ( other.has_value() ) - { - contained.construct_value( other.contained.value() ); - } - } - -#if optional_CPP11_OR_GREATER - - // 3 (C++11) - move-construct from optional - template< typename U = T - optional_REQUIRES_T( - std::is_move_constructible::value - || std11::is_trivially_move_constructible::value - ) - > - optional_constexpr14 optional( optional && other ) - // NOLINTNEXTLINE( performance-noexcept-move-constructor ) - noexcept( std::is_nothrow_move_constructible::value ) - : has_value_( other.has_value() ) - { - if ( other.has_value() ) - { - contained.construct_value( std::move( other.contained.value() ) ); - } - } - - // 4a (C++11) - explicit converting copy-construct from optional - template< typename U - optional_REQUIRES_T( - std::is_constructible::value - && !std::is_constructible & >::value - && !std::is_constructible && >::value - && !std::is_constructible const & >::value - && !std::is_constructible const && >::value - && !std::is_convertible< optional & , T>::value - && !std::is_convertible< optional && , T>::value - && !std::is_convertible< optional const & , T>::value - && !std::is_convertible< optional const &&, T>::value - && !std::is_convertible< U const & , T>::value /*=> explicit */ - ) - > - explicit optional( optional const & other ) - : has_value_( other.has_value() ) - { - if ( other.has_value() ) - { - contained.construct_value( T{ other.contained.value() } ); - } - } -#endif // optional_CPP11_OR_GREATER - - // 4b (C++98 and later) - non-explicit converting copy-construct from optional - template< typename U -#if optional_CPP11_OR_GREATER - optional_REQUIRES_T( - std::is_constructible::value - && !std::is_constructible & >::value - && !std::is_constructible && >::value - && !std::is_constructible const & >::value - && !std::is_constructible const && >::value - && !std::is_convertible< optional & , T>::value - && !std::is_convertible< optional && , T>::value - && !std::is_convertible< optional const & , T>::value - && !std::is_convertible< optional const &&, T>::value - && std::is_convertible< U const & , T>::value /*=> non-explicit */ - ) -#endif // optional_CPP11_OR_GREATER - > - // NOLINTNEXTLINE( google-explicit-constructor, hicpp-explicit-conversions ) - /*non-explicit*/ optional( optional const & other ) - : has_value_( other.has_value() ) - { - if ( other.has_value() ) - { - contained.construct_value( other.contained.value() ); - } - 
} - -#if optional_CPP11_OR_GREATER - - // 5a (C++11) - explicit converting move-construct from optional - template< typename U - optional_REQUIRES_T( - std::is_constructible::value - && !std::is_constructible & >::value - && !std::is_constructible && >::value - && !std::is_constructible const & >::value - && !std::is_constructible const && >::value - && !std::is_convertible< optional & , T>::value - && !std::is_convertible< optional && , T>::value - && !std::is_convertible< optional const & , T>::value - && !std::is_convertible< optional const &&, T>::value - && !std::is_convertible< U &&, T>::value /*=> explicit */ - ) - > - explicit optional( optional && other - ) - : has_value_( other.has_value() ) - { - if ( other.has_value() ) - { - contained.construct_value( T{ std::move( other.contained.value() ) } ); - } - } - - // 5a (C++11) - non-explicit converting move-construct from optional - template< typename U - optional_REQUIRES_T( - std::is_constructible::value - && !std::is_constructible & >::value - && !std::is_constructible && >::value - && !std::is_constructible const & >::value - && !std::is_constructible const && >::value - && !std::is_convertible< optional & , T>::value - && !std::is_convertible< optional && , T>::value - && !std::is_convertible< optional const & , T>::value - && !std::is_convertible< optional const &&, T>::value - && std::is_convertible< U &&, T>::value /*=> non-explicit */ - ) - > - // NOLINTNEXTLINE( google-explicit-constructor, hicpp-explicit-conversions ) - /*non-explicit*/ optional( optional && other ) - : has_value_( other.has_value() ) - { - if ( other.has_value() ) - { - contained.construct_value( std::move( other.contained.value() ) ); - } - } - - // 6 (C++11) - in-place construct - template< typename... Args - optional_REQUIRES_T( - std::is_constructible::value - ) - > - optional_constexpr explicit optional( nonstd_lite_in_place_t(T), Args&&... args ) - : has_value_( true ) - , contained( T( std::forward(args)...) ) - {} - - // 7 (C++11) - in-place construct, initializer-list - template< typename U, typename... Args - optional_REQUIRES_T( - std::is_constructible&, Args&&...>::value - ) - > - optional_constexpr explicit optional( nonstd_lite_in_place_t(T), std::initializer_list il, Args&&... args ) - : has_value_( true ) - , contained( T( il, std::forward(args)...) 
) - {} - - // 8a (C++11) - explicit move construct from value - template< typename U = T - optional_REQUIRES_T( - std::is_constructible::value - && !std::is_same::type, nonstd_lite_in_place_t(U)>::value - && !std::is_same::type, optional>::value - && !std::is_convertible::value /*=> explicit */ - ) - > - optional_constexpr explicit optional( U && value ) - : has_value_( true ) - , contained( T{ std::forward( value ) } ) - {} - - // 8b (C++11) - non-explicit move construct from value - template< typename U = T - optional_REQUIRES_T( - std::is_constructible::value - && !std::is_same::type, nonstd_lite_in_place_t(U)>::value - && !std::is_same::type, optional>::value - && std::is_convertible::value /*=> non-explicit */ - ) - > - // NOLINTNEXTLINE( google-explicit-constructor, hicpp-explicit-conversions ) - optional_constexpr /*non-explicit*/ optional( U && value ) - : has_value_( true ) - , contained( std::forward( value ) ) - {} - -#else // optional_CPP11_OR_GREATER - - // 8 (C++98) - optional( value_type const & value ) - : has_value_( true ) - , contained( value ) - {} - -#endif // optional_CPP11_OR_GREATER - - // x.x.3.2, destructor - - ~optional() - { - if ( has_value() ) - { - contained.destruct_value(); - } - } - - // x.x.3.3, assignment - - // 1 (C++98and later) - assign explicitly empty - optional & operator=( nullopt_t /*unused*/) optional_noexcept - { - reset(); - return *this; - } - - // 2 (C++98and later) - copy-assign from optional -#if optional_CPP11_OR_GREATER - // NOLINTNEXTLINE( cppcoreguidelines-c-copy-assignment-signature, misc-unconventional-assign-operator ) - optional_REQUIRES_R( - optional &, - true -// std::is_copy_constructible::value -// && std::is_copy_assignable::value - ) - operator=( optional const & other ) - noexcept( - std::is_nothrow_move_assignable::value - && std::is_nothrow_move_constructible::value - ) -#else - optional & operator=( optional const & other ) -#endif - { - if ( (has_value() == true ) && (other.has_value() == false) ) { reset(); } - else if ( (has_value() == false) && (other.has_value() == true ) ) { initialize( *other ); } - else if ( (has_value() == true ) && (other.has_value() == true ) ) { contained.value() = *other; } - return *this; - } - -#if optional_CPP11_OR_GREATER - - // 3 (C++11) - move-assign from optional - // NOLINTNEXTLINE( cppcoreguidelines-c-copy-assignment-signature, misc-unconventional-assign-operator ) - optional_REQUIRES_R( - optional &, - true -// std::is_move_constructible::value -// && std::is_move_assignable::value - ) - operator=( optional && other ) noexcept - { - if ( (has_value() == true ) && (other.has_value() == false) ) { reset(); } - else if ( (has_value() == false) && (other.has_value() == true ) ) { initialize( std::move( *other ) ); } - else if ( (has_value() == true ) && (other.has_value() == true ) ) { contained.value() = std::move( *other ); } - return *this; - } - - // 4 (C++11) - move-assign from value - template< typename U = T > - // NOLINTNEXTLINE( cppcoreguidelines-c-copy-assignment-signature, misc-unconventional-assign-operator ) - optional_REQUIRES_R( - optional &, - std::is_constructible::value - && std::is_assignable::value - && !std::is_same::type, nonstd_lite_in_place_t(U)>::value - && !std::is_same::type, optional>::value - && !(std::is_scalar::value && std::is_same::type>::value) - ) - operator=( U && value ) - { - if ( has_value() ) - { - contained.value() = std::forward( value ); - } - else - { - initialize( T( std::forward( value ) ) ); - } - return *this; - } - -#else // 
optional_CPP11_OR_GREATER - - // 4 (C++98) - copy-assign from value - template< typename U /*= T*/ > - optional & operator=( U const & value ) - { - if ( has_value() ) contained.value() = value; - else initialize( T( value ) ); - return *this; - } - -#endif // optional_CPP11_OR_GREATER - - // 5 (C++98 and later) - converting copy-assign from optional - template< typename U > -#if optional_CPP11_OR_GREATER - // NOLINTNEXTLINE( cppcoreguidelines-c-copy-assignment-signature, misc-unconventional-assign-operator ) - optional_REQUIRES_R( - optional&, - std::is_constructible< T , U const &>::value - && std::is_assignable< T&, U const &>::value - && !std::is_constructible & >::value - && !std::is_constructible && >::value - && !std::is_constructible const & >::value - && !std::is_constructible const && >::value - && !std::is_convertible< optional & , T>::value - && !std::is_convertible< optional && , T>::value - && !std::is_convertible< optional const & , T>::value - && !std::is_convertible< optional const &&, T>::value - && !std::is_assignable< T&, optional & >::value - && !std::is_assignable< T&, optional && >::value - && !std::is_assignable< T&, optional const & >::value - && !std::is_assignable< T&, optional const && >::value - ) -#else - optional& -#endif // optional_CPP11_OR_GREATER - operator=( optional const & other ) - { - return *this = optional( other ); - } - -#if optional_CPP11_OR_GREATER - - // 6 (C++11) - converting move-assign from optional - template< typename U > - // NOLINTNEXTLINE( cppcoreguidelines-c-copy-assignment-signature, misc-unconventional-assign-operator ) - optional_REQUIRES_R( - optional&, - std::is_constructible< T , U>::value - && std::is_assignable< T&, U>::value - && !std::is_constructible & >::value - && !std::is_constructible && >::value - && !std::is_constructible const & >::value - && !std::is_constructible const && >::value - && !std::is_convertible< optional & , T>::value - && !std::is_convertible< optional && , T>::value - && !std::is_convertible< optional const & , T>::value - && !std::is_convertible< optional const &&, T>::value - && !std::is_assignable< T&, optional & >::value - && !std::is_assignable< T&, optional && >::value - && !std::is_assignable< T&, optional const & >::value - && !std::is_assignable< T&, optional const && >::value - ) - operator=( optional && other ) - { - return *this = optional( std::move( other ) ); - } - - // 7 (C++11) - emplace - template< typename... Args - optional_REQUIRES_T( - std::is_constructible::value - ) - > - T& emplace( Args&&... args ) - { - *this = nullopt; - contained.emplace( std::forward(args)... ); - has_value_ = true; - return contained.value(); - } - - // 8 (C++11) - emplace, initializer-list - template< typename U, typename... Args - optional_REQUIRES_T( - std::is_constructible&, Args&&...>::value - ) - > - T& emplace( std::initializer_list il, Args&&... args ) - { - *this = nullopt; - contained.emplace( il, std::forward(args)... 
); - has_value_ = true; - return contained.value(); - } - -#endif // optional_CPP11_OR_GREATER - - // x.x.3.4, swap - - void swap( optional & other ) -#if optional_CPP11_OR_GREATER - noexcept( - std::is_nothrow_move_constructible::value - && std17::is_nothrow_swappable::value - ) -#endif - { - using std::swap; - if ( (has_value() == true ) && (other.has_value() == true ) ) { swap( **this, *other ); } - else if ( (has_value() == false) && (other.has_value() == true ) ) { initialize( std11::move(*other) ); other.reset(); } - else if ( (has_value() == true ) && (other.has_value() == false) ) { other.initialize( std11::move(**this) ); reset(); } - } - - // x.x.3.5, observers - - optional_constexpr value_type const * operator ->() const - { - return assert( has_value() ), - contained.value_ptr(); - } - - optional_constexpr14 value_type * operator ->() - { - return assert( has_value() ), - contained.value_ptr(); - } - - optional_constexpr value_type const & operator *() const optional_ref_qual - { - return assert( has_value() ), - contained.value(); - } - - optional_constexpr14 value_type & operator *() optional_ref_qual - { - return assert( has_value() ), - contained.value(); - } - -#if optional_HAVE( REF_QUALIFIER ) - - optional_constexpr value_type const && operator *() const optional_refref_qual - { - return std::move( **this ); - } - - optional_constexpr14 value_type && operator *() optional_refref_qual - { - return std::move( **this ); - } - -#endif - -#if optional_CPP11_OR_GREATER - optional_constexpr explicit operator bool() const optional_noexcept - { - return has_value(); - } -#else - optional_constexpr operator safe_bool() const optional_noexcept - { - return has_value() ? &optional::this_type_does_not_support_comparisons : 0; - } -#endif - - // NOLINTNEXTLINE( modernize-use-nodiscard ) - /*optional_nodiscard*/ optional_constexpr bool has_value() const optional_noexcept - { - return has_value_; - } - - // NOLINTNEXTLINE( modernize-use-nodiscard ) - /*optional_nodiscard*/ optional_constexpr14 value_type const & value() const optional_ref_qual - { -#if optional_CONFIG_NO_EXCEPTIONS - assert( has_value() ); -#else - if ( ! has_value() ) - { - throw bad_optional_access(); - } -#endif - return contained.value(); - } - - optional_constexpr14 value_type & value() optional_ref_qual - { -#if optional_CONFIG_NO_EXCEPTIONS - assert( has_value() ); -#else - if ( ! has_value() ) - { - throw bad_optional_access(); - } -#endif - return contained.value(); - } - -#if optional_HAVE( REF_QUALIFIER ) && ( !optional_COMPILER_GNUC_VERSION || optional_COMPILER_GNUC_VERSION >= 490 ) - - // NOLINTNEXTLINE( modernize-use-nodiscard ) - /*optional_nodiscard*/ optional_constexpr value_type const && value() const optional_refref_qual - { - return std::move( value() ); - } - - optional_constexpr14 value_type && value() optional_refref_qual - { - return std::move( value() ); - } - -#endif - -#if optional_CPP11_OR_GREATER - - template< typename U > - optional_constexpr value_type value_or( U && v ) const optional_ref_qual - { - return has_value() ? contained.value() : static_cast(std::forward( v ) ); - } - - template< typename U > - optional_constexpr14 value_type value_or( U && v ) optional_refref_qual - { - return has_value() ? std::move( contained.value() ) : static_cast(std::forward( v ) ); - } - -#else - - template< typename U > - optional_constexpr value_type value_or( U const & v ) const - { - return has_value() ? 
contained.value() : static_cast( v ); - } - -#endif // optional_CPP11_OR_GREATER - - // x.x.3.6, modifiers - - void reset() optional_noexcept - { - if ( has_value() ) - { - contained.destruct_value(); - } - - has_value_ = false; - } - -private: - void this_type_does_not_support_comparisons() const {} - - template< typename V > - void initialize( V const & value ) - { - assert( ! has_value() ); - contained.construct_value( value ); - has_value_ = true; - } - -#if optional_CPP11_OR_GREATER - template< typename V > - void initialize( V && value ) - { - assert( ! has_value() ); - contained.construct_value( std::move( value ) ); - has_value_ = true; - } - -#endif - -private: - bool has_value_; - detail::storage_t< value_type > contained; - -}; - -// Relational operators - -template< typename T, typename U > -inline optional_constexpr bool operator==( optional const & x, optional const & y ) -{ - return bool(x) != bool(y) ? false : !bool( x ) ? true : *x == *y; -} - -template< typename T, typename U > -inline optional_constexpr bool operator!=( optional const & x, optional const & y ) -{ - return !(x == y); -} - -template< typename T, typename U > -inline optional_constexpr bool operator<( optional const & x, optional const & y ) -{ - return (!y) ? false : (!x) ? true : *x < *y; -} - -template< typename T, typename U > -inline optional_constexpr bool operator>( optional const & x, optional const & y ) -{ - return (y < x); -} - -template< typename T, typename U > -inline optional_constexpr bool operator<=( optional const & x, optional const & y ) -{ - return !(y < x); -} - -template< typename T, typename U > -inline optional_constexpr bool operator>=( optional const & x, optional const & y ) -{ - return !(x < y); -} - -// Comparison with nullopt - -template< typename T > -inline optional_constexpr bool operator==( optional const & x, nullopt_t /*unused*/ ) optional_noexcept -{ - return (!x); -} - -template< typename T > -inline optional_constexpr bool operator==( nullopt_t /*unused*/, optional const & x ) optional_noexcept -{ - return (!x); -} - -template< typename T > -inline optional_constexpr bool operator!=( optional const & x, nullopt_t /*unused*/ ) optional_noexcept -{ - return bool(x); -} - -template< typename T > -inline optional_constexpr bool operator!=( nullopt_t /*unused*/, optional const & x ) optional_noexcept -{ - return bool(x); -} - -template< typename T > -inline optional_constexpr bool operator<( optional const & /*unused*/, nullopt_t /*unused*/ ) optional_noexcept -{ - return false; -} - -template< typename T > -inline optional_constexpr bool operator<( nullopt_t /*unused*/, optional const & x ) optional_noexcept -{ - return bool(x); -} - -template< typename T > -inline optional_constexpr bool operator<=( optional const & x, nullopt_t /*unused*/ ) optional_noexcept -{ - return (!x); -} - -template< typename T > -inline optional_constexpr bool operator<=( nullopt_t /*unused*/, optional const & /*unused*/ ) optional_noexcept -{ - return true; -} - -template< typename T > -inline optional_constexpr bool operator>( optional const & x, nullopt_t /*unused*/ ) optional_noexcept -{ - return bool(x); -} - -template< typename T > -inline optional_constexpr bool operator>( nullopt_t /*unused*/, optional const & /*unused*/ ) optional_noexcept -{ - return false; -} - -template< typename T > -inline optional_constexpr bool operator>=( optional const & /*unused*/, nullopt_t /*unused*/ ) optional_noexcept -{ - return true; -} - -template< typename T > -inline optional_constexpr bool 
operator>=( nullopt_t /*unused*/, optional const & x ) optional_noexcept -{ - return (!x); -} - -// Comparison with T - -template< typename T, typename U > -inline optional_constexpr bool operator==( optional const & x, U const & v ) -{ - return bool(x) ? *x == v : false; -} - -template< typename T, typename U > -inline optional_constexpr bool operator==( U const & v, optional const & x ) -{ - return bool(x) ? v == *x : false; -} - -template< typename T, typename U > -inline optional_constexpr bool operator!=( optional const & x, U const & v ) -{ - return bool(x) ? *x != v : true; -} - -template< typename T, typename U > -inline optional_constexpr bool operator!=( U const & v, optional const & x ) -{ - return bool(x) ? v != *x : true; -} - -template< typename T, typename U > -inline optional_constexpr bool operator<( optional const & x, U const & v ) -{ - return bool(x) ? *x < v : true; -} - -template< typename T, typename U > -inline optional_constexpr bool operator<( U const & v, optional const & x ) -{ - return bool(x) ? v < *x : false; -} - -template< typename T, typename U > -inline optional_constexpr bool operator<=( optional const & x, U const & v ) -{ - return bool(x) ? *x <= v : true; -} - -template< typename T, typename U > -inline optional_constexpr bool operator<=( U const & v, optional const & x ) -{ - return bool(x) ? v <= *x : false; -} - -template< typename T, typename U > -inline optional_constexpr bool operator>( optional const & x, U const & v ) -{ - return bool(x) ? *x > v : false; -} - -template< typename T, typename U > -inline optional_constexpr bool operator>( U const & v, optional const & x ) -{ - return bool(x) ? v > *x : true; -} - -template< typename T, typename U > -inline optional_constexpr bool operator>=( optional const & x, U const & v ) -{ - return bool(x) ? *x >= v : false; -} - -template< typename T, typename U > -inline optional_constexpr bool operator>=( U const & v, optional const & x ) -{ - return bool(x) ? v >= *x : true; -} - -// Specialized algorithms - -template< typename T -#if optional_CPP11_OR_GREATER - optional_REQUIRES_T( - std::is_move_constructible::value - && std17::is_swappable::value ) -#endif -> -void swap( optional & x, optional & y ) -#if optional_CPP11_OR_GREATER - noexcept( noexcept( x.swap(y) ) ) -#endif -{ - x.swap( y ); -} - -#if optional_CPP11_OR_GREATER - -template< typename T > -optional_constexpr optional< typename std::decay::type > make_optional( T && value ) -{ - return optional< typename std::decay::type >( std::forward( value ) ); -} - -template< typename T, typename...Args > -optional_constexpr optional make_optional( Args&&... args ) -{ - return optional( nonstd_lite_in_place(T), std::forward(args)...); -} - -template< typename T, typename U, typename... Args > -optional_constexpr optional make_optional( std::initializer_list il, Args&&... args ) -{ - return optional( nonstd_lite_in_place(T), il, std::forward(args)...); -} - -#else - -template< typename T > -optional make_optional( T const & value ) -{ - return optional( value ); -} - -#endif // optional_CPP11_OR_GREATER - -} // namespace optional_lite - -using optional_lite::optional; -using optional_lite::nullopt_t; -using optional_lite::nullopt; - -#if ! 
optional_CONFIG_NO_EXCEPTIONS -using optional_lite::bad_optional_access; -#endif - -using optional_lite::make_optional; - -} // namespace nonstd - -#if optional_CPP11_OR_GREATER - -// specialize the std::hash algorithm: - -namespace std { - -template< class T > -struct hash< nonstd::optional > -{ -public: - std::size_t operator()( nonstd::optional const & v ) const optional_noexcept - { - return bool( v ) ? std::hash{}( *v ) : 0; - } -}; - -} //namespace std - -#endif // optional_CPP11_OR_GREATER - -#if defined(__clang__) -# pragma clang diagnostic pop -#elif defined(__GNUC__) -# pragma GCC diagnostic pop -#elif defined(_MSC_VER ) -# pragma warning( pop ) -#endif - -#endif // optional_USES_STD_OPTIONAL - - -namespace tf { namespace nstd { - -template -using optional = nonstd::optional; - -}} diff --git a/bundled/taskflow-2.5.0/include/taskflow/nstd/variant.hpp b/bundled/taskflow-2.5.0/include/taskflow/nstd/variant.hpp deleted file mode 100644 index c07ee2057d..0000000000 --- a/bundled/taskflow-2.5.0/include/taskflow/nstd/variant.hpp +++ /dev/null @@ -1,2631 +0,0 @@ -// The file is modified from MPark in order to use variant in C++11/14 -// We change the namespace to stick with the v1.4, in order to avoid clashing -// problem when upstream code uses the same library. - -// MPark.Variant -// -// Copyright Michael Park, 2015-2017 -// -// Distributed under the Boost Software License, Version 1.0. -// (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt) - -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -// MPark.Variant -// -// Copyright Michael Park, 2015-2017 -// -// Distributed under the Boost Software License, Version 1.0. -// (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt) - -// MSVC 2015 Update 3. -#if __cplusplus < 201103L && (!defined(_MSC_VER) || _MSC_FULL_VER < 190024210) -#error "MPark.Variant requires C++11 support." 
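The removed optional.hpp closes by re-exporting the nonstd::optional backport as tf::nstd::optional. A minimal usage sketch of that alias follows, purely for orientation: the parse_digit/demo functions are made up for the example and appear in neither bundled tree; only the alias, nonstd::nullopt, and value_or come from the deleted header.

    // Sketch only: exercising the tf::nstd::optional alias removed above.
    #include "taskflow/nstd/optional.hpp"   // taskflow-2.5.0 header deleted by this patch

    tf::nstd::optional<int> parse_digit(char c) {
      if (c >= '0' && c <= '9') {
        return c - '0';            // engaged optional holding a value
      }
      return nonstd::nullopt;      // disengaged state
    }

    int demo() {
      return parse_digit('7').value_or(-1);   // 7 here; -1 for a non-digit
    }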
-#endif - -#ifndef __has_attribute -#define __has_attribute(x) 0 -#endif - -#ifndef __has_builtin -#define __has_builtin(x) 0 -#endif - -#ifndef __has_include -#define __has_include(x) 0 -#endif - -#ifndef __has_feature -#define __has_feature(x) 0 -#endif - -#if __has_attribute(always_inline) || defined(__GNUC__) -#define TF_ALWAYS_INLINE __attribute__((__always_inline__)) inline -#elif defined(_MSC_VER) -#define TF_ALWAYS_INLINE __forceinline -#else -#define TF_ALWAYS_INLINE inline -#endif - -#if __has_builtin(__builtin_addressof) || \ - (defined(__GNUC__) && __GNUC__ >= 7) || defined(_MSC_VER) -#define TF_BUILTIN_ADDRESSOF -#endif - -#if __has_builtin(__builtin_unreachable) || defined(__GNUC__) -#define TF_BUILTIN_UNREACHABLE __builtin_unreachable() -#elif defined(_MSC_VER) -#define TF_BUILTIN_UNREACHABLE __assume(false) -#else -#define TF_BUILTIN_UNREACHABLE -#endif - -#if __has_builtin(__type_pack_element) -#define TF_TYPE_PACK_ELEMENT -#endif - -#if defined(__cpp_constexpr) && __cpp_constexpr >= 200704 && \ - !(defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 9) -#define TF_CPP11_CONSTEXPR -#endif - -#if defined(__cpp_constexpr) && __cpp_constexpr >= 201304 -#define TF_CPP14_CONSTEXPR -#endif - -#if __has_feature(cxx_exceptions) || defined(__cpp_exceptions) || \ - (defined(_MSC_VER) && defined(_CPPUNWIND)) -#define TF_EXCEPTIONS -#endif - -#if defined(__cpp_generic_lambdas) || defined(_MSC_VER) -#define TF_GENERIC_LAMBDAS -#endif - -#if defined(__cpp_lib_integer_sequence) -#define TF_INTEGER_SEQUENCE -#endif - -#if (defined(__cpp_decltype_auto) && defined(__cpp_return_type_deduction)) || defined(_MSC_VER) -#define TF_RETURN_TYPE_DEDUCTION -#endif - -#if defined(__cpp_lib_transparent_operators) || defined(_MSC_VER) -#define TF_TRANSPARENT_OPERATORS -#endif - -#if defined(__cpp_variable_templates) || defined(_MSC_VER) -#define TF_VARIABLE_TEMPLATES -#endif - -#if !defined(__GLIBCXX__) || __has_include() // >= libstdc++-5 -#define TF_TRIVIALITY_TYPE_TRAITS -#define TF_INCOMPLETE_TYPE_TRAITS -#endif - -// MPark.Variant -// -// Copyright Michael Park, 2015-2017 -// -// Distributed under the Boost Software License, Version 1.0. -// (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt) - -// in_place_t definition -namespace tf { namespace nstd { - - struct in_place_t { explicit in_place_t() = default; }; - - template - struct in_place_index_t { explicit in_place_index_t() = default; }; - - template - struct in_place_type_t { explicit in_place_type_t() = default; }; - -#ifdef TF_VARIABLE_TEMPLATES - constexpr in_place_t in_place{}; - - template constexpr in_place_index_t in_place_index{}; - - template constexpr in_place_type_t in_place_type{}; -#endif - -}} // namespace tf::nstd - -// MPark.Variant -// -// Copyright Michael Park, 2015-2017 -// -// Distributed under the Boost Software License, Version 1.0. -// (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt) - - -#define TF_RETURN(...) \ - noexcept(noexcept(__VA_ARGS__)) -> decltype(__VA_ARGS__) { return __VA_ARGS__; } - -namespace tf { namespace nstd { - namespace lib { - template - struct identity { using type = T; }; - - inline namespace cpp14 { - template - struct array { - constexpr const T &operator[](std::size_t index) const { - return data[index]; - } - - T data[N == 0 ? 
1 : N]; - }; - - template - using add_pointer_t = typename std::add_pointer::type; - - template - using common_type_t = typename std::common_type::type; - - template - using decay_t = typename std::decay::type; - - template - using enable_if_t = typename std::enable_if::type; - - template - using remove_const_t = typename std::remove_const::type; - - template - using remove_reference_t = typename std::remove_reference::type; - - template - using remove_cvref_t = - typename std::remove_cv>::type; - - template - inline constexpr T &&forward(remove_reference_t &t) noexcept { - return static_cast(t); - } - - template - inline constexpr T &&forward(remove_reference_t &&t) noexcept { - static_assert(!std::is_lvalue_reference::value, - "can not forward an rvalue as an lvalue"); - return static_cast(t); - } - - template - inline constexpr remove_reference_t &&move(T &&t) noexcept { - return static_cast &&>(t); - } - -#ifdef TF_INTEGER_SEQUENCE - using std::integer_sequence; - using std::index_sequence; - using std::make_index_sequence; - using std::index_sequence_for; -#else - template - struct integer_sequence { - using value_type = T; - static constexpr std::size_t size() noexcept { return sizeof...(Is); } - }; - - template - using index_sequence = integer_sequence; - - template - struct make_index_sequence_concat; - - template - struct make_index_sequence_concat, - index_sequence> - : identity> {}; - - template - struct make_index_sequence_impl; - - template - using make_index_sequence = typename make_index_sequence_impl::type; - - template - struct make_index_sequence_impl - : make_index_sequence_concat, - make_index_sequence> {}; - - template <> - struct make_index_sequence_impl<0> : identity> {}; - - template <> - struct make_index_sequence_impl<1> : identity> {}; - - template - using index_sequence_for = make_index_sequence; -#endif - - // -#ifdef TF_TRANSPARENT_OPERATORS - using equal_to = std::equal_to<>; -#else - struct equal_to { - template - inline constexpr auto operator()(Lhs &&lhs, Rhs &&rhs) const - TF_RETURN(lib::forward(lhs) == lib::forward(rhs)) - }; -#endif - -#ifdef TF_TRANSPARENT_OPERATORS - using not_equal_to = std::not_equal_to<>; -#else - struct not_equal_to { - template - inline constexpr auto operator()(Lhs &&lhs, Rhs &&rhs) const - TF_RETURN(lib::forward(lhs) != lib::forward(rhs)) - }; -#endif - -#ifdef TF_TRANSPARENT_OPERATORS - using less = std::less<>; -#else - struct less { - template - inline constexpr auto operator()(Lhs &&lhs, Rhs &&rhs) const - TF_RETURN(lib::forward(lhs) < lib::forward(rhs)) - }; -#endif - -#ifdef TF_TRANSPARENT_OPERATORS - using greater = std::greater<>; -#else - struct greater { - template - inline constexpr auto operator()(Lhs &&lhs, Rhs &&rhs) const - TF_RETURN(lib::forward(lhs) > lib::forward(rhs)) - }; -#endif - -#ifdef TF_TRANSPARENT_OPERATORS - using less_equal = std::less_equal<>; -#else - struct less_equal { - template - inline constexpr auto operator()(Lhs &&lhs, Rhs &&rhs) const - TF_RETURN(lib::forward(lhs) <= lib::forward(rhs)) - }; -#endif - -#ifdef TF_TRANSPARENT_OPERATORS - using greater_equal = std::greater_equal<>; -#else - struct greater_equal { - template - inline constexpr auto operator()(Lhs &&lhs, Rhs &&rhs) const - TF_RETURN(lib::forward(lhs) >= lib::forward(rhs)) - }; -#endif - } // namespace cpp14 - - inline namespace cpp17 { - - // - template - using bool_constant = std::integral_constant; - - template - struct voider : identity {}; - - template - using void_t = typename voider::type; - - namespace detail { - 
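Just below, the library defines its own is_swappable / is_nothrow_swappable traits by probing an unqualified swap() call inside decltype, so that both std::swap and ADL-found overloads are considered. A simplified restatement of that detection idea, for orientation only: the removed header uses an equivalent test(int)/test(...) overload pair rather than the void_t-style partial specialization shown here, and the sketch namespace is made up.

    // Sketch of the swap-detection idiom used by the traits defined below (C++11).
    #include <type_traits>
    #include <utility>

    namespace sketch {
      using std::swap;   // make std::swap visible alongside ADL candidates

      template <typename T, typename = void>
      struct is_swappable : std::false_type {};

      template <typename T>
      struct is_swappable<
          T, decltype(swap(std::declval<T&>(), std::declval<T&>()), void())>
          : std::true_type {};
    }

    static_assert(sketch::is_swappable<int>::value, "int is swappable");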
namespace swappable { - - using std::swap; - - template - struct is_swappable { - private: - template (), - std::declval()))> - inline static std::true_type test(int); - - template - inline static std::false_type test(...); - - public: - static constexpr bool value = decltype(test(0))::value; - }; - - template - struct is_nothrow_swappable { - static constexpr bool value = - noexcept(swap(std::declval(), std::declval())); - }; - - template - struct is_nothrow_swappable : std::false_type {}; - - } // namespace swappable - } // namespace detail - - using detail::swappable::is_swappable; - - template - using is_nothrow_swappable = - detail::swappable::is_nothrow_swappable::value, T>; - - // - namespace detail { - - template - struct is_reference_wrapper : std::false_type {}; - - template - struct is_reference_wrapper> - : std::true_type {}; - - template - struct Invoke; - - template <> - struct Invoke { - template - inline static constexpr auto invoke(R T::*pmf, Arg &&arg, Args &&... args) - TF_RETURN((lib::forward(arg).*pmf)(lib::forward(args)...)) - }; - - template <> - struct Invoke { - template - inline static constexpr auto invoke(R T::*pmf, Arg &&arg, Args &&... args) - TF_RETURN((lib::forward(arg).get().*pmf)(lib::forward(args)...)) - }; - - template <> - struct Invoke { - template - inline static constexpr auto invoke(R T::*pmf, Arg &&arg, Args &&... args) - TF_RETURN(((*lib::forward(arg)).*pmf)(lib::forward(args)...)) - }; - - template <> - struct Invoke { - template - inline static constexpr auto invoke(R T::*pmo, Arg &&arg) - TF_RETURN(lib::forward(arg).*pmo) - }; - - template <> - struct Invoke { - template - inline static constexpr auto invoke(R T::*pmo, Arg &&arg) - TF_RETURN(lib::forward(arg).get().*pmo) - }; - - template <> - struct Invoke { - template - inline static constexpr auto invoke(R T::*pmo, Arg &&arg) - TF_RETURN((*lib::forward(arg)).*pmo) - }; - - template - inline constexpr auto invoke(R T::*f, Arg &&arg, Args &&... args) - TF_RETURN( - Invoke::value, - (std::is_base_of>::value - ? 0 - : is_reference_wrapper>::value - ? 1 - : 2)>::invoke(f, - lib::forward(arg), - lib::forward(args)...)) - -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable : 4100) -#endif - template - inline constexpr auto invoke(F &&f, Args &&... args) - TF_RETURN(lib::forward(f)(lib::forward(args)...)) -#ifdef _MSC_VER -#pragma warning(pop) -#endif - } // namespace detail - - template - inline constexpr auto invoke(F &&f, Args &&... 
args) - TF_RETURN(detail::invoke(lib::forward(f), - lib::forward(args)...)) - - namespace detail { - - template - struct invoke_result {}; - - template - struct invoke_result(), std::declval()...))>, - F, - Args...> - : identity(), std::declval()...))> {}; - - } // namespace detail - - template - using invoke_result = detail::invoke_result; - - template - using invoke_result_t = typename invoke_result::type; - - namespace detail { - - template - struct is_invocable : std::false_type {}; - - template - struct is_invocable>, F, Args...> - : std::true_type {}; - - template - struct is_invocable_r : std::false_type {}; - - template - struct is_invocable_r>, - R, - F, - Args...> - : std::is_convertible, R> {}; - - } // namespace detail - - template - using is_invocable = detail::is_invocable; - - template - using is_invocable_r = detail::is_invocable_r; - - // -#ifdef TF_BUILTIN_ADDRESSOF - template - inline constexpr T *addressof(T &arg) noexcept { - return __builtin_addressof(arg); - } -#else - namespace detail { - - namespace has_addressof_impl { - - struct fail; - - template - inline fail operator&(T &&); - - template - inline static constexpr bool impl() { - return (std::is_class::value || std::is_union::value) && - !std::is_same()), fail>::value; - } - - } // namespace has_addressof_impl - - template - using has_addressof = bool_constant()>; - - template - inline constexpr T *addressof(T &arg, std::true_type) noexcept { - return std::addressof(arg); - } - - template - inline constexpr T *addressof(T &arg, std::false_type) noexcept { - return &arg; - } - - } // namespace detail - - template - inline constexpr T *addressof(T &arg) noexcept { - return detail::addressof(arg, detail::has_addressof{}); - } -#endif - - template - inline constexpr T *addressof(const T &&) = delete; - - } // namespace cpp17 - - template - struct remove_all_extents : identity {}; - - template - struct remove_all_extents> : remove_all_extents {}; - - template - using remove_all_extents_t = typename remove_all_extents::type; - - template - using size_constant = std::integral_constant; - - template - struct indexed_type : size_constant { using type = T; }; - - template - using all = std::is_same, - integer_sequence>; - -#ifdef TF_TYPE_PACK_ELEMENT - template - using type_pack_element_t = __type_pack_element; -#else - template - struct type_pack_element_impl { - private: - template - struct set; - - template - struct set> : indexed_type... 
{}; - - template - inline static std::enable_if impl(indexed_type); - - inline static std::enable_if impl(...); - - public: - using type = decltype(impl(set>{})); - }; - - template - using type_pack_element = typename type_pack_element_impl::type; - - template - using type_pack_element_t = typename type_pack_element::type; -#endif - -#ifdef TF_TRIVIALITY_TYPE_TRAITS - using std::is_trivially_copy_constructible; - using std::is_trivially_move_constructible; - using std::is_trivially_copy_assignable; - using std::is_trivially_move_assignable; -#else - template - struct is_trivially_copy_constructible - : bool_constant< - std::is_copy_constructible::value && __has_trivial_copy(T)> {}; - - template - struct is_trivially_move_constructible : bool_constant<__is_trivial(T)> {}; - - template - struct is_trivially_copy_assignable - : bool_constant< - std::is_copy_assignable::value && __has_trivial_assign(T)> {}; - - template - struct is_trivially_move_assignable : bool_constant<__is_trivial(T)> {}; -#endif - - template - struct dependent_type : T {}; - - template - struct push_back; - - template - using push_back_t = typename push_back::type; - - template - struct push_back, J> { - using type = index_sequence; - }; - - } // namespace lib -}} // namespace tf::nstd - -#undef TF_RETURN - - -namespace tf { namespace nstd { - -#ifdef TF_RETURN_TYPE_DEDUCTION - -#define AUTO auto -#define AUTO_RETURN(...) { return __VA_ARGS__; } - -#define AUTO_REFREF auto && -#define AUTO_REFREF_RETURN(...) { return __VA_ARGS__; } - -#define DECLTYPE_AUTO decltype(auto) -#define DECLTYPE_AUTO_RETURN(...) { return __VA_ARGS__; } - -#else - -#define AUTO auto -#define AUTO_RETURN(...) \ - -> lib::decay_t { return __VA_ARGS__; } - -#define AUTO_REFREF auto -#define AUTO_REFREF_RETURN(...) \ - -> decltype((__VA_ARGS__)) { \ - static_assert(std::is_reference::value, ""); \ - return __VA_ARGS__; \ - } - -#define DECLTYPE_AUTO auto -#define DECLTYPE_AUTO_RETURN(...) 
\ - -> decltype(__VA_ARGS__) { return __VA_ARGS__; } - -#endif - - class bad_variant_access : public std::exception { - public: - virtual const char *what() const noexcept override { return "bad_variant_access"; } - }; - - [[noreturn]] inline void throw_bad_variant_access() { -#ifdef TF_EXCEPTIONS - throw bad_variant_access{}; -#else - std::terminate(); - TF_BUILTIN_UNREACHABLE; -#endif - } - - template - class variant; - - template - struct variant_size; - -#ifdef TF_VARIABLE_TEMPLATES - template - constexpr std::size_t variant_size_v = variant_size::value; -#endif - - template - struct variant_size : variant_size {}; - - template - struct variant_size : variant_size {}; - - template - struct variant_size : variant_size {}; - - template - struct variant_size> : lib::size_constant {}; - - template - struct variant_alternative; - - template - using variant_alternative_t = typename variant_alternative::type; - - template - struct variant_alternative - : std::add_const> {}; - - template - struct variant_alternative - : std::add_volatile> {}; - - template - struct variant_alternative - : std::add_cv> {}; - - template - struct variant_alternative> { - static_assert(I < sizeof...(Ts), - "index out of bounds in `std::variant_alternative<>`"); - using type = lib::type_pack_element_t; - }; - - constexpr std::size_t variant_npos = static_cast(-1); - - namespace detail { - - constexpr std::size_t not_found = static_cast(-1); - constexpr std::size_t ambiguous = static_cast(-2); - -#ifdef TF_CPP14_CONSTEXPR - template - inline constexpr std::size_t find_index() { - constexpr lib::array matches = { - {std::is_same::value...} - }; - std::size_t result = not_found; - for (std::size_t i = 0; i < sizeof...(Ts); ++i) { - if (matches[i]) { - if (result != not_found) { - return ambiguous; - } - result = i; - } - } - return result; - } -#else - inline constexpr std::size_t find_index_impl(std::size_t result, - std::size_t) { - return result; - } - - template - inline constexpr std::size_t find_index_impl(std::size_t result, - std::size_t idx, - bool b, - Bs... bs) { - return b ? (result != not_found ? ambiguous - : find_index_impl(idx, idx + 1, bs...)) - : find_index_impl(result, idx + 1, bs...); - } - - template - inline constexpr std::size_t find_index() { - return find_index_impl(not_found, 0, std::is_same::value...); - } -#endif - - template - using find_index_sfinae_impl = - lib::enable_if_t>; - - template - using find_index_sfinae = find_index_sfinae_impl()>; - - template - struct find_index_checked_impl : lib::size_constant { - static_assert(I != not_found, "the specified type is not found."); - static_assert(I != ambiguous, "the specified type is ambiguous."); - }; - - template - using find_index_checked = find_index_checked_impl()>; - - struct valueless_t {}; - - enum class Trait { TriviallyAvailable, Available, Unavailable }; - - template class IsTriviallyAvailable, - template class IsAvailable> - inline constexpr Trait trait() { - return IsTriviallyAvailable::value - ? Trait::TriviallyAvailable - : IsAvailable::value ? Trait::Available - : Trait::Unavailable; - } - -#ifdef TF_CPP14_CONSTEXPR - template - inline constexpr Trait common_trait(Traits... 
traits_) { - Trait result = Trait::TriviallyAvailable; - lib::array traits = {{traits_...}}; - for (std::size_t i = 0; i < sizeof...(Traits); ++i) { - Trait t = traits[i]; - if (static_cast(t) > static_cast(result)) { - result = t; - } - } - return result; - } -#else - inline constexpr Trait common_trait_impl(Trait result) { return result; } - - template - inline constexpr Trait common_trait_impl(Trait result, - Trait t, - Traits... ts) { - return static_cast(t) > static_cast(result) - ? common_trait_impl(t, ts...) - : common_trait_impl(result, ts...); - } - - template - inline constexpr Trait common_trait(Traits... ts) { - return common_trait_impl(Trait::TriviallyAvailable, ts...); - } -#endif - - template - struct traits { - static constexpr Trait copy_constructible_trait = - common_trait(trait()...); - - static constexpr Trait move_constructible_trait = - common_trait(trait()...); - - static constexpr Trait copy_assignable_trait = - common_trait(copy_constructible_trait, - trait()...); - - static constexpr Trait move_assignable_trait = - common_trait(move_constructible_trait, - trait()...); - - static constexpr Trait destructible_trait = - common_trait(trait()...); - }; - - namespace access { - - struct recursive_union { -#ifdef TF_RETURN_TYPE_DEDUCTION - template - inline static constexpr auto &&get_alt(V &&v, in_place_index_t<0>) { - return lib::forward(v).head_; - } - - template - inline static constexpr auto &&get_alt(V &&v, in_place_index_t) { - return get_alt(lib::forward(v).tail_, in_place_index_t{}); - } -#else - template - struct get_alt_impl { - template - inline constexpr AUTO_REFREF operator()(V &&v) const - AUTO_REFREF_RETURN(get_alt_impl{}(lib::forward(v).tail_)) - }; - - template - struct get_alt_impl<0, Dummy> { - template - inline constexpr AUTO_REFREF operator()(V &&v) const - AUTO_REFREF_RETURN(lib::forward(v).head_) - }; - - template - inline static constexpr AUTO_REFREF get_alt(V &&v, in_place_index_t) - AUTO_REFREF_RETURN(get_alt_impl{}(lib::forward(v))) -#endif - }; - - struct base { - template - inline static constexpr AUTO_REFREF get_alt(V &&v) -#ifdef _MSC_VER - AUTO_REFREF_RETURN(recursive_union::get_alt( - lib::forward(v).data_, in_place_index_t{})) -#else - AUTO_REFREF_RETURN(recursive_union::get_alt( - data(lib::forward(v)), in_place_index_t{})) -#endif - }; - - struct variant { - template - inline static constexpr AUTO_REFREF get_alt(V &&v) - AUTO_REFREF_RETURN(base::get_alt(lib::forward(v).impl_)) - }; - - } // namespace access - - namespace visitation { - -#if defined(TF_CPP14_CONSTEXPR) && !defined(_MSC_VER) -#define TF_VARIANT_SWITCH_VISIT -#endif - - struct base { - template - using dispatch_result_t = decltype( - lib::invoke(std::declval(), - access::base::get_alt<0>(std::declval())...)); - - template - struct expected { - template - inline static constexpr bool but_got() { - return std::is_same::value; - } - }; - - template - struct visit_return_type_check { - static_assert( - expected::template but_got(), - "`visit` requires the visitor to have a single return type"); - - template - inline static constexpr DECLTYPE_AUTO invoke(Visitor &&visitor, - Alts &&... alts) - DECLTYPE_AUTO_RETURN(lib::invoke(lib::forward(visitor), - lib::forward(alts)...)) - }; - -#ifdef TF_VARIANT_SWITCH_VISIT - template - struct dispatcher; - - template - struct dispatcher { - template - TF_ALWAYS_INLINE static constexpr R dispatch( - F &&, typename ITs::type &&..., Vs &&...) 
{ - TF_BUILTIN_UNREACHABLE; - } - - template - TF_ALWAYS_INLINE static constexpr R dispatch_case(F &&, Vs &&...) { - TF_BUILTIN_UNREACHABLE; - } - - template - TF_ALWAYS_INLINE static constexpr R dispatch_at(std::size_t, - F &&, - Vs &&...) { - TF_BUILTIN_UNREACHABLE; - } - }; - - template - struct dispatcher { - template - TF_ALWAYS_INLINE static constexpr R dispatch( - F &&f, typename ITs::type &&... visited_vs) { - using Expected = R; - using Actual = decltype(lib::invoke( - lib::forward(f), - access::base::get_alt( - lib::forward(visited_vs))...)); - return visit_return_type_check::invoke( - lib::forward(f), - access::base::get_alt( - lib::forward(visited_vs))...); - } - - template - TF_ALWAYS_INLINE static constexpr R dispatch( - F &&f, typename ITs::type &&... visited_vs, V &&v, Vs &&... vs) { -#define TF_DISPATCH(I) \ - dispatcher<(I < lib::decay_t::size()), \ - R, \ - ITs..., \ - lib::indexed_type>:: \ - template dispatch<0>(lib::forward(f), \ - lib::forward(visited_vs)..., \ - lib::forward(v), \ - lib::forward(vs)...) - -#define TF_DEFAULT(I) \ - dispatcher<(I < lib::decay_t::size()), R, ITs...>::template dispatch( \ - lib::forward(f), \ - lib::forward(visited_vs)..., \ - lib::forward(v), \ - lib::forward(vs)...) - - switch (v.index()) { - case B + 0: return TF_DISPATCH(B + 0); - case B + 1: return TF_DISPATCH(B + 1); - case B + 2: return TF_DISPATCH(B + 2); - case B + 3: return TF_DISPATCH(B + 3); - case B + 4: return TF_DISPATCH(B + 4); - case B + 5: return TF_DISPATCH(B + 5); - case B + 6: return TF_DISPATCH(B + 6); - case B + 7: return TF_DISPATCH(B + 7); - case B + 8: return TF_DISPATCH(B + 8); - case B + 9: return TF_DISPATCH(B + 9); - case B + 10: return TF_DISPATCH(B + 10); - case B + 11: return TF_DISPATCH(B + 11); - case B + 12: return TF_DISPATCH(B + 12); - case B + 13: return TF_DISPATCH(B + 13); - case B + 14: return TF_DISPATCH(B + 14); - case B + 15: return TF_DISPATCH(B + 15); - case B + 16: return TF_DISPATCH(B + 16); - case B + 17: return TF_DISPATCH(B + 17); - case B + 18: return TF_DISPATCH(B + 18); - case B + 19: return TF_DISPATCH(B + 19); - case B + 20: return TF_DISPATCH(B + 20); - case B + 21: return TF_DISPATCH(B + 21); - case B + 22: return TF_DISPATCH(B + 22); - case B + 23: return TF_DISPATCH(B + 23); - case B + 24: return TF_DISPATCH(B + 24); - case B + 25: return TF_DISPATCH(B + 25); - case B + 26: return TF_DISPATCH(B + 26); - case B + 27: return TF_DISPATCH(B + 27); - case B + 28: return TF_DISPATCH(B + 28); - case B + 29: return TF_DISPATCH(B + 29); - case B + 30: return TF_DISPATCH(B + 30); - case B + 31: return TF_DISPATCH(B + 31); - default: return TF_DEFAULT(B + 32); - } - -#undef TF_DEFAULT -#undef TF_DISPATCH - } - - template - TF_ALWAYS_INLINE static constexpr R dispatch_case(F &&f, - Vs &&... vs) { - using Expected = R; - using Actual = decltype( - lib::invoke(lib::forward(f), - access::base::get_alt(lib::forward(vs))...)); - return visit_return_type_check::invoke( - lib::forward(f), - access::base::get_alt(lib::forward(vs))...); - } - - template - TF_ALWAYS_INLINE static constexpr R dispatch_at(std::size_t index, - F &&f, - V &&v, - Vs &&... vs) { - static_assert(lib::all<(lib::decay_t::size() == - lib::decay_t::size())...>::value, - "all of the variants must be the same size."); -#define TF_DISPATCH_AT(I) \ - dispatcher<(I < lib::decay_t::size()), R>::template dispatch_case( \ - lib::forward(f), lib::forward(v), lib::forward(vs)...) 
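The dispatcher above visits a variant by switching on index() over blocks of 32 consecutive case labels and recursing into the next block from the default: label, with the boolean template parameter cutting the recursion off once the block start passes the variant's size. A self-contained sketch of that blocked-switch idea follows; it uses a block size of 4, a plain std::size_t index instead of a real variant, and made-up names (Dispatcher, kAlternatives) that do not exist in the bundled header.

    // Sketch of the blocked-switch dispatch used by the visitation code above.
    #include <cstddef>
    #include <cstdio>

    constexpr std::size_t kAlternatives = 6;   // stand-in for the variant size

    template <bool InRange, std::size_t B>     // primary: B starts a valid block
    struct Dispatcher {
      static void run(std::size_t index) {
        switch (index) {
          case B + 0: std::printf("alt %zu\n", B + 0); return;
          case B + 1: std::printf("alt %zu\n", B + 1); return;
          case B + 2: std::printf("alt %zu\n", B + 2); return;
          case B + 3: std::printf("alt %zu\n", B + 3); return;
          default:
            // fall through to the next block of cases, mirroring the
            // default: branch of the removed dispatcher
            Dispatcher<(B + 4 < kAlternatives), B + 4>::run(index);
            return;
        }
      }
    };

    template <std::size_t B>
    struct Dispatcher<false, B> {              // past the end: never reached
      static void run(std::size_t) {}
    };

    int main() {
      Dispatcher<(0 < kAlternatives), 0>::run(5);   // prints "alt 5"
      return 0;
    }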
- -#define TF_DEFAULT(I) \ - dispatcher<(I < lib::decay_t::size()), R>::template dispatch_at( \ - index, lib::forward(f), lib::forward(v), lib::forward(vs)...) - - switch (index) { - case B + 0: return TF_DISPATCH_AT(B + 0); - case B + 1: return TF_DISPATCH_AT(B + 1); - case B + 2: return TF_DISPATCH_AT(B + 2); - case B + 3: return TF_DISPATCH_AT(B + 3); - case B + 4: return TF_DISPATCH_AT(B + 4); - case B + 5: return TF_DISPATCH_AT(B + 5); - case B + 6: return TF_DISPATCH_AT(B + 6); - case B + 7: return TF_DISPATCH_AT(B + 7); - case B + 8: return TF_DISPATCH_AT(B + 8); - case B + 9: return TF_DISPATCH_AT(B + 9); - case B + 10: return TF_DISPATCH_AT(B + 10); - case B + 11: return TF_DISPATCH_AT(B + 11); - case B + 12: return TF_DISPATCH_AT(B + 12); - case B + 13: return TF_DISPATCH_AT(B + 13); - case B + 14: return TF_DISPATCH_AT(B + 14); - case B + 15: return TF_DISPATCH_AT(B + 15); - case B + 16: return TF_DISPATCH_AT(B + 16); - case B + 17: return TF_DISPATCH_AT(B + 17); - case B + 18: return TF_DISPATCH_AT(B + 18); - case B + 19: return TF_DISPATCH_AT(B + 19); - case B + 20: return TF_DISPATCH_AT(B + 20); - case B + 21: return TF_DISPATCH_AT(B + 21); - case B + 22: return TF_DISPATCH_AT(B + 22); - case B + 23: return TF_DISPATCH_AT(B + 23); - case B + 24: return TF_DISPATCH_AT(B + 24); - case B + 25: return TF_DISPATCH_AT(B + 25); - case B + 26: return TF_DISPATCH_AT(B + 26); - case B + 27: return TF_DISPATCH_AT(B + 27); - case B + 28: return TF_DISPATCH_AT(B + 28); - case B + 29: return TF_DISPATCH_AT(B + 29); - case B + 30: return TF_DISPATCH_AT(B + 30); - case B + 31: return TF_DISPATCH_AT(B + 31); - default: return TF_DEFAULT(B + 32); - } - -#undef TF_DEFAULT -#undef TF_DISPATCH_AT - } - }; -#else - template - inline static constexpr const T &at(const T &elem) noexcept { - return elem; - } - - template - inline static constexpr const lib::remove_all_extents_t &at( - const lib::array &elems, std::size_t i, Is... is) noexcept { - return at(elems[i], is...); - } - - template - inline static constexpr lib::array, sizeof...(Fs) + 1> - make_farray(F &&f, Fs &&... fs) { - return {{lib::forward(f), lib::forward(fs)...}}; - } - - template - struct make_fmatrix_impl { - - template - inline static constexpr dispatch_result_t dispatch( - F &&f, Vs &&... vs) { - using Expected = dispatch_result_t; - using Actual = decltype(lib::invoke( - lib::forward(f), - access::base::get_alt(lib::forward(vs))...)); - return visit_return_type_check::invoke( - lib::forward(f), - access::base::get_alt(lib::forward(vs))...); - } - -#ifdef TF_RETURN_TYPE_DEDUCTION - template - inline static constexpr auto impl(lib::index_sequence) { - return &dispatch; - } - - template - inline static constexpr auto impl(Is, - lib::index_sequence, - Ls... 
ls) { - return make_farray(impl(lib::push_back_t{}, ls...)...); - } -#else - template - struct impl; - - template - struct impl> { - inline constexpr AUTO operator()() const - AUTO_RETURN(&dispatch) - }; - - template - struct impl, Ls...> { - inline constexpr AUTO operator()() const - AUTO_RETURN( - make_farray(impl, Ls...>{}()...)) - }; -#endif - }; - -#ifdef TF_RETURN_TYPE_DEDUCTION - template - inline static constexpr auto make_fmatrix() { - return make_fmatrix_impl::impl( - lib::index_sequence<>{}, - lib::make_index_sequence::size()>{}...); - } -#else - template - inline static constexpr AUTO make_fmatrix() - AUTO_RETURN( - typename make_fmatrix_impl::template impl< - lib::index_sequence<>, - lib::make_index_sequence::size()>...>{}()) -#endif - - template - struct make_fdiagonal_impl { - template - inline static constexpr dispatch_result_t dispatch( - F &&f, Vs &&... vs) { - using Expected = dispatch_result_t; - using Actual = decltype( - lib::invoke(lib::forward(f), - access::base::get_alt(lib::forward(vs))...)); - return visit_return_type_check::invoke( - lib::forward(f), - access::base::get_alt(lib::forward(vs))...); - } - - template - inline static constexpr AUTO impl(lib::index_sequence) - AUTO_RETURN(make_farray(&dispatch...)) - }; - - template - inline static constexpr auto make_fdiagonal() - -> decltype(make_fdiagonal_impl::impl( - lib::make_index_sequence::size()>{})) { - static_assert(lib::all<(lib::decay_t::size() == - lib::decay_t::size())...>::value, - "all of the variants must be the same size."); - return make_fdiagonal_impl::impl( - lib::make_index_sequence::size()>{}); - } -#endif - }; - -#if !defined(TF_VARIANT_SWITCH_VISIT) && \ - (!defined(_MSC_VER) || _MSC_VER >= 1910) - template - using fmatrix_t = decltype(base::make_fmatrix()); - - template - struct fmatrix { - static constexpr fmatrix_t value = - base::make_fmatrix(); - }; - - template - constexpr fmatrix_t fmatrix::value; - - template - using fdiagonal_t = decltype(base::make_fdiagonal()); - - template - struct fdiagonal { - static constexpr fdiagonal_t value = - base::make_fdiagonal(); - }; - - template - constexpr fdiagonal_t fdiagonal::value; -#endif - - struct alt { - template - inline static constexpr DECLTYPE_AUTO visit_alt(Visitor &&visitor, - Vs &&... vs) -#ifdef TF_VARIANT_SWITCH_VISIT - DECLTYPE_AUTO_RETURN( - base::dispatcher< - true, - base::dispatch_result_t(vs)))...>>:: - template dispatch<0>(lib::forward(visitor), - as_base(lib::forward(vs))...)) -#elif !defined(_MSC_VER) || _MSC_VER >= 1910 - DECLTYPE_AUTO_RETURN(base::at( - fmatrix(vs)))...>::value, - vs.index()...)(lib::forward(visitor), - as_base(lib::forward(vs))...)) -#else - DECLTYPE_AUTO_RETURN(base::at( - base::make_fmatrix(vs)))...>(), - vs.index()...)(lib::forward(visitor), - as_base(lib::forward(vs))...)) -#endif - - template - inline static constexpr DECLTYPE_AUTO visit_alt_at(std::size_t index, - Visitor &&visitor, - Vs &&... 
vs) -#ifdef TF_VARIANT_SWITCH_VISIT - DECLTYPE_AUTO_RETURN( - base::dispatcher< - true, - base::dispatch_result_t(vs)))...>>:: - template dispatch_at<0>(index, - lib::forward(visitor), - as_base(lib::forward(vs))...)) -#elif !defined(_MSC_VER) || _MSC_VER >= 1910 - DECLTYPE_AUTO_RETURN(base::at( - fdiagonal(vs)))...>::value, - index)(lib::forward(visitor), - as_base(lib::forward(vs))...)) -#else - DECLTYPE_AUTO_RETURN(base::at( - base::make_fdiagonal(vs)))...>(), - index)(lib::forward(visitor), - as_base(lib::forward(vs))...)) -#endif - }; - - struct variant { - private: - template - struct visitor { - template - inline static constexpr bool does_not_handle() { - return lib::is_invocable::value; - } - }; - - template - struct visit_exhaustiveness_check { - static_assert(visitor::template does_not_handle(), - "`visit` requires the visitor to be exhaustive."); - - inline static constexpr DECLTYPE_AUTO invoke(Visitor &&visitor, - Values &&... values) - DECLTYPE_AUTO_RETURN(lib::invoke(lib::forward(visitor), - lib::forward(values)...)) - }; - - template - struct value_visitor { - Visitor &&visitor_; - - template - inline constexpr DECLTYPE_AUTO operator()(Alts &&... alts) const - DECLTYPE_AUTO_RETURN( - visit_exhaustiveness_check< - Visitor, - decltype((lib::forward(alts).value))...>:: - invoke(lib::forward(visitor_), - lib::forward(alts).value...)) - }; - - template - inline static constexpr AUTO make_value_visitor(Visitor &&visitor) - AUTO_RETURN(value_visitor{lib::forward(visitor)}) - - public: - template - inline static constexpr DECLTYPE_AUTO visit_alt(Visitor &&visitor, - Vs &&... vs) - DECLTYPE_AUTO_RETURN(alt::visit_alt(lib::forward(visitor), - lib::forward(vs).impl_...)) - - template - inline static constexpr DECLTYPE_AUTO visit_alt_at(std::size_t index, - Visitor &&visitor, - Vs &&... vs) - DECLTYPE_AUTO_RETURN( - alt::visit_alt_at(index, - lib::forward(visitor), - lib::forward(vs).impl_...)) - - template - inline static constexpr DECLTYPE_AUTO visit_value(Visitor &&visitor, - Vs &&... vs) - DECLTYPE_AUTO_RETURN( - visit_alt(make_value_visitor(lib::forward(visitor)), - lib::forward(vs)...)) - - template - inline static constexpr DECLTYPE_AUTO visit_value_at(std::size_t index, - Visitor &&visitor, - Vs &&... vs) - DECLTYPE_AUTO_RETURN( - visit_alt_at(index, - make_value_visitor(lib::forward(visitor)), - lib::forward(vs)...)) - }; - - } // namespace visitation - - template - struct alt { - using value_type = T; - -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable : 4244) -#endif - template - inline explicit constexpr alt(in_place_t, Args &&... args) - : value(lib::forward(args)...) {} -#ifdef _MSC_VER -#pragma warning(pop) -#endif - - T value; - }; - - template - union recursive_union; - - template - union recursive_union {}; - -#define TF_VARIANT_RECURSIVE_UNION(destructible_trait, destructor) \ - template \ - union recursive_union { \ - public: \ - inline explicit constexpr recursive_union(valueless_t) noexcept \ - : dummy_{} {} \ - \ - template \ - inline explicit constexpr recursive_union(in_place_index_t<0>, \ - Args &&... args) \ - : head_(in_place_t{}, lib::forward(args)...) {} \ - \ - template \ - inline explicit constexpr recursive_union(in_place_index_t, \ - Args &&... args) \ - : tail_(in_place_index_t{}, lib::forward(args)...) 
{} \ - \ - recursive_union(const recursive_union &) = default; \ - recursive_union(recursive_union &&) = default; \ - \ - destructor \ - \ - recursive_union &operator=(const recursive_union &) = default; \ - recursive_union &operator=(recursive_union &&) = default; \ - \ - private: \ - char dummy_; \ - alt head_; \ - recursive_union tail_; \ - \ - friend struct access::recursive_union; \ - } - - TF_VARIANT_RECURSIVE_UNION(Trait::TriviallyAvailable, - ~recursive_union() = default;); - TF_VARIANT_RECURSIVE_UNION(Trait::Available, - ~recursive_union() {}); - TF_VARIANT_RECURSIVE_UNION(Trait::Unavailable, - ~recursive_union() = delete;); - -#undef TF_VARIANT_RECURSIVE_UNION - - template - using index_t = typename std::conditional< - sizeof...(Ts) < (std::numeric_limits::max)(), - unsigned char, - typename std::conditional< - sizeof...(Ts) < (std::numeric_limits::max)(), - unsigned short, - unsigned int>::type - >::type; - - template - class base { - public: - inline explicit constexpr base(valueless_t tag) noexcept - : data_(tag), index_(static_cast>(-1)) {} - - template - inline explicit constexpr base(in_place_index_t, Args &&... args) - : data_(in_place_index_t{}, lib::forward(args)...), - index_(I) {} - - inline constexpr bool valueless_by_exception() const noexcept { - return index_ == static_cast>(-1); - } - - inline constexpr std::size_t index() const noexcept { - return valueless_by_exception() ? variant_npos : index_; - } - - protected: - using data_t = recursive_union; - - friend inline constexpr base &as_base(base &b) { return b; } - friend inline constexpr const base &as_base(const base &b) { return b; } - friend inline constexpr base &&as_base(base &&b) { return lib::move(b); } - friend inline constexpr const base &&as_base(const base &&b) { return lib::move(b); } - - friend inline constexpr data_t &data(base &b) { return b.data_; } - friend inline constexpr const data_t &data(const base &b) { return b.data_; } - friend inline constexpr data_t &&data(base &&b) { return lib::move(b).data_; } - friend inline constexpr const data_t &&data(const base &&b) { return lib::move(b).data_; } - - inline static constexpr std::size_t size() { return sizeof...(Ts); } - - data_t data_; - index_t index_; - - friend struct access::base; - friend struct visitation::base; - }; - - struct dtor { -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable : 4100) -#endif - template - inline void operator()(Alt &alt) const noexcept { alt.~Alt(); } -#ifdef _MSC_VER -#pragma warning(pop) -#endif - }; - -#if !defined(_MSC_VER) || _MSC_VER >= 1910 -#define TF_INHERITING_CTOR(type, base) using base::base; -#else -#define TF_INHERITING_CTOR(type, base) \ - template \ - inline explicit constexpr type(Args &&... args) \ - : base(lib::forward(args)...) 
{} -#endif - - template - class destructor; - -#define TF_VARIANT_DESTRUCTOR(destructible_trait, definition, destroy) \ - template \ - class destructor, destructible_trait> \ - : public base { \ - using super = base; \ - \ - public: \ - TF_INHERITING_CTOR(destructor, super) \ - using super::operator=; \ - \ - destructor(const destructor &) = default; \ - destructor(destructor &&) = default; \ - definition \ - destructor &operator=(const destructor &) = default; \ - destructor &operator=(destructor &&) = default; \ - \ - protected: \ - destroy \ - } - - TF_VARIANT_DESTRUCTOR( - Trait::TriviallyAvailable, - ~destructor() = default;, - inline void destroy() noexcept { - this->index_ = static_cast>(-1); - }); - - TF_VARIANT_DESTRUCTOR( - Trait::Available, - ~destructor() { destroy(); }, - inline void destroy() noexcept { - if (!this->valueless_by_exception()) { - visitation::alt::visit_alt(dtor{}, *this); - } - this->index_ = static_cast>(-1); - }); - - TF_VARIANT_DESTRUCTOR( - Trait::Unavailable, - ~destructor() = delete;, - inline void destroy() noexcept = delete;); - -#undef TF_VARIANT_DESTRUCTOR - - template - class constructor : public destructor { - using super = destructor; - - public: - TF_INHERITING_CTOR(constructor, super) - using super::operator=; - - protected: -#ifndef TF_GENERIC_LAMBDAS - struct ctor { - template - inline void operator()(LhsAlt &lhs_alt, RhsAlt &&rhs_alt) const { - constructor::construct_alt(lhs_alt, - lib::forward(rhs_alt).value); - } - }; -#endif - - template - inline static T &construct_alt(alt &a, Args &&... args) { - auto *result = ::new (static_cast(lib::addressof(a))) - alt(in_place_t{}, lib::forward(args)...); - return result->value; - } - - template - inline static void generic_construct(constructor &lhs, Rhs &&rhs) { - lhs.destroy(); - if (!rhs.valueless_by_exception()) { - visitation::alt::visit_alt_at( - rhs.index(), -#ifdef TF_GENERIC_LAMBDAS - [](auto &lhs_alt, auto &&rhs_alt) { - constructor::construct_alt( - lhs_alt, lib::forward(rhs_alt).value); - } -#else - ctor{} -#endif - , - lhs, - lib::forward(rhs)); - lhs.index_ = rhs.index_; - } - } - }; - - template - class move_constructor; - -#define TF_VARIANT_MOVE_CONSTRUCTOR(move_constructible_trait, definition) \ - template \ - class move_constructor, move_constructible_trait> \ - : public constructor> { \ - using super = constructor>; \ - \ - public: \ - TF_INHERITING_CTOR(move_constructor, super) \ - using super::operator=; \ - \ - move_constructor(const move_constructor &) = default; \ - definition \ - ~move_constructor() = default; \ - move_constructor &operator=(const move_constructor &) = default; \ - move_constructor &operator=(move_constructor &&) = default; \ - } - - TF_VARIANT_MOVE_CONSTRUCTOR( - Trait::TriviallyAvailable, - move_constructor(move_constructor &&that) = default;); - - TF_VARIANT_MOVE_CONSTRUCTOR( - Trait::Available, - move_constructor(move_constructor &&that) noexcept( - lib::all::value...>::value) - : move_constructor(valueless_t{}) { - this->generic_construct(*this, lib::move(that)); - }); - - TF_VARIANT_MOVE_CONSTRUCTOR( - Trait::Unavailable, - move_constructor(move_constructor &&) = delete;); - -#undef TF_VARIANT_MOVE_CONSTRUCTOR - - template - class copy_constructor; - -#define TF_VARIANT_COPY_CONSTRUCTOR(copy_constructible_trait, definition) \ - template \ - class copy_constructor, copy_constructible_trait> \ - : public move_constructor> { \ - using super = move_constructor>; \ - \ - public: \ - TF_INHERITING_CTOR(copy_constructor, super) \ - using super::operator=; \ 
- \ - definition \ - copy_constructor(copy_constructor &&) = default; \ - ~copy_constructor() = default; \ - copy_constructor &operator=(const copy_constructor &) = default; \ - copy_constructor &operator=(copy_constructor &&) = default; \ - } - - TF_VARIANT_COPY_CONSTRUCTOR( - Trait::TriviallyAvailable, - copy_constructor(const copy_constructor &that) = default;); - - TF_VARIANT_COPY_CONSTRUCTOR( - Trait::Available, - copy_constructor(const copy_constructor &that) - : copy_constructor(valueless_t{}) { - this->generic_construct(*this, that); - }); - - TF_VARIANT_COPY_CONSTRUCTOR( - Trait::Unavailable, - copy_constructor(const copy_constructor &) = delete;); - -#undef TF_VARIANT_COPY_CONSTRUCTOR - - template - class assignment : public copy_constructor { - using super = copy_constructor; - - public: - TF_INHERITING_CTOR(assignment, super) - using super::operator=; - - template - inline /* auto & */ auto emplace(Args &&... args) - -> decltype(this->construct_alt(access::base::get_alt(*this), - lib::forward(args)...)) { - this->destroy(); - auto &result = this->construct_alt(access::base::get_alt(*this), - lib::forward(args)...); - this->index_ = I; - return result; - } - - protected: -#ifndef TF_GENERIC_LAMBDAS - template - struct assigner { - template - inline void operator()(ThisAlt &this_alt, ThatAlt &&that_alt) const { - self->assign_alt(this_alt, lib::forward(that_alt).value); - } - assignment *self; - }; -#endif - - template - inline void assign_alt(alt &a, Arg &&arg) { - if (this->index() == I) { -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable : 4244) -#endif - a.value = lib::forward(arg); -#ifdef _MSC_VER -#pragma warning(pop) -#endif - } else { - struct { - void operator()(std::true_type) const { - this_->emplace(lib::forward(arg_)); - } - void operator()(std::false_type) const { - this_->emplace(T(lib::forward(arg_))); - } - assignment *this_; - Arg &&arg_; - } impl{this, lib::forward(arg)}; - impl(lib::bool_constant< - std::is_nothrow_constructible::value || - !std::is_nothrow_move_constructible::value>{}); - } - } - - template - inline void generic_assign(That &&that) { - if (this->valueless_by_exception() && that.valueless_by_exception()) { - // do nothing. 
- } else if (that.valueless_by_exception()) { - this->destroy(); - } else { - visitation::alt::visit_alt_at( - that.index(), -#ifdef TF_GENERIC_LAMBDAS - [this](auto &this_alt, auto &&that_alt) { - this->assign_alt( - this_alt, lib::forward(that_alt).value); - } -#else - assigner{this} -#endif - , - *this, - lib::forward(that)); - } - } - }; - - template - class move_assignment; - -#define TF_VARIANT_MOVE_ASSIGNMENT(move_assignable_trait, definition) \ - template \ - class move_assignment, move_assignable_trait> \ - : public assignment> { \ - using super = assignment>; \ - \ - public: \ - TF_INHERITING_CTOR(move_assignment, super) \ - using super::operator=; \ - \ - move_assignment(const move_assignment &) = default; \ - move_assignment(move_assignment &&) = default; \ - ~move_assignment() = default; \ - move_assignment &operator=(const move_assignment &) = default; \ - definition \ - } - - TF_VARIANT_MOVE_ASSIGNMENT( - Trait::TriviallyAvailable, - move_assignment &operator=(move_assignment &&that) = default;); - - TF_VARIANT_MOVE_ASSIGNMENT( - Trait::Available, - move_assignment & - operator=(move_assignment &&that) noexcept( - lib::all<(std::is_nothrow_move_constructible::value && - std::is_nothrow_move_assignable::value)...>::value) { - this->generic_assign(lib::move(that)); - return *this; - }); - - TF_VARIANT_MOVE_ASSIGNMENT( - Trait::Unavailable, - move_assignment &operator=(move_assignment &&) = delete;); - -#undef TF_VARIANT_MOVE_ASSIGNMENT - - template - class copy_assignment; - -#define TF_VARIANT_COPY_ASSIGNMENT(copy_assignable_trait, definition) \ - template \ - class copy_assignment, copy_assignable_trait> \ - : public move_assignment> { \ - using super = move_assignment>; \ - \ - public: \ - TF_INHERITING_CTOR(copy_assignment, super) \ - using super::operator=; \ - \ - copy_assignment(const copy_assignment &) = default; \ - copy_assignment(copy_assignment &&) = default; \ - ~copy_assignment() = default; \ - definition \ - copy_assignment &operator=(copy_assignment &&) = default; \ - } - - TF_VARIANT_COPY_ASSIGNMENT( - Trait::TriviallyAvailable, - copy_assignment &operator=(const copy_assignment &that) = default;); - - TF_VARIANT_COPY_ASSIGNMENT( - Trait::Available, - copy_assignment &operator=(const copy_assignment &that) { - this->generic_assign(that); - return *this; - }); - - TF_VARIANT_COPY_ASSIGNMENT( - Trait::Unavailable, - copy_assignment &operator=(const copy_assignment &) = delete;); - -#undef TF_VARIANT_COPY_ASSIGNMENT - - template - class impl : public copy_assignment> { - using super = copy_assignment>; - - public: - TF_INHERITING_CTOR(impl, super) - using super::operator=; - - impl(const impl&) = default; - impl(impl&&) = default; - ~impl() = default; - impl &operator=(const impl &) = default; - impl &operator=(impl &&) = default; - - template - inline void assign(Arg &&arg) { - this->assign_alt(access::base::get_alt(*this), - lib::forward(arg)); - } - - inline void swap(impl &that) { - if (this->valueless_by_exception() && that.valueless_by_exception()) { - // do nothing. 
- } else if (this->index() == that.index()) { - visitation::alt::visit_alt_at(this->index(), -#ifdef TF_GENERIC_LAMBDAS - [](auto &this_alt, auto &that_alt) { - using std::swap; - swap(this_alt.value, - that_alt.value); - } -#else - swapper{} -#endif - , - *this, - that); - } else { - impl *lhs = this; - impl *rhs = lib::addressof(that); - if (lhs->move_nothrow() && !rhs->move_nothrow()) { - std::swap(lhs, rhs); - } - impl tmp(lib::move(*rhs)); -#ifdef TF_EXCEPTIONS - // EXTENSION: When the move construction of `lhs` into `rhs` throws - // and `tmp` is nothrow move constructible then we move `tmp` back - // into `rhs` and provide the strong exception safety guarantee. - try { - this->generic_construct(*rhs, lib::move(*lhs)); - } catch (...) { - if (tmp.move_nothrow()) { - this->generic_construct(*rhs, lib::move(tmp)); - } - throw; - } -#else - this->generic_construct(*rhs, lib::move(*lhs)); -#endif - this->generic_construct(*lhs, lib::move(tmp)); - } - } - - private: -#ifndef TF_GENERIC_LAMBDAS - struct swapper { - template - inline void operator()(ThisAlt &this_alt, ThatAlt &that_alt) const { - using std::swap; - swap(this_alt.value, that_alt.value); - } - }; -#endif - - inline constexpr bool move_nothrow() const { - return this->valueless_by_exception() || - lib::array{ - {std::is_nothrow_move_constructible::value...} - }[this->index()]; - } - }; - -#undef TF_INHERITING_CTOR - - template - struct is_non_narrowing_convertible { - template - static std::true_type test(T(&&)[1]); - - template - static auto impl(int) -> decltype(test({std::declval()})); - - template - static auto impl(...) -> std::false_type; - - static constexpr bool value = decltype(impl(0))::value; - }; - - template ::value, - typename = void> - struct overload_leaf {}; - - template - struct overload_leaf { - using impl = lib::size_constant (*)(T); - operator impl() const { return nullptr; }; - }; - - template - struct overload_leaf< - Arg, - I, - T, - true -#if defined(__clang__) || !defined(__GNUC__) || __GNUC__ >= 5 - , - lib::enable_if_t< - std::is_same, bool>::value - ? std::is_same, bool>::value - : is_non_narrowing_convertible::value> -#endif - > { - using impl = lib::size_constant (*)(T); - operator impl() const { return nullptr; }; - }; - - template - struct overload_impl { - private: - template - struct impl; - - template - struct impl> : overload_leaf... 
{}; - - public: - using type = impl>; - }; - - template - using overload = typename overload_impl::type; - - template - using best_match = lib::invoke_result_t, Arg>; - - template - struct is_in_place_index : std::false_type {}; - - template - struct is_in_place_index> : std::true_type {}; - - template - struct is_in_place_type : std::false_type {}; - - template - struct is_in_place_type> : std::true_type {}; - - } // detail - - template - class variant { - static_assert(0 < sizeof...(Ts), - "variant must consist of at least one alternative."); - - static_assert(lib::all::value...>::value, - "variant can not have an array type as an alternative."); - - static_assert(lib::all::value...>::value, - "variant can not have a reference type as an alternative."); - - static_assert(lib::all::value...>::value, - "variant can not have a void type as an alternative."); - - public: - template < - typename Front = lib::type_pack_element_t<0, Ts...>, - lib::enable_if_t::value, int> = 0> - inline constexpr variant() noexcept( - std::is_nothrow_default_constructible::value) - : impl_(in_place_index_t<0>{}) {} - - variant(const variant &) = default; - variant(variant &&) = default; - - template < - typename Arg, - typename Decayed = lib::decay_t, - lib::enable_if_t::value, int> = 0, - lib::enable_if_t::value, int> = 0, - lib::enable_if_t::value, int> = 0, - std::size_t I = detail::best_match::value, - typename T = lib::type_pack_element_t, - lib::enable_if_t::value, int> = 0> - inline constexpr variant(Arg &&arg) noexcept( - std::is_nothrow_constructible::value) - : impl_(in_place_index_t{}, lib::forward(arg)) {} - - template < - std::size_t I, - typename... Args, - typename T = lib::type_pack_element_t, - lib::enable_if_t::value, int> = 0> - inline explicit constexpr variant( - in_place_index_t, - Args &&... args) noexcept(std::is_nothrow_constructible::value) - : impl_(in_place_index_t{}, lib::forward(args)...) {} - - template < - std::size_t I, - typename Up, - typename... Args, - typename T = lib::type_pack_element_t, - lib::enable_if_t &, - Args...>::value, - int> = 0> - inline explicit constexpr variant( - in_place_index_t, - std::initializer_list il, - Args &&... args) noexcept(std:: - is_nothrow_constructible< - T, - std::initializer_list &, - Args...>::value) - : impl_(in_place_index_t{}, il, lib::forward(args)...) {} - - template < - typename T, - typename... Args, - std::size_t I = detail::find_index_sfinae::value, - lib::enable_if_t::value, int> = 0> - inline explicit constexpr variant( - in_place_type_t, - Args &&... args) noexcept(std::is_nothrow_constructible::value) - : impl_(in_place_index_t{}, lib::forward(args)...) {} - - template < - typename T, - typename Up, - typename... Args, - std::size_t I = detail::find_index_sfinae::value, - lib::enable_if_t &, - Args...>::value, - int> = 0> - inline explicit constexpr variant( - in_place_type_t, - std::initializer_list il, - Args &&... args) noexcept(std:: - is_nothrow_constructible< - T, - std::initializer_list &, - Args...>::value) - : impl_(in_place_index_t{}, il, lib::forward(args)...) 
{} - - ~variant() = default; - - variant &operator=(const variant &) = default; - variant &operator=(variant &&) = default; - - template , variant>::value, - int> = 0, - std::size_t I = detail::best_match::value, - typename T = lib::type_pack_element_t, - lib::enable_if_t<(std::is_assignable::value && - std::is_constructible::value), - int> = 0> - inline variant &operator=(Arg &&arg) noexcept( - (std::is_nothrow_assignable::value && - std::is_nothrow_constructible::value)) { - impl_.template assign(lib::forward(arg)); - return *this; - } - - template < - std::size_t I, - typename... Args, - typename T = lib::type_pack_element_t, - lib::enable_if_t::value, int> = 0> - inline T &emplace(Args &&... args) { - return impl_.template emplace(lib::forward(args)...); - } - - template < - std::size_t I, - typename Up, - typename... Args, - typename T = lib::type_pack_element_t, - lib::enable_if_t &, - Args...>::value, - int> = 0> - inline T &emplace(std::initializer_list il, Args &&... args) { - return impl_.template emplace(il, lib::forward(args)...); - } - - template < - typename T, - typename... Args, - std::size_t I = detail::find_index_sfinae::value, - lib::enable_if_t::value, int> = 0> - inline T &emplace(Args &&... args) { - return impl_.template emplace(lib::forward(args)...); - } - - template < - typename T, - typename Up, - typename... Args, - std::size_t I = detail::find_index_sfinae::value, - lib::enable_if_t &, - Args...>::value, - int> = 0> - inline T &emplace(std::initializer_list il, Args &&... args) { - return impl_.template emplace(il, lib::forward(args)...); - } - - inline constexpr bool valueless_by_exception() const noexcept { - return impl_.valueless_by_exception(); - } - - inline constexpr std::size_t index() const noexcept { - return impl_.index(); - } - - template , - Dummy>::value && - lib::dependent_type, - Dummy>::value)...>::value, - int> = 0> - inline void swap(variant &that) noexcept( - lib::all<(std::is_nothrow_move_constructible::value && - lib::is_nothrow_swappable::value)...>::value) { - impl_.swap(that.impl_); - } - - private: - detail::impl impl_; - - friend struct detail::access::variant; - friend struct detail::visitation::variant; - }; - - template - inline constexpr bool holds_alternative(const variant &v) noexcept { - return v.index() == I; - } - - template - inline constexpr bool holds_alternative(const variant &v) noexcept { - return holds_alternative::value>(v); - } - - namespace detail { - template - struct generic_get_impl { - constexpr generic_get_impl(int) noexcept {} - - constexpr AUTO_REFREF operator()(V &&v) const - AUTO_REFREF_RETURN( - access::variant::get_alt(lib::forward(v)).value) - }; - - template - inline constexpr AUTO_REFREF generic_get(V &&v) - AUTO_REFREF_RETURN(generic_get_impl( - holds_alternative(v) ? 
0 : (throw_bad_variant_access(), 0))( - lib::forward(v))) - } // namespace detail - - template - inline constexpr variant_alternative_t> &get( - variant &v) { - return detail::generic_get(v); - } - - template - inline constexpr variant_alternative_t> &&get( - variant &&v) { - return detail::generic_get(lib::move(v)); - } - - template - inline constexpr const variant_alternative_t> &get( - const variant &v) { - return detail::generic_get(v); - } - - template - inline constexpr const variant_alternative_t> &&get( - const variant &&v) { - return detail::generic_get(lib::move(v)); - } - - template - inline constexpr T &get(variant &v) { - return get::value>(v); - } - - template - inline constexpr T &&get(variant &&v) { - return get::value>(lib::move(v)); - } - - template - inline constexpr const T &get(const variant &v) { - return get::value>(v); - } - - template - inline constexpr const T &&get(const variant &&v) { - return get::value>(lib::move(v)); - } - - namespace detail { - - template - inline constexpr /* auto * */ AUTO generic_get_if(V *v) noexcept - AUTO_RETURN(v && holds_alternative(*v) - ? lib::addressof(access::variant::get_alt(*v).value) - : nullptr) - - } // namespace detail - - template - inline constexpr lib::add_pointer_t>> - get_if(variant *v) noexcept { - return detail::generic_get_if(v); - } - - template - inline constexpr lib::add_pointer_t< - const variant_alternative_t>> - get_if(const variant *v) noexcept { - return detail::generic_get_if(v); - } - - template - inline constexpr lib::add_pointer_t - get_if(variant *v) noexcept { - return get_if::value>(v); - } - - template - inline constexpr lib::add_pointer_t - get_if(const variant *v) noexcept { - return get_if::value>(v); - } - - namespace detail { - template - struct convert_to_bool { - template - inline constexpr bool operator()(Lhs &&lhs, Rhs &&rhs) const { - static_assert(std::is_convertible, - bool>::value, - "relational operators must return a type" - " implicitly convertible to bool"); - return lib::invoke( - RelOp{}, lib::forward(lhs), lib::forward(rhs)); - } - }; - } // namespace detail - - template - inline constexpr bool operator==(const variant &lhs, - const variant &rhs) { - using detail::visitation::variant; - using equal_to = detail::convert_to_bool; -#ifdef TF_CPP14_CONSTEXPR - if (lhs.index() != rhs.index()) return false; - if (lhs.valueless_by_exception()) return true; - return variant::visit_value_at(lhs.index(), equal_to{}, lhs, rhs); -#else - return lhs.index() == rhs.index() && - (lhs.valueless_by_exception() || - variant::visit_value_at(lhs.index(), equal_to{}, lhs, rhs)); -#endif - } - - template - inline constexpr bool operator!=(const variant &lhs, - const variant &rhs) { - using detail::visitation::variant; - using not_equal_to = detail::convert_to_bool; -#ifdef TF_CPP14_CONSTEXPR - if (lhs.index() != rhs.index()) return true; - if (lhs.valueless_by_exception()) return false; - return variant::visit_value_at(lhs.index(), not_equal_to{}, lhs, rhs); -#else - return lhs.index() != rhs.index() || - (!lhs.valueless_by_exception() && - variant::visit_value_at(lhs.index(), not_equal_to{}, lhs, rhs)); -#endif - } - - template - inline constexpr bool operator<(const variant &lhs, - const variant &rhs) { - using detail::visitation::variant; - using less = detail::convert_to_bool; -#ifdef TF_CPP14_CONSTEXPR - if (rhs.valueless_by_exception()) return false; - if (lhs.valueless_by_exception()) return true; - if (lhs.index() < rhs.index()) return true; - if (lhs.index() > rhs.index()) return false; - 
return variant::visit_value_at(lhs.index(), less{}, lhs, rhs); -#else - return !rhs.valueless_by_exception() && - (lhs.valueless_by_exception() || lhs.index() < rhs.index() || - (lhs.index() == rhs.index() && - variant::visit_value_at(lhs.index(), less{}, lhs, rhs))); -#endif - } - - template - inline constexpr bool operator>(const variant &lhs, - const variant &rhs) { - using detail::visitation::variant; - using greater = detail::convert_to_bool; -#ifdef TF_CPP14_CONSTEXPR - if (lhs.valueless_by_exception()) return false; - if (rhs.valueless_by_exception()) return true; - if (lhs.index() > rhs.index()) return true; - if (lhs.index() < rhs.index()) return false; - return variant::visit_value_at(lhs.index(), greater{}, lhs, rhs); -#else - return !lhs.valueless_by_exception() && - (rhs.valueless_by_exception() || lhs.index() > rhs.index() || - (lhs.index() == rhs.index() && - variant::visit_value_at(lhs.index(), greater{}, lhs, rhs))); -#endif - } - - template - inline constexpr bool operator<=(const variant &lhs, - const variant &rhs) { - using detail::visitation::variant; - using less_equal = detail::convert_to_bool; -#ifdef TF_CPP14_CONSTEXPR - if (lhs.valueless_by_exception()) return true; - if (rhs.valueless_by_exception()) return false; - if (lhs.index() < rhs.index()) return true; - if (lhs.index() > rhs.index()) return false; - return variant::visit_value_at(lhs.index(), less_equal{}, lhs, rhs); -#else - return lhs.valueless_by_exception() || - (!rhs.valueless_by_exception() && - (lhs.index() < rhs.index() || - (lhs.index() == rhs.index() && - variant::visit_value_at(lhs.index(), less_equal{}, lhs, rhs)))); -#endif - } - - template - inline constexpr bool operator>=(const variant &lhs, - const variant &rhs) { - using detail::visitation::variant; - using greater_equal = detail::convert_to_bool; -#ifdef TF_CPP14_CONSTEXPR - if (rhs.valueless_by_exception()) return true; - if (lhs.valueless_by_exception()) return false; - if (lhs.index() > rhs.index()) return true; - if (lhs.index() < rhs.index()) return false; - return variant::visit_value_at(lhs.index(), greater_equal{}, lhs, rhs); -#else - return rhs.valueless_by_exception() || - (!lhs.valueless_by_exception() && - (lhs.index() > rhs.index() || - (lhs.index() == rhs.index() && - variant::visit_value_at( - lhs.index(), greater_equal{}, lhs, rhs)))); -#endif - } - - struct monostate {}; - - inline constexpr bool operator<(monostate, monostate) noexcept { - return false; - } - - inline constexpr bool operator>(monostate, monostate) noexcept { - return false; - } - - inline constexpr bool operator<=(monostate, monostate) noexcept { - return true; - } - - inline constexpr bool operator>=(monostate, monostate) noexcept { - return true; - } - - inline constexpr bool operator==(monostate, monostate) noexcept { - return true; - } - - inline constexpr bool operator!=(monostate, monostate) noexcept { - return false; - } - -#ifdef TF_CPP14_CONSTEXPR - namespace detail { - - inline constexpr bool any(std::initializer_list bs) { - for (bool b : bs) { - if (b) { - return true; - } - } - return false; - } - - } // namespace detail - - template - inline constexpr decltype(auto) visit(Visitor &&visitor, Vs &&... vs) { - return (!detail::any({vs.valueless_by_exception()...}) - ? 
(void)0 - : throw_bad_variant_access()), - detail::visitation::variant::visit_value( - lib::forward(visitor), lib::forward(vs)...); - } -#else - namespace detail { - - template - inline constexpr bool all_impl(const lib::array &bs, - std::size_t idx) { - return idx >= N || (bs[idx] && all_impl(bs, idx + 1)); - } - - template - inline constexpr bool all(const lib::array &bs) { - return all_impl(bs, 0); - } - - } // namespace detail - - template - inline constexpr DECLTYPE_AUTO visit(Visitor &&visitor, Vs &&... vs) - DECLTYPE_AUTO_RETURN( - (detail::all( - lib::array{{!vs.valueless_by_exception()...}}) - ? (void)0 - : throw_bad_variant_access()), - detail::visitation::variant::visit_value(lib::forward(visitor), - lib::forward(vs)...)) -#endif - - template - inline auto swap(variant &lhs, - variant &rhs) noexcept(noexcept(lhs.swap(rhs))) - -> decltype(lhs.swap(rhs)) { - lhs.swap(rhs); - } - - namespace detail { - - template - using enabled_type = T; - - namespace hash { - - template - constexpr bool meets_requirements() noexcept { - return std::is_copy_constructible::value && - std::is_move_constructible::value && - lib::is_invocable_r::value; - } - - template - constexpr bool is_enabled() noexcept { - using H = std::hash; - return meets_requirements() && - std::is_default_constructible::value && - std::is_copy_assignable::value && - std::is_move_assignable::value; - } - - } // namespace hash - - } // namespace detail - -#undef AUTO -#undef AUTO_RETURN - -#undef AUTO_REFREF -#undef AUTO_REFREF_RETURN - -#undef DECLTYPE_AUTO -#undef DECLTYPE_AUTO_RETURN - -}} // namespace tf::nstd - -namespace std { - - template - struct hash, - tf::nstd::lib::enable_if_t>()...>::value>>> { - using argument_type = tf::nstd::variant; - using result_type = std::size_t; - - inline result_type operator()(const argument_type &v) const { - using tf::nstd::detail::visitation::variant; - std::size_t result = - v.valueless_by_exception() - ? 299792458 // Random value chosen by the universe upon creation - : variant::visit_alt( -#ifdef TF_GENERIC_LAMBDAS - [](const auto &alt) { - using alt_type = tf::nstd::lib::decay_t; - using value_type = tf::nstd::lib::remove_const_t< - typename alt_type::value_type>; - return hash{}(alt.value); - } -#else - hasher{} -#endif - , - v); - return hash_combine(result, hash{}(v.index())); - } - - private: -#ifndef TF_GENERIC_LAMBDAS - struct hasher { - template - inline std::size_t operator()(const Alt &alt) const { - using alt_type = tf::nstd::lib::decay_t; - using value_type = - tf::nstd::lib::remove_const_t; - return hash{}(alt.value); - } - }; -#endif - - static std::size_t hash_combine(std::size_t lhs, std::size_t rhs) { - return lhs ^= rhs + 0x9e3779b9 + (lhs << 6) + (lhs >> 2); - } - }; - - template <> - struct hash { - using argument_type = tf::nstd::monostate; - using result_type = std::size_t; - - inline result_type operator()(const argument_type &) const noexcept { - return 66740831; // return a fundamentally attractive random value. - } - }; - -} // namespace std - - - diff --git a/bundled/taskflow-2.5.0/include/taskflow/taskflow.hpp b/bundled/taskflow-2.5.0/include/taskflow/taskflow.hpp deleted file mode 100644 index 192f8a8cfd..0000000000 --- a/bundled/taskflow-2.5.0/include/taskflow/taskflow.hpp +++ /dev/null @@ -1,13 +0,0 @@ -#pragma once - -#include "core/executor.hpp" - -namespace tf { - - -} // end of namespace tf. 
--------------------------------------------------- - - - - - diff --git a/bundled/taskflow-2.5.0/include/taskflow/utility/os.hpp b/bundled/taskflow-2.5.0/include/taskflow/utility/os.hpp deleted file mode 100644 index c211d87ec3..0000000000 --- a/bundled/taskflow-2.5.0/include/taskflow/utility/os.hpp +++ /dev/null @@ -1,27 +0,0 @@ -#include -#include -#include - -namespace tf { - -// Function: get_env -inline std::string get_env(const std::string& str) { -#ifdef _MSC_VER - char *ptr = nullptr; - size_t len = 0; - - if(_dupenv_s(&ptr, &len, str.c_str()) == 0 && ptr != nullptr) { - std::string res(ptr, len); - free(ptr); - return res; - } - return ""; - -#else - auto ptr = std::getenv(str.c_str()); - return ptr ? ptr : ""; -#endif -} - - -} // end of namespace tf ----------------------------------------------------- diff --git a/bundled/taskflow-2.5.0/include/taskflow/utility/passive_vector.hpp b/bundled/taskflow-2.5.0/include/taskflow/utility/passive_vector.hpp deleted file mode 100644 index b5710c054d..0000000000 --- a/bundled/taskflow-2.5.0/include/taskflow/utility/passive_vector.hpp +++ /dev/null @@ -1,213 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include - -namespace tf { - -// Class: PassiveVector -// A vector storing only passive data structure (PDS) or POD data type. -template > -class PassiveVector { - - static_assert( - std::is_trivial::value && std::is_standard_layout::value, - "must be a plain old data type" - ); - - public: - - typedef T value_type; - typedef T & reference; - typedef const T & const_reference; - typedef T * pointer; - typedef const T * const_pointer; - typedef T * iterator; - typedef const T * const_iterator; - typedef std::reverse_iterator reverse_iterator; - typedef std::reverse_iterator const_reverse_iterator; - typedef ptrdiff_t difference_type; - typedef size_t size_type; - - PassiveVector() noexcept : - _data {reinterpret_cast(_stack)}, - _num {0}, - _cap {S} { - } - - explicit PassiveVector(size_type n) : _num {n} { - - // need to place on heap - if(n > S) { - _cap = n << 2; - _data = _allocator.allocate(_cap); - } - // stack - else { - _cap = S; - _data = reinterpret_cast(_stack); - } - - } - - PassiveVector(const PassiveVector& rhs) : _num {rhs._num} { - - // heap - if(rhs._num > S) { - _cap = rhs._cap; - _data = _allocator.allocate(rhs._cap); - } - else { - _cap = S; - _data = reinterpret_cast(_stack); - } - - std::memcpy(_data, rhs._data, _num * sizeof(T)); - } - - PassiveVector(PassiveVector&& rhs) : _num {rhs._num} { - - // rhs is in the stack - if(rhs.in_stack()) { - _cap = S; - _data = reinterpret_cast(_stack); - std::memcpy(_stack, rhs._stack, rhs._num*sizeof(T)); - } - // rhs is in the heap - else { - _cap = rhs._cap; - _data = rhs._data; - rhs._data = reinterpret_cast(rhs._stack); - rhs._cap = S; - } - - rhs._num = 0; - } - - ~PassiveVector() { - if(!in_stack()) { - _allocator.deallocate(_data, _cap); - } - } - - iterator begin() noexcept { return _data; } - const_iterator begin() const noexcept { return _data; } - const_iterator cbegin() const noexcept { return _data; } - iterator end() noexcept { return _data + _num; } - const_iterator end() const noexcept { return _data + _num; } - const_iterator cend() const noexcept { return _data + _num; } - - reverse_iterator rbegin() noexcept { return _data + _num; } - const_reverse_iterator crbegin() const noexcept { return _data + _num; } - reverse_iterator rend() noexcept { return _data; } - const_reverse_iterator crend() const noexcept { return _data; } - - reference operator 
[] (size_type idx) { return _data[idx]; } - const_reference operator [] (size_type idx) const { return _data[idx]; } - - reference at(size_type pos) { - if(pos >= _num) { - throw std::out_of_range("accessed position is out of range"); - } - return this->operator[](pos); - } - - const_reference at(size_type pos) const { - if(pos >= _num) { - throw std::out_of_range("accessed position is out of range"); - } - return this->operator[](pos); - } - - - reference front() { return _data[0]; } - const_reference front() const { return _data[0]; } - reference back() { return _data[_num-1]; } - const_reference back() const { return _data[_num-1]; } - - pointer data() noexcept { return _data; } - const_pointer data() const noexcept { return _data; } - - void push_back(const T& item) { - if(_num == _cap) { - _enlarge(_cap << 1); - } - _data[_num++] = item; - } - - void push_back(T&& item) { - if(_num == _cap) { - _enlarge(_cap << 1); - } - _data[_num++] = item; - } - - void pop_back() { - if(_num > 0) { - --_num; - } - } - - void clear() { - _num = 0; - } - - void resize(size_type N) { - if(N > _cap) { - _enlarge(N<<1); - } - _num = N; - } - - void reserve(size_type C) { - if(C > _cap) { - _enlarge(C); - } - } - - bool empty() const { return _num == 0; } - bool in_stack() const { return _data == reinterpret_cast(_stack); } - - size_type size() const { return _num; } - size_type capacity() const { return _cap; } - size_type max_size() const { return std::numeric_limits::max(); } - - bool operator == (const PassiveVector& rhs) const { - if(_num != rhs._num) { - return false; - } - return std::memcmp(_data, rhs._data, _num * sizeof(T)) == 0; - } - - private: - - char _stack[S*sizeof(T)]; - - T* _data; - - size_type _num; - size_type _cap; - - A _allocator; - - void _enlarge(size_type new_cap) { - - auto new_data = _allocator.allocate(new_cap); - - std::memcpy(new_data, _data, sizeof(T) * _num); - - if(!in_stack()) { - _allocator.deallocate(_data, _cap); - } - - _cap = new_cap; - _data = new_data; - } -}; - - -} // end of namespace tf. ---------------------------------------------------- - diff --git a/bundled/taskflow-2.5.0/include/taskflow/utility/stringify.hpp b/bundled/taskflow-2.5.0/include/taskflow/utility/stringify.hpp deleted file mode 100644 index 6efd1a8c2b..0000000000 --- a/bundled/taskflow-2.5.0/include/taskflow/utility/stringify.hpp +++ /dev/null @@ -1,21 +0,0 @@ -#pragma once - -#include -#include - -namespace tf { - -// Procedure: stringify -template -void ostreamize(std::ostringstream& oss, T&& token) { - oss << std::forward(token); -} - -// Procedure: stringify -template -void ostreamize(std::ostringstream& oss, T&& token, Rest&&... 
rest) { - oss << std::forward(token); - ostreamize(oss, std::forward(rest)...); -} - -} // end of namespace tf ----------------------------------------------------- diff --git a/bundled/taskflow-2.5.0/include/taskflow/utility/traits.hpp b/bundled/taskflow-2.5.0/include/taskflow/utility/traits.hpp deleted file mode 100644 index 1250ddfa45..0000000000 --- a/bundled/taskflow-2.5.0/include/taskflow/utility/traits.hpp +++ /dev/null @@ -1,308 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../nstd/variant.hpp" - -namespace tf { - -//----------------------------------------------------------------------------- -// Traits -//----------------------------------------------------------------------------- - -// Macro to check whether a class has a member function -#define define_has_member(member_name) \ -template \ -class has_member_##member_name \ -{ \ - typedef char yes_type; \ - typedef long no_type; \ - template static yes_type test(decltype(&U::member_name)); \ - template static no_type test(...); \ - public: \ - static constexpr bool value = sizeof(test(0)) == sizeof(yes_type); \ -} - -#define has_member(class_, member_name) has_member_##member_name::value - -// Struct: dependent_false -template -struct dependent_false { - static constexpr bool value = false; -}; - -template -constexpr auto dependent_false_v = dependent_false::value; - -//----------------------------------------------------------------------------- -// Move-On-Copy -//----------------------------------------------------------------------------- - -// Struct: MoC -template -struct MoC { - - MoC(T&& rhs) : object(std::move(rhs)) {} - MoC(const MoC& other) : object(std::move(other.object)) {} - - T& get() { return object; } - - mutable T object; -}; - -template -auto make_moc(T&& m) { - return MoC(std::forward(m)); -} - -//----------------------------------------------------------------------------- -// Functors. -//----------------------------------------------------------------------------- - -//// Overloadded. -//template -//struct Functors : Ts... { -// using Ts::operator()... ; -//}; -// -//template -//Functors(Ts...) 
-> Functors; - -// ---------------------------------------------------------------------------- -// callable traits -// ---------------------------------------------------------------------------- - -template -struct is_invocable : - std::is_constructible< - std::function, - std::reference_wrapper::type> - > { -}; - -template -constexpr bool is_invocable_v = is_invocable::value; - -template -struct is_invocable_r : - std::is_constructible< - std::function, - std::reference_wrapper::type> - > { -}; - -template -constexpr bool is_invocable_r_v = is_invocable_r::value; - - -// ---------------------------------------------------------------------------- -// Function Traits -// reference: https://github.com/ros2/rclcpp -// ---------------------------------------------------------------------------- - -template -struct tuple_tail; - -template -struct tuple_tail> { - using type = std::tuple; -}; - -// std::function -template -struct function_traits -{ - using arguments = typename tuple_tail< - typename function_traits::argument_tuple_type - >::type; - - static constexpr size_t arity = std::tuple_size::value; - - template - struct argument { - static_assert(N < arity, "error: invalid parameter index."); - using type = std::tuple_element_t; - }; - - template - using argument_t = typename argument::type; - - using return_type = typename function_traits::return_type; -}; - -// Free functions -template -struct function_traits { - - using return_type = R; - using argument_tuple_type = std::tuple; - - static constexpr size_t arity = sizeof...(Args); - - template - struct argument { - static_assert(N < arity, "error: invalid parameter index."); - using type = std::tuple_element_t>; - }; - - template - using argument_t = typename argument::type; -}; - -// function pointer -template -struct function_traits : function_traits { -}; - -// function reference -template -struct function_traits : function_traits { -}; - -// immutable lambda -template -struct function_traits - : function_traits -{}; - -// mutable lambda -template -struct function_traits - : function_traits -{}; - -/*// std::bind for object methods -template -#if defined _LIBCPP_VERSION // libc++ (Clang) -struct function_traits> -#elif defined _GLIBCXX_RELEASE // glibc++ (GNU C++ >= 7.1) -struct function_traits> -#elif defined __GLIBCXX__ // glibc++ (GNU C++) -struct function_traits(FArgs ...)>> -#elif defined _MSC_VER // MS Visual Studio -struct function_traits< - std::_Binder> -#else -#error "Unsupported C++ compiler / standard library" -#endif - : function_traits -{}; - -// std::bind for object const methods -template -#if defined _LIBCPP_VERSION // libc++ (Clang) -struct function_traits> -#elif defined _GLIBCXX_RELEASE // glibc++ (GNU C++ >= 7.1) -struct function_traits> -#elif defined __GLIBCXX__ // glibc++ (GNU C++) -struct function_traits(FArgs ...)>> -#elif defined _MSC_VER // MS Visual Studio -struct function_traits< - std::_Binder> -#else -#error "Unsupported C++ compiler / standard library" -#endif - : function_traits -{}; - -// std::bind for free functions -template -#if defined _LIBCPP_VERSION // libc++ (Clang) -struct function_traits> -#elif defined __GLIBCXX__ // glibc++ (GNU C++) -struct function_traits> -#elif defined _MSC_VER // MS Visual Studio -struct function_traits> -#else -#error "Unsupported C++ compiler / standard library" -#endif - : function_traits -{}; */ - -// decay to the raw type -template -struct function_traits : function_traits {}; - -template -struct function_traits : function_traits {}; - - -// 
---------------------------------------------------------------------------- -// nstd::variant -// ---------------------------------------------------------------------------- -template -struct get_index; - -template -struct get_index_impl {}; - -template -struct get_index_impl : std::integral_constant{}; - -template -struct get_index_impl : get_index_impl{}; - -template -struct get_index> : get_index_impl<0, T, Ts...>{}; - -template -constexpr auto get_index_v = get_index::value; - -// ---------------------------------------------------------------------------- -// is_pod -//----------------------------------------------------------------------------- -template -struct is_pod { - static const bool value = std::is_trivial::value && - std::is_standard_layout::value; -}; - -template -constexpr bool is_pod_v = is_pod::value; - -// ---------------------------------------------------------------------------- -// bit_cast -//----------------------------------------------------------------------------- -template -typename std::enable_if< - (sizeof(To) == sizeof(From)) && - std::is_trivially_copyable::value && - std::is_trivial::value, - // this implementation requires that To is trivially default constructible - To ->::type -// constexpr support needs compiler magic -bit_cast(const From &src) noexcept { - To dst; - std::memcpy(&dst, &src, sizeof(To)); - return dst; -} - -} // end of namespace tf. --------------------------------------------------- - - - diff --git a/bundled/taskflow-2.5.0/LICENSE b/bundled/taskflow-3.6.0/LICENSE similarity index 84% rename from bundled/taskflow-2.5.0/LICENSE rename to bundled/taskflow-3.6.0/LICENSE index 871f579f65..37524bc431 100644 --- a/bundled/taskflow-2.5.0/LICENSE +++ b/bundled/taskflow-3.6.0/LICENSE @@ -1,9 +1,6 @@ -Taskflow LICENSE +TASKFLOW MIT LICENSE -Copyright (c) 2018-2020 T.-W. Huang, C.-X. Lin, G. Guo, and M. Wong - -University of Utah, Salt Lake City, UT, USA -University of Illinois at Urbana-Champaign, IL, USA +Copyright (c) 2018-2022 Dr. Tsung-Wei Huang Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/bundled/taskflow-3.6.0/README.md b/bundled/taskflow-3.6.0/README.md new file mode 100644 index 0000000000..cbb21a0757 --- /dev/null +++ b/bundled/taskflow-3.6.0/README.md @@ -0,0 +1,450 @@ +# Taskflow + + +[![Ubuntu](https://github.com/taskflow/taskflow/workflows/Ubuntu/badge.svg)](https://github.com/taskflow/taskflow/actions?query=workflow%3AUbuntu) +[![macOS](https://github.com/taskflow/taskflow/workflows/macOS/badge.svg)](https://github.com/taskflow/taskflow/actions?query=workflow%3AmacOS) +[![Windows](https://github.com/taskflow/taskflow/workflows/Windows/badge.svg)](https://github.com/taskflow/taskflow/actions?query=workflow%3AWindows) +[![Wiki](image/api-doc.svg)][documentation] +[![TFProf](image/tfprof.svg)](https://taskflow.github.io/tfprof/) +[![Cite](image/cite-tpds.svg)][TPDS21] + +Taskflow helps you quickly write parallel and heterogeneous task programs in modern C++ + +# Why Taskflow? + +Taskflow is faster, more expressive, and easier for drop-in integration +than many of existing task programming frameworks +in handling complex parallel workloads. + +![](image/performance.png) + +Taskflow lets you quickly implement task decomposition strategies +that incorporate both regular and irregular compute patterns, +together with an efficient *work-stealing* scheduler to optimize your multithreaded performance. 
+
+| [Static Tasking](#get-started-with-taskflow) | [Dynamic Tasking](#dynamic-tasking) |
+| :------------: | :-------------: |
+| ![](image/static_graph.svg) | |
+
+Taskflow supports conditional tasking for you to make rapid control-flow decisions
+across dependent tasks to implement cycles and conditions that were otherwise difficult to do
+with existing tools.
+
+| [Conditional Tasking](#conditional-tasking) |
+| :-----------------: |
+| ![](image/condition.svg) |
+
+Taskflow is composable. You can create large parallel graphs through
+composition of modular and reusable blocks that are easier to optimize
+at an individual scope.
+
+| [Taskflow Composition](#composable-tasking) |
+| :---------------: |
+|![](image/framework.svg)|
+
+Taskflow supports heterogeneous tasking for you to
+accelerate a wide range of scientific computing applications
+by harnessing the power of CPU-GPU collaborative computing.
+
+| [Concurrent CPU-GPU Tasking](#concurrent-cpu-gpu-tasking) |
+| :-----------------: |
+| ![](image/cudaflow.svg) |
+
+Taskflow provides the visualization and tooling needed for profiling Taskflow programs.
+
+| [Taskflow Profiler](https://taskflow.github.io/tfprof) |
+| :-----------------: |
+| ![](image/tfprof.png) |
+
+We are committed to supporting trustworthy development for both academic and industrial research projects
+in parallel computing. Check out [Who is Using Taskflow](https://taskflow.github.io/#tag_users) and what our users say:
+
++ *"Taskflow is the cleanest Task API I've ever seen." [Damien Hocking @Corelium Inc](http://coreliuminc.com)*
++ *"Taskflow has a very simple and elegant tasking interface. The performance also scales very well." [Glen Fraser][totalgee]*
++ *"Taskflow lets me handle parallel processing in a smart way." [Hayabusa @Learning](https://cpp-learning.com/cpp-taskflow/)*
++ *"Taskflow improves the throughput of our graph engine in just a few hours of coding." [Jean-Michaël @KDAB](https://ossia.io/)*
++ *"Best poster award for open-source parallel programming library." [Cpp Conference 2018][Cpp Conference 2018]*
++ *"Second Prize of Open-source Software Competition." [ACM Multimedia Conference 2019](https://tsung-wei-huang.github.io/img/mm19-ossc-award.jpg)*
+
+See a quick [presentation](https://taskflow.github.io/) and
+visit the [documentation][documentation] to learn more about Taskflow.
+Technical details are described in our [IEEE TPDS paper][TPDS21].
+
+# Start Your First Taskflow Program
+
+The following program (`simple.cpp`) creates four tasks
+`A`, `B`, `C`, and `D`, where `A` runs before `B` and `C`, and `D`
+runs after `B` and `C`.
+When `A` finishes, `B` and `C` can run in parallel.
+
+```cpp
+#include <taskflow/taskflow.hpp>  // Taskflow is header-only
+
+int main(){
+
+  tf::Executor executor;
+  tf::Taskflow taskflow;
+
+  auto [A, B, C, D] = taskflow.emplace(  // create four tasks
+    [] () { std::cout << "TaskA\n"; },
+    [] () { std::cout << "TaskB\n"; },
+    [] () { std::cout << "TaskC\n"; },
+    [] () { std::cout << "TaskD\n"; }
+  );
+
+  A.precede(B, C);  // A runs before B and C
+  D.succeed(B, C);  // D runs after B and C
+
+  executor.run(taskflow).wait();
+
+  return 0;
+}
+```
+
+Taskflow is *header-only* and there is no wrangling with installation.
+To compile the program, clone the Taskflow project and
+tell the compiler to include the [headers](./taskflow/).
+
+```bash
+~$ git clone https://github.com/taskflow/taskflow.git  # clone it only once
+~$ g++ -std=c++17 examples/simple.cpp -I.
-O2 -pthread -o simple +~$ ./simple +TaskA +TaskC +TaskB +TaskD +``` + +# Visualize Your First Taskflow Program + +Taskflow comes with a built-in profiler, +[TFProf](https://taskflow.github.io/tfprof/), +for you to profile and visualize taskflow programs +in an easy-to-use web-based interface. + +![](doxygen/images/tfprof.png) + +```bash +# run the program with the environment variable TF_ENABLE_PROFILER enabled +~$ TF_ENABLE_PROFILER=simple.json ./simple +~$ cat simple.json +[ +{"executor":"0","data":[{"worker":0,"level":0,"data":[{"span":[172,186],"name":"0_0","type":"static"},{"span":[187,189],"name":"0_1","type":"static"}]},{"worker":2,"level":0,"data":[{"span":[93,164],"name":"2_0","type":"static"},{"span":[170,179],"name":"2_1","type":"static"}]}]} +] +# paste the profiling json data to https://taskflow.github.io/tfprof/ +``` + +In addition to execution diagram, you can dump the graph to a DOT format +and visualize it using a number of free [GraphViz][GraphViz] tools. + +``` +// dump the taskflow graph to a DOT format through std::cout +taskflow.dump(std::cout); +``` + +
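If GraphViz is available, the dumped DOT text can also be rendered offline. The following is a minimal sketch; the `simple.dot` file name and the redirection of the program's output are assumptions made for illustration:

```bash
# hypothetical workflow: capture the DOT text emitted via taskflow.dump(std::cout),
# then render it with the GraphViz 'dot' tool
./simple > simple.dot
dot -Tsvg simple.dot -o simple.svg
```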

+ +# Express Task Graph Parallelism + +Taskflow empowers users with both static and dynamic task graph constructions +to express end-to-end parallelism in a task graph that +embeds in-graph control flow. + +1. [Create a Subflow Graph](#create-a-subflow-graph) +2. [Integrate Control Flow to a Task Graph](#integrate-control-flow-to-a-task-graph) +3. [Offload a Task to a GPU](#offload-a-task-to-a-gpu) +4. [Compose Task Graphs](#compose-task-graphs) +5. [Launch Asynchronous Tasks](#launch-asynchronous-tasks) +6. [Execute a Taskflow](#execute-a-taskflow) +7. [Leverage Standard Parallel Algorithms](#leverage-standard-parallel-algorithms) + +## Create a Subflow Graph + +Taskflow supports *dynamic tasking* for you to create a subflow +graph from the execution of a task to perform dynamic parallelism. +The following program spawns a task dependency graph parented at task `B`. + +```cpp +tf::Task A = taskflow.emplace([](){}).name("A"); +tf::Task C = taskflow.emplace([](){}).name("C"); +tf::Task D = taskflow.emplace([](){}).name("D"); + +tf::Task B = taskflow.emplace([] (tf::Subflow& subflow) { + tf::Task B1 = subflow.emplace([](){}).name("B1"); + tf::Task B2 = subflow.emplace([](){}).name("B2"); + tf::Task B3 = subflow.emplace([](){}).name("B3"); + B3.succeed(B1, B2); // B3 runs after B1 and B2 +}).name("B"); + +A.precede(B, C); // A runs before B and C +D.succeed(B, C); // D runs after B and C +``` + +

+ +## Integrate Control Flow to a Task Graph + +Taskflow supports *conditional tasking* for you to make rapid +control-flow decisions across dependent tasks to implement cycles +and conditions in an *end-to-end* task graph. + +```cpp +tf::Task init = taskflow.emplace([](){}).name("init"); +tf::Task stop = taskflow.emplace([](){}).name("stop"); + +// creates a condition task that returns a random binary +tf::Task cond = taskflow.emplace( + [](){ return std::rand() % 2; } +).name("cond"); + +init.precede(cond); + +// creates a feedback loop {0: cond, 1: stop} +cond.precede(cond, stop); +``` + +
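+The value returned by a condition task selects which successor runs next:
+returning 0 runs the first task passed to `precede`, 1 the second, and so on,
+which is why the feedback loop above maps 0 to `cond` and 1 to `stop`.
+A minimal sketch of a three-way branch (task names are illustrative):
+
+```cpp
+tf::Task branch = taskflow.emplace([](){ return std::rand() % 3; }).name("branch");
+tf::Task path0  = taskflow.emplace([](){ std::cout << "path 0\n"; }).name("path0");
+tf::Task path1  = taskflow.emplace([](){ std::cout << "path 1\n"; }).name("path1");
+tf::Task path2  = taskflow.emplace([](){ std::cout << "path 2\n"; }).name("path2");
+
+// returning 0, 1, or 2 runs path0, path1, or path2, respectively
+branch.precede(path0, path1, path2);
+```
+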

+
+## Offload a Task to a GPU
+
+Taskflow supports GPU tasking for you to accelerate a wide range of scientific computing applications by harnessing the power of CPU-GPU collaborative computing using CUDA.
+
+```cpp
+__global__ void saxpy(size_t N, float alpha, float* dx, float* dy) {
+  int i = blockIdx.x*blockDim.x + threadIdx.x;
+  if (i < N) {
+    dy[i] = alpha*dx[i] + dy[i];
+  }
+}
+
+tf::Task cudaflow = taskflow.emplace([&](tf::cudaFlow& cf) {
+
+  // data copy tasks
+  tf::cudaTask h2d_x = cf.copy(dx, hx.data(), N).name("h2d_x");
+  tf::cudaTask h2d_y = cf.copy(dy, hy.data(), N).name("h2d_y");
+  tf::cudaTask d2h_x = cf.copy(hx.data(), dx, N).name("d2h_x");
+  tf::cudaTask d2h_y = cf.copy(hy.data(), dy, N).name("d2h_y");
+
+  // kernel task with parameters to launch the saxpy kernel
+  tf::cudaTask kernel = cf.kernel(
+    (N+255)/256, 256, 0, saxpy, N, 2.0f, dx, dy
+  ).name("saxpy");
+
+  kernel.succeed(h2d_x, h2d_y)
+        .precede(d2h_x, d2h_y);
+}).name("cudaFlow");
+```
+
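+The cudaFlow example above assumes that `N`, the host vectors `hx`/`hy`, and the
+device buffers `dx`/`dy` already exist. A minimal sketch of that setup using plain
+CUDA runtime calls (not part of the original example; requires `<vector>` and the
+CUDA runtime headers):
+
+```cpp
+const size_t N = 1 << 20;
+std::vector<float> hx(N, 1.0f), hy(N, 2.0f);  // host data
+
+float *dx = nullptr, *dy = nullptr;           // device buffers of N floats each
+cudaMalloc(&dx, N*sizeof(float));
+cudaMalloc(&dy, N*sizeof(float));
+
+// ... create the taskflow with the cudaFlow task shown above, then
+// executor.run(taskflow).wait();
+
+cudaFree(dx);
+cudaFree(dy);
+```
+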

+ +## Compose Task Graphs + +Taskflow is composable. +You can create large parallel graphs through composition of modular +and reusable blocks that are easier to optimize at an individual scope. + +```cpp +tf::Taskflow f1, f2; + +// create taskflow f1 of two tasks +tf::Task f1A = f1.emplace([]() { std::cout << "Task f1A\n"; }) + .name("f1A"); +tf::Task f1B = f1.emplace([]() { std::cout << "Task f1B\n"; }) + .name("f1B"); + +// create taskflow f2 with one module task composed of f1 +tf::Task f2A = f2.emplace([]() { std::cout << "Task f2A\n"; }) + .name("f2A"); +tf::Task f2B = f2.emplace([]() { std::cout << "Task f2B\n"; }) + .name("f2B"); +tf::Task f2C = f2.emplace([]() { std::cout << "Task f2C\n"; }) + .name("f2C"); + +tf::Task f1_module_task = f2.composed_of(f1) + .name("module"); + +f1_module_task.succeed(f2A, f2B) + .precede(f2C); +``` + +
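+To execute the composed graph, you run the top-level taskflow as usual; the module
+task then runs every task inside `f1`. A short usage sketch:
+
+```cpp
+tf::Executor executor;
+executor.run(f2).wait();  // runs f2A and f2B, the f1 module (f1A, f1B), then f2C
+```
+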

+
+## Launch Asynchronous Tasks
+
+Taskflow supports *asynchronous* tasking.
+You can launch tasks asynchronously to incorporate independent, dynamic
+parallelism in your taskflows.
+
+```cpp
+tf::Executor executor;
+tf::Taskflow taskflow;
+
+// create asynchronous tasks directly from an executor
+tf::Future<std::optional<int>> future = executor.async([](){
+  std::cout << "async task returns 1\n";
+  return 1;
+});
+executor.silent_async([](){ std::cout << "async task of no return\n"; });
+
+// launch an asynchronous task from a running task
+taskflow.emplace([&](){
+  executor.async([](){ std::cout << "async task within a task\n"; });
+});
+
+executor.run(taskflow).wait();
+```
+
+## Execute a Taskflow
+
+The executor provides several *thread-safe* methods to run a taskflow.
+You can run a taskflow once, multiple times, or until a stopping criterion is met.
+These methods are non-blocking and return a `tf::Future<void>`
+for you to query the execution status.
+
+```cpp
+// run the taskflow once
+tf::Future<void> run_once = executor.run(taskflow);
+
+// wait on this run to finish
+run_once.get();
+
+// run the taskflow four times
+executor.run_n(taskflow, 4);
+
+// run the taskflow five times
+executor.run_until(taskflow, [counter=5]() mutable { return --counter == 0; });
+
+// block the executor until all submitted taskflows complete
+executor.wait_for_all();
+```
+
+## Leverage Standard Parallel Algorithms
+
+Taskflow defines algorithms for you to quickly express common parallel
+patterns using standard C++ syntax,
+such as parallel iterations, parallel reductions, and parallel sort.
+
+```cpp
+// standard parallel CPU algorithms
+tf::Task task1 = taskflow.for_each( // assign each element to 100 in parallel
+  first, last, [] (auto& i) { i = 100; }
+);
+tf::Task task2 = taskflow.reduce(   // reduce a range of items in parallel
+  first, last, init, [] (auto a, auto b) { return a + b; }
+);
+tf::Task task3 = taskflow.sort(     // sort a range of items in parallel
+  first, last, [] (auto a, auto b) { return a < b; }
+);
+
+// standard parallel GPU algorithms
+tf::cudaTask cuda1 = cudaflow.for_each( // assign each element to 100 on GPU
+  dfirst, dlast, [] __device__ (auto& i) { i = 100; }
+);
+tf::cudaTask cuda2 = cudaflow.reduce(   // reduce a range of items on GPU
+  dfirst, dlast, init, [] __device__ (auto a, auto b) { return a + b; }
+);
+tf::cudaTask cuda3 = cudaflow.sort(     // sort a range of items on GPU
+  dfirst, dlast, [] __device__ (auto a, auto b) { return a < b; }
+);
+```
+
+Additionally, Taskflow provides composable graph building blocks for you to
+efficiently implement common parallel algorithms, such as parallel pipeline.
+
+```cpp
+// create a pipeline to propagate five tokens through three serial stages
+tf::Pipeline pl(num_parallel_lines,
+  tf::Pipe{tf::PipeType::SERIAL, [](tf::Pipeflow& pf) {
+    if(pf.token() == 5) {
+      pf.stop();
+    }
+  }},
+  tf::Pipe{tf::PipeType::SERIAL, [](tf::Pipeflow& pf) {
+    printf("stage 2: input buffer[%zu] = %d\n", pf.line(), buffer[pf.line()]);
+  }},
+  tf::Pipe{tf::PipeType::SERIAL, [](tf::Pipeflow& pf) {
+    printf("stage 3: input buffer[%zu] = %d\n", pf.line(), buffer[pf.line()]);
+  }}
+);
+taskflow.composed_of(pl);
+executor.run(taskflow).wait();
+```
+
+# Supported Compilers
+
+To use Taskflow, you only need a compiler that supports C++17:
+
++ GNU C++ Compiler at least v8.4 with -std=c++17
++ Clang C++ Compiler at least v6.0 with -std=c++17
++ Microsoft Visual Studio at least v19.27 with /std:c++17
++ AppleClang Xcode Version at least v12.0 with -std=c++17
++ Nvidia CUDA Toolkit and Compiler (nvcc) at least v11.1 with -std=c++17
++ Intel C++ Compiler at least v19.0.1 with -std=c++17
++ Intel DPC++ Clang Compiler at least v13.0.0 with -std=c++17 and SYCL20
+
+Taskflow works on Linux, Windows, and Mac OS X.
+
+# Learn More about Taskflow
+
+Visit our [project website][Project Website] and [documentation][documentation]
+to learn more about Taskflow. To get involved:
+
+ + See [release notes][release notes] to stay up to date with the newest versions
+ + Read the step-by-step tutorial at [cookbook][cookbook]
+ + Submit an issue at [GitHub issues][GitHub issues]
+ + Find out our technical details at [references][references]
+ + Watch our technical talks at YouTube
+
+| [CppCon20 Tech Talk][cppcon20 talk] | [MUC++ Tech Talk](https://www.youtube.com/watch?v=u8Mc_WgGwVY) |
+| :------------: | :-------------: |
+| ![](doxygen/images/cppcon20-thumbnail.jpg) | |
+
+We are committed to supporting trustworthy development for
+both academic and industrial research projects in parallel
+and heterogeneous computing.
+If you are using Taskflow, please cite the following paper we published in 2021 IEEE TPDS:
+
++ Tsung-Wei Huang, Dian-Lun Lin, Chun-Xun Lin, and Yibo Lin, "[Taskflow: A Lightweight Parallel and Heterogeneous Task Graph Computing System](https://tsung-wei-huang.github.io/papers/tpds21-taskflow.pdf)," IEEE Transactions on Parallel and Distributed Systems (TPDS), vol. 33, no. 6, pp. 1303-1320, June 2022
+
+More importantly, we appreciate all Taskflow [contributors][contributors] and
+the following organizations for sponsoring the Taskflow project!
+
+| | | | |
+|:-------------------------:|:-------------------------:|:-------------------------:|:-------------------------:|
+|| | | |
+| | | | |
+
+# License
+
+Taskflow is licensed under the [MIT License](./LICENSE).
+You are completely free to re-distribute your work derived from Taskflow.
+ +* * * + +[Tsung-Wei Huang]: https://tsung-wei-huang.github.io/ +[GitHub releases]: https://github.com/taskflow/taskflow/releases +[GitHub issues]: https://github.com/taskflow/taskflow/issues +[GitHub insights]: https://github.com/taskflow/taskflow/pulse +[GitHub pull requests]: https://github.com/taskflow/taskflow/pulls +[GraphViz]: https://www.graphviz.org/ +[Project Website]: https://taskflow.github.io/ +[cppcon20 talk]: https://www.youtube.com/watch?v=MX15huP5DsM +[contributors]: https://taskflow.github.io/taskflow/contributors.html +[totalgee]: https://github.com/totalgee +[NSF]: https://www.nsf.gov/ +[UIUC]: https://illinois.edu/ +[CSL]: https://csl.illinois.edu/ +[UofU]: https://www.utah.edu/ +[documentation]: https://taskflow.github.io/taskflow/index.html +[release notes]: https://taskflow.github.io/taskflow/Releases.html +[cookbook]: https://taskflow.github.io/taskflow/pages.html +[references]: https://taskflow.github.io/taskflow/References.html +[PayMe]: https://www.paypal.me/twhuang/10 +[email me]: mailto:twh760812@gmail.com +[Cpp Conference 2018]: https://github.com/CppCon/CppCon2018 +[TPDS21]: https://tsung-wei-huang.github.io/papers/tpds21-taskflow.pdf + diff --git a/bundled/taskflow-3.6.0/include/algorithm/critical.hpp b/bundled/taskflow-3.6.0/include/algorithm/critical.hpp new file mode 100644 index 0000000000..c781d28271 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/algorithm/critical.hpp @@ -0,0 +1,78 @@ +#pragma once + +#include "../core/task.hpp" + +/** +@file critical.hpp +@brief critical include file +*/ + +namespace tf { + +// ---------------------------------------------------------------------------- +// CriticalSection +// ---------------------------------------------------------------------------- + +/** +@class CriticalSection + +@brief class to create a critical region of limited workers to run tasks + +tf::CriticalSection is a warpper over tf::Semaphore and is specialized for +limiting the maximum concurrency over a set of tasks. +A critical section starts with an initial count representing that limit. +When a task is added to the critical section, +the task acquires and releases the semaphore internal to the critical section. +This design avoids explicit call of tf::Task::acquire and tf::Task::release. +The following example creates a critical section of one worker and adds +the five tasks to the critical section. + +@code{.cpp} +tf::Executor executor(8); // create an executor of 8 workers +tf::Taskflow taskflow; + +// create a critical section of 1 worker +tf::CriticalSection critical_section(1); + +tf::Task A = taskflow.emplace([](){ std::cout << "A" << std::endl; }); +tf::Task B = taskflow.emplace([](){ std::cout << "B" << std::endl; }); +tf::Task C = taskflow.emplace([](){ std::cout << "C" << std::endl; }); +tf::Task D = taskflow.emplace([](){ std::cout << "D" << std::endl; }); +tf::Task E = taskflow.emplace([](){ std::cout << "E" << std::endl; }); + +critical_section.add(A, B, C, D, E); + +executor.run(taskflow).wait(); +@endcode + +*/ +class CriticalSection : public Semaphore { + + public: + + /** + @brief constructs a critical region of a limited number of workers + */ + explicit CriticalSection(size_t max_workers = 1); + + /** + @brief adds a task into the critical region + */ + template + void add(Tasks...tasks); +}; + +inline CriticalSection::CriticalSection(size_t max_workers) : + Semaphore {max_workers} { +} + +template +void CriticalSection::add(Tasks... 
tasks) { + (tasks.acquire(*this), ...); + (tasks.release(*this), ...); +} + + +} // end of namespace tf. --------------------------------------------------- + + diff --git a/bundled/taskflow-3.6.0/include/algorithm/data_pipeline.hpp b/bundled/taskflow-3.6.0/include/algorithm/data_pipeline.hpp new file mode 100644 index 0000000000..03935480b8 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/algorithm/data_pipeline.hpp @@ -0,0 +1,637 @@ +#pragma once + +#include "pipeline.hpp" + + +namespace tf { + +// ---------------------------------------------------------------------------- +// Class Definition: DataPipe +// ---------------------------------------------------------------------------- + +/** +@class DataPipe + +@brief class to create a stage in a data-parallel pipeline + +A data pipe represents a stage of a data-parallel pipeline. +A data pipe can be either @em parallel direction or @em serial direction +(specified by tf::PipeType) and is associated with a callable to invoke +by the pipeline scheduler. + +You need to use the template function, tf::make_data_pipe, to create +a data pipe. The input and output types of a tf::DataPipe should be decayed types +(though the library will always decay them for you using `std::decay`) +to allow internal storage to work. +The data will be passed by reference to your callable, at which you can take +it by copy or reference. + +@code{.cpp} +tf::make_data_pipe( + tf::PipeType::SERIAL, + [](int& input) {return std::to_string(input + 100);} +); +@endcode + +In addition to the data, you callable can take an additional reference +of tf::Pipeflow in the second argument to probe the runtime information +for a stage task, such as its line number and token number: + +@code{.cpp} +tf::make_data_pipe( + tf::PipeType::SERIAL, + [](int& input, tf::Pipeflow& pf) { + printf("token=%lu, line=%lu\n", pf.token(), pf.line()); + return std::to_string(input + 100); + } +); +@endcode + +*/ +template +class DataPipe { + + template + friend class DataPipeline; + + public: + + /** + @brief callable type of the data pipe + */ + using callable_t = C; + + /** + @brief input type of the data pipe + */ + using input_t = Input; + + /** + @brief output type of the data pipe + */ + using output_t = Output; + + /** + @brief default constructor + */ + DataPipe() = default; + + /** + @brief constructs a data pipe + + You should use the helper function, tf::make_data_pipe, + to create a DataPipe object, especially when you need tf::DataPipe + to automatically deduct the lambda type. + */ + DataPipe(PipeType d, callable_t&& callable) : + _type{d}, _callable{std::forward(callable)} { + } + + /** + @brief queries the type of the data pipe + + A data pipe can be either parallel (tf::PipeType::PARALLEL) or serial + (tf::PipeType::SERIAL). + */ + PipeType type() const { + return _type; + } + + /** + @brief assigns a new type to the data pipe + */ + void type(PipeType type) { + _type = type; + } + + /** + @brief assigns a new callable to the data pipe + + @tparam U callable type + @param callable a callable object constructible from the callable type + of this data pipe + + Assigns a new callable to the pipe using universal forwarding. 
+ */ + template + void callable(U&& callable) { + _callable = std::forward(callable); + } + + private: + + PipeType _type; + + callable_t _callable; +}; + +/** +@brief function to construct a data pipe (tf::DataPipe) + +@tparam Input input data type +@tparam Output output data type +@tparam C callable type + +tf::make_data_pipe is a helper function to create a data pipe (tf::DataPipe) +in a data-parallel pipeline (tf::DataPipeline). +The first argument specifies the direction of the data pipe, +either tf::PipeType::SERIAL or tf::PipeType::PARALLEL, +and the second argument is a callable to invoke by the pipeline scheduler. +Input and output data types are specified via template parameters, +which will always be decayed by the library to its original form +for storage purpose. +The callable must take the input data type in its first argument +and returns a value of the output data type. + +@code{.cpp} +tf::make_data_pipe( + tf::PipeType::SERIAL, + [](int& input) { + return std::to_string(input + 100); + } +); +@endcode + +The callable can additionally take a reference of tf::Pipeflow, +which allows you to query the runtime information of a stage task, +such as its line number and token number. + +@code{.cpp} +tf::make_data_pipe( + tf::PipeType::SERIAL, + [](int& input, tf::Pipeflow& pf) { + printf("token=%lu, line=%lu\n", pf.token(), pf.line()); + return std::to_string(input + 100); + } +); +@endcode + +*/ +template +auto make_data_pipe(PipeType d, C&& callable) { + return DataPipe(d, std::forward(callable)); +} + +// ---------------------------------------------------------------------------- +// Class Definition: DataPipeline +// ---------------------------------------------------------------------------- + +/** +@class DataPipeline + +@brief class to create a data-parallel pipeline scheduling framework + +@tparam Ps data pipe types + +Similar to tf::Pipeline, a tf::DataPipeline is a composable graph object +for users to create a data-parallel pipeline scheduling framework +using a module task in a taskflow. +The only difference is that tf::DataPipeline provides a data abstraction +for users to quickly express dataflow in a pipeline. +The following example creates a data-parallel pipeline of three stages +that generate dataflow from `void` to `int`, `std::string`, `float`, and `void`. 
+ +@code{.cpp} +#include +#include + +int main() { + + // data flow => void -> int -> std::string -> float -> void + tf::Taskflow taskflow("pipeline"); + tf::Executor executor; + + const size_t num_lines = 4; + + tf::DataPipeline pl(num_lines, + tf::make_data_pipe(tf::PipeType::SERIAL, [&](tf::Pipeflow& pf) -> int{ + if(pf.token() == 5) { + pf.stop(); + return 0; + } + else { + return pf.token(); + } + }), + tf::make_data_pipe(tf::PipeType::SERIAL, [](int& input) { + return std::to_string(input + 100); + }), + tf::make_data_pipe(tf::PipeType::SERIAL, [](std::string& input) { + std::cout << input << std::endl; + }) + ); + + // build the pipeline graph using composition + taskflow.composed_of(pl).name("pipeline"); + + // dump the pipeline graph structure (with composition) + taskflow.dump(std::cout); + + // run the pipeline + executor.run(taskflow).wait(); + + return 0; +} +@endcode + +The pipeline schedules five tokens over four parallel lines in a circular fashion, +as depicted below: + +@code{.shell-session} +o -> o -> o +| | | +v v v +o -> o -> o +| | | +v v v +o -> o -> o +| | | +v v v +o -> o -> o +@endcode +*/ +template +class DataPipeline { + + static_assert(sizeof...(Ps)>0, "must have at least one pipe"); + + /** + @private + */ + struct Line { + std::atomic join_counter; + }; + + /** + @private + */ + struct PipeMeta { + PipeType type; + }; + + + public: + + /** + @brief internal storage type for each data token (default std::variant) + */ + using data_t = unique_variant_t, + std::monostate, + std::decay_t>... + >>; + + /** + @brief constructs a data-parallel pipeline object + + @param num_lines the number of parallel lines + @param ps a list of pipes + + Constructs a data-parallel pipeline of up to @c num_lines parallel lines to schedule + tokens through the given linear chain of pipes. + The first pipe must define a serial direction (tf::PipeType::SERIAL) + or an exception will be thrown. + */ + DataPipeline(size_t num_lines, Ps&&... ps); + + /** + @brief constructs a data-parallel pipeline object + + @param num_lines the number of parallel lines + @param ps a tuple of pipes + + Constructs a data-parallel pipeline of up to @c num_lines parallel lines to schedule + tokens through the given linear chain of pipes stored in a std::tuple. + The first pipe must define a serial direction (tf::PipeType::SERIAL) + or an exception will be thrown. + */ + DataPipeline(size_t num_lines, std::tuple&& ps); + + /** + @brief queries the number of parallel lines + + The function returns the number of parallel lines given by the user + upon the construction of the pipeline. + The number of lines represents the maximum parallelism this pipeline + can achieve. + */ + size_t num_lines() const noexcept; + + /** + @brief queries the number of pipes + + The Function returns the number of pipes given by the user + upon the construction of the pipeline. + */ + constexpr size_t num_pipes() const noexcept; + + /** + @brief resets the pipeline + + Resetting the pipeline to the initial state. After resetting a pipeline, + its token identifier will start from zero as if the pipeline was just + constructed. + */ + void reset(); + + /** + @brief queries the number of generated tokens in the pipeline + + The number represents the total scheduling tokens that has been + generated by the pipeline so far. 
+ */ + size_t num_tokens() const noexcept; + + /** + @brief obtains the graph object associated with the pipeline construct + + This method is primarily used as an opaque data structure for creating + a module task of this pipeline. + */ + Graph& graph(); + + private: + + Graph _graph; + + size_t _num_tokens; + + std::tuple _pipes; + std::array _meta; + std::vector> _lines; + std::vector _tasks; + std::vector _pipeflows; + std::vector> _buffer; + + template + auto _gen_meta(std::tuple&&, std::index_sequence); + + void _on_pipe(Pipeflow&, Runtime&); + void _build(); +}; + +// constructor +template +DataPipeline::DataPipeline(size_t num_lines, Ps&&... ps) : + _pipes {std::make_tuple(std::forward(ps)...)}, + _meta {PipeMeta{ps.type()}...}, + _lines (num_lines), + _tasks (num_lines + 1), + _pipeflows (num_lines), + _buffer (num_lines) { + + if(num_lines == 0) { + TF_THROW("must have at least one line"); + } + + if(std::get<0>(_pipes).type() != PipeType::SERIAL) { + TF_THROW("first pipe must be serial"); + } + + reset(); + _build(); +} + +// constructor +template +DataPipeline::DataPipeline(size_t num_lines, std::tuple&& ps) : + _pipes {std::forward>(ps)}, + _meta {_gen_meta( + std::forward>(ps), std::make_index_sequence{} + )}, + _lines (num_lines), + _tasks (num_lines + 1), + _pipeflows (num_lines), + _buffer (num_lines) { + + if(num_lines == 0) { + TF_THROW("must have at least one line"); + } + + if(std::get<0>(_pipes).type() != PipeType::SERIAL) { + TF_THROW("first pipe must be serial"); + } + + reset(); + _build(); +} + +// Function: _get_meta +template +template +auto DataPipeline::_gen_meta(std::tuple&& ps, std::index_sequence) { + return std::array{PipeMeta{std::get(ps).type()}...}; +} + +// Function: num_lines +template +size_t DataPipeline::num_lines() const noexcept { + return _pipeflows.size(); +} + +// Function: num_pipes +template +constexpr size_t DataPipeline::num_pipes() const noexcept { + return sizeof...(Ps); +} + +// Function: num_tokens +template +size_t DataPipeline::num_tokens() const noexcept { + return _num_tokens; +} + +// Function: graph +template +Graph& DataPipeline::graph() { + return _graph; +} + +// Function: reset +template +void DataPipeline::reset() { + + _num_tokens = 0; + + for(size_t l = 0; l(_meta[f].type), std::memory_order_relaxed + ); + } + } + + for(size_t f=1; f(_meta[0].type) - 1, std::memory_order_relaxed + ); + } +} + +// Procedure: _on_pipe +template +void DataPipeline::_on_pipe(Pipeflow& pf, Runtime&) { + + visit_tuple([&](auto&& pipe){ + + using data_pipe_t = std::decay_t; + using callable_t = typename data_pipe_t::callable_t; + using input_t = std::decay_t; + using output_t = std::decay_t; + + // first pipe + if constexpr (std::is_invocable_v) { + // [](tf::Pipeflow&) -> void {}, i.e., we only have one pipe + if constexpr (std::is_void_v) { + pipe._callable(pf); + // [](tf::Pipeflow&) -> output_t {} + } else { + _buffer[pf._line].data = pipe._callable(pf); + } + } + // other pipes without pipeflow in the second argument + else if constexpr (std::is_invocable_v >) { + // [](input_t&) -> void {}, i.e., the last pipe + if constexpr (std::is_void_v) { + pipe._callable(std::get(_buffer[pf._line].data)); + // [](input_t&) -> output_t {} + } else { + _buffer[pf._line].data = pipe._callable( + std::get(_buffer[pf._line].data) + ); + } + } + // other pipes with pipeflow in the second argument + else if constexpr (std::is_invocable_v) { + // [](input_t&, tf::Pipeflow&) -> void {} + if constexpr (std::is_void_v) { + 
pipe._callable(std::get(_buffer[pf._line].data), pf); + // [](input_t&, tf::Pipeflow&) -> output_t {} + } else { + _buffer[pf._line].data = pipe._callable( + std::get(_buffer[pf._line].data), pf + ); + } + } + //else if constexpr(std::is_invocable_v) { + // pipe._callable(pf, rt); + //} + else { + static_assert(dependent_false_v, "un-supported pipe callable type"); + } + }, _pipes, pf._pipe); +} + +// Procedure: _build +template +void DataPipeline::_build() { + + using namespace std::literals::string_literals; + + FlowBuilder fb(_graph); + + // init task + _tasks[0] = fb.emplace([this]() { + return static_cast(_num_tokens % num_lines()); + }).name("cond"); + + // line task + for(size_t l = 0; l < num_lines(); l++) { + + _tasks[l + 1] = fb.emplace([this, l] (tf::Runtime& rt) mutable { + + auto pf = &_pipeflows[l]; + + pipeline: + + _lines[pf->_line][pf->_pipe].join_counter.store( + static_cast(_meta[pf->_pipe].type), std::memory_order_relaxed + ); + + if (pf->_pipe == 0) { + pf->_token = _num_tokens; + if (pf->_stop = false, _on_pipe(*pf, rt); pf->_stop == true) { + // here, the pipeline is not stopped yet because other + // lines of tasks may still be running their last stages + return; + } + ++_num_tokens; + } + else { + _on_pipe(*pf, rt); + } + + size_t c_f = pf->_pipe; + size_t n_f = (pf->_pipe + 1) % num_pipes(); + size_t n_l = (pf->_line + 1) % num_lines(); + + pf->_pipe = n_f; + + // ---- scheduling starts here ---- + // Notice that the shared variable f must not be changed after this + // point because it can result in data race due to the following + // condition: + // + // a -> b + // | | + // v v + // c -> d + // + // d will be spawned by either c or b, so if c changes f but b spawns d + // then data race on f will happen + + std::array retval; + size_t n = 0; + + // downward dependency + if(_meta[c_f].type == PipeType::SERIAL && + _lines[n_l][c_f].join_counter.fetch_sub( + 1, std::memory_order_acq_rel) == 1 + ) { + retval[n++] = 1; + } + + // forward dependency + if(_lines[pf->_line][n_f].join_counter.fetch_sub( + 1, std::memory_order_acq_rel) == 1 + ) { + retval[n++] = 0; + } + + // notice that the task index starts from 1 + switch(n) { + case 2: { + rt.schedule(_tasks[n_l+1]); + goto pipeline; + } + case 1: { + if (retval[0] == 1) { + pf = &_pipeflows[n_l]; + } + goto pipeline; + } + } + }).name("rt-"s + std::to_string(l)); + + _tasks[0].precede(_tasks[l+1]); + } +} + + +} // end of namespace tf ----------------------------------------------------- + + + + + diff --git a/bundled/taskflow-3.6.0/include/algorithm/find.hpp b/bundled/taskflow-3.6.0/include/algorithm/find.hpp new file mode 100644 index 0000000000..ab0d8011de --- /dev/null +++ b/bundled/taskflow-3.6.0/include/algorithm/find.hpp @@ -0,0 +1,547 @@ +#pragma once + +#include "launch.hpp" + +namespace tf { + +namespace detail { + +// Function: find_if_loop +template +TF_FORCE_INLINE bool find_if_loop( + std::atomic& offset, + Iterator& beg, + size_t& prev_e, + size_t curr_b, + size_t curr_e, + Predicate&& predicate +) { + // early prune + if(offset.load(std::memory_order_relaxed) < curr_b) { + return true; + } + std::advance(beg, curr_b - prev_e); + for(size_t x = curr_b; x +TF_FORCE_INLINE bool find_if_not_loop( + std::atomic& offset, + Iterator& beg, + size_t& prev_e, + size_t curr_b, + size_t curr_e, + Predicate&& predicate +) { + + // early prune + if(offset.load(std::memory_order_relaxed) < curr_b) { + return true; + } + std::advance(beg, curr_b - prev_e); + for(size_t x = curr_b; x +TF_FORCE_INLINE auto 
make_find_if_task( + B first, E last, T& result, UOP predicate, P&& part +) { + + using B_t = std::decay_t>; + using E_t = std::decay_t>; + using namespace std::string_literals; + + return + [b=first, e=last, predicate, &result, part=std::forward

(part)] + (Runtime& rt) mutable { + + // fetch the stateful values + B_t beg = b; + E_t end = e; + + size_t W = rt.executor().num_workers(); + size_t N = std::distance(beg, end); + + // only myself - no need to spawn another graph + if(W <= 1 || N <= part.chunk_size()) { + result = std::find_if(beg, end, predicate); + return; + } + + if(N < W) { + W = N; + } + + std::atomic offset(N); + + // static partitioner + if constexpr(std::is_same_v, StaticPartitioner>) { + + size_t chunk_size; + + for(size_t w=0, curr_b=0; w next(0); + launch_loop(N, W, rt, next, part, + [N, W, beg, &predicate, &offset, &next, &part] () mutable { + part.loop_until(N, W, next, + [&, prev_e=size_t{0}](size_t curr_b, size_t curr_e) mutable { + return detail::find_if_loop( + offset, beg, prev_e, curr_b, curr_e, predicate + ); + } + ); + } + ); + } + + // update the result iterator by the offset + result = std::next(beg, offset.load(std::memory_order_relaxed)); + }; +} + +// Function: make_find_if_not_task +template +TF_FORCE_INLINE auto make_find_if_not_task( + B first, E last, T& result, UOP predicate, P&& part +) { + + using B_t = std::decay_t>; + using E_t = std::decay_t>; + using namespace std::string_literals; + + return + [b=first, e=last, predicate, &result, part=std::forward

(part)] + (Runtime& rt) mutable { + + // fetch the stateful values + B_t beg = b; + E_t end = e; + + size_t W = rt.executor().num_workers(); + size_t N = std::distance(beg, end); + + // only myself - no need to spawn another graph + if(W <= 1 || N <= part.chunk_size()) { + result = std::find_if_not(beg, end, predicate); + return; + } + + if(N < W) { + W = N; + } + + std::atomic offset(N); + + // static partitioner + if constexpr(std::is_same_v, StaticPartitioner>) { + + size_t chunk_size; + + for(size_t w=0, curr_b=0; w next(0); + launch_loop(N, W, rt, next, part, + [N, W, beg, &predicate, &offset, &next, &part] () mutable { + part.loop_until(N, W, next, + [&, prev_e=size_t{0}](size_t curr_b, size_t curr_e) mutable { + return detail::find_if_not_loop( + offset, beg, prev_e, curr_b, curr_e, predicate + ); + } + ); + } + ); + } + + // update the result iterator by the offset + result = std::next(beg, offset.load(std::memory_order_relaxed)); + }; +} + +// Function: make_min_element_task +template +TF_FORCE_INLINE auto make_min_element_task( + B first, E last, T& result, C comp, P&& part +) { + + using B_t = std::decay_t>; + using E_t = std::decay_t>; + using namespace std::string_literals; + + return + [b=first, e=last, &result, comp, part=std::forward

(part)] + (Runtime& rt) mutable { + + // fetch the iterator values + B_t beg = b; + E_t end = e; + + size_t W = rt.executor().num_workers(); + size_t N = std::distance(beg, end); + + // only myself - no need to spawn another graph + if(W <= 1 || N <= part.chunk_size()) { + result = std::min_element(beg, end, comp); + return; + } + + if(N < W) { + W = N; + } + + std::mutex mutex; + + // initialize the result to the first element + result = beg++; + N--; + + // static partitioner + if constexpr(std::is_same_v, StaticPartitioner>) { + + size_t chunk_size; + + for(size_t w=0, curr_b=0; w lock(mutex); + if(comp(*beg, *result)) { + result = beg; + } + return; + } + + auto beg1 = beg++; + auto beg2 = beg++; + T smallest = comp(*beg1, *beg2) ? beg1 : beg2; + + // loop reduce + part.loop(N, W, curr_b, chunk_size, + [&, prev_e=curr_b+2](size_t curr_b, size_t curr_e) mutable { + + if(curr_b > prev_e) { + std::advance(beg, curr_b - prev_e); + } + else { + curr_b = prev_e; + } + + for(size_t x=curr_b; x lock(mutex); + if(comp(*smallest, *result)) { + result = smallest; + } + }); + } + rt.join(); + } + // dynamic partitioner + else { + std::atomic next(0); + launch_loop(N, W, rt, next, part, + [beg, N, W, &next, &comp, &mutex, &result, &part] () mutable { + // pre-reduce + size_t s0 = next.fetch_add(2, std::memory_order_relaxed); + + if(s0 >= N) { + return; + } + + std::advance(beg, s0); + + if(N - s0 == 1) { + std::lock_guard lock(mutex); + if(comp(*beg, *result)) { + result = beg; + } + return; + } + + auto beg1 = beg++; + auto beg2 = beg++; + + T smallest = comp(*beg1, *beg2) ? beg1 : beg2; + + // loop reduce + part.loop(N, W, next, + [&, prev_e=s0+2](size_t curr_b, size_t curr_e) mutable { + std::advance(beg, curr_b - prev_e); + for(size_t x=curr_b; x lock(mutex); + if(comp(*smallest, *result)) { + result = smallest; + } + } + ); + } + }; +} + +// Function: make_max_element_task +template +TF_FORCE_INLINE auto make_max_element_task(B first, E last, T& result, C comp, P&& part) { + + using B_t = std::decay_t>; + using E_t = std::decay_t>; + using namespace std::string_literals; + + return + [b=first, e=last, &result, comp, part=std::forward

(part)] + (Runtime& rt) mutable { + + // fetch the iterator values + B_t beg = b; + E_t end = e; + + size_t W = rt.executor().num_workers(); + size_t N = std::distance(beg, end); + + // only myself - no need to spawn another graph + if(W <= 1 || N <= part.chunk_size()) { + result = std::max_element(beg, end, comp); + return; + } + + if(N < W) { + W = N; + } + + std::mutex mutex; + + // initialize the result to the first element + result = beg++; + N--; + + // static partitioner + if constexpr(std::is_same_v, StaticPartitioner>) { + + size_t chunk_size; + + for(size_t w=0, curr_b=0; w lock(mutex); + if(comp(*result, *beg)) { + result = beg; + } + return; + } + + auto beg1 = beg++; + auto beg2 = beg++; + T largest = comp(*beg1, *beg2) ? beg2 : beg1; + + // loop reduce + part.loop(N, W, curr_b, chunk_size, + [&, prev_e=curr_b+2](size_t curr_b, size_t curr_e) mutable { + + if(curr_b > prev_e) { + std::advance(beg, curr_b - prev_e); + } + else { + curr_b = prev_e; + } + + for(size_t x=curr_b; x lock(mutex); + if(comp(*result, *largest)) { + result = largest; + } + }); + } + rt.join(); + } + // dynamic partitioner + else { + std::atomic next(0); + launch_loop(N, W, rt, next, part, + [beg, N, W, &next, &comp, &mutex, &result, &part] () mutable { + // pre-reduce + size_t s0 = next.fetch_add(2, std::memory_order_relaxed); + + if(s0 >= N) { + return; + } + + std::advance(beg, s0); + + if(N - s0 == 1) { + std::lock_guard lock(mutex); + if(comp(*result, *beg)) { + result = beg; + } + return; + } + + auto beg1 = beg++; + auto beg2 = beg++; + + T largest = comp(*beg1, *beg2) ? beg2 : beg1; + + // loop reduce + part.loop(N, W, next, + [&, prev_e=s0+2](size_t curr_b, size_t curr_e) mutable { + std::advance(beg, curr_b - prev_e); + for(size_t x=curr_b; x lock(mutex); + if(comp(*result, *largest)) { + result = largest; + } + } + ); + } + }; +} + +} // namespace detail -------------------------------------------------------- + +// Function: find_if +template +Task tf::FlowBuilder::find_if(B first, E last, T& result, UOP predicate, P&& part) { + return emplace(detail::make_find_if_task( + first, last, result, predicate, std::forward

(part) + )); +} + +// Function: find_if_not +template +Task tf::FlowBuilder::find_if_not(B first, E last, T& result, UOP predicate, P&& part) { + return emplace(detail::make_find_if_not_task( + first, last, result, predicate, std::forward

(part) + )); +} + +// ---------------------------------------------------------------------------- +// min_element +// ---------------------------------------------------------------------------- + +// Function: min_element +template +Task FlowBuilder::min_element(B first, E last, T& result, C comp, P&& part) { + return emplace(detail::make_min_element_task( + first, last, result, comp, std::forward

(part) + )); +} + +// ---------------------------------------------------------------------------- +// max_element +// ---------------------------------------------------------------------------- + +// Function: max_element +template +Task FlowBuilder::max_element(B first, E last, T& result, C comp, P&& part) { + return emplace(detail::make_max_element_task( + first, last, result, comp, std::forward

(part) + )); +} + +} // end of namespace tf ----------------------------------------------------- diff --git a/bundled/taskflow-3.6.0/include/algorithm/for_each.hpp b/bundled/taskflow-3.6.0/include/algorithm/for_each.hpp new file mode 100644 index 0000000000..d15958abd7 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/algorithm/for_each.hpp @@ -0,0 +1,173 @@ +#pragma once + +#include "launch.hpp" + +namespace tf { + +namespace detail { + +// Function: make_for_each_task +template +TF_FORCE_INLINE auto make_for_each_task(B beg, E end, C c, P&& part) { + + using B_t = std::decay_t>; + using E_t = std::decay_t>; + using namespace std::string_literals; + + return [b=beg, e=end, c, part=std::forward

(part)] (Runtime& rt) mutable { + + // fetch the stateful values + B_t beg = b; + E_t end = e; + + size_t W = rt.executor().num_workers(); + size_t N = std::distance(beg, end); + + // only myself - no need to spawn another graph + if(W <= 1 || N <= part.chunk_size()) { + std::for_each(beg, end, c); + return; + } + + if(N < W) { + W = N; + } + + // static partitioner + if constexpr(std::is_same_v, StaticPartitioner>) { + size_t chunk_size; + for(size_t w=0, curr_b=0; w next(0); + launch_loop(N, W, rt, next, part, [=, &c, &next, &part] () mutable { + part.loop(N, W, next, + [&, prev_e=size_t{0}](size_t curr_b, size_t curr_e) mutable { + std::advance(beg, curr_b - prev_e); + for(size_t x = curr_b; x +TF_FORCE_INLINE auto make_for_each_index_task(B beg, E end, S inc, C c, P&& part){ + + using namespace std::string_literals; + + using B_t = std::decay_t>; + using E_t = std::decay_t>; + using S_t = std::decay_t>; + + return [b=beg, e=end, a=inc, c, part=std::forward

(part)] + (Runtime& rt) mutable { + + // fetch the iterator values + B_t beg = b; + E_t end = e; + S_t inc = a; + + size_t W = rt.executor().num_workers(); + size_t N = distance(beg, end, inc); + + // only myself - no need to spawn another graph + if(W <= 1 || N <= part.chunk_size()) { + for(size_t x=0; x, StaticPartitioner>) { + + size_t chunk_size; + + for(size_t w=0, curr_b=0; w(curr_b) * inc + beg; + for(size_t x=curr_b; x next(0); + launch_loop(N, W, rt, next, part, [=, &c, &next, &part] () mutable { + part.loop(N, W, next, + [&](size_t curr_b, size_t curr_e) { + auto idx = static_cast(curr_b) * inc + beg; + for(size_t x=curr_b; x +Task FlowBuilder::for_each(B beg, E end, C c, P&& part) { + return emplace( + detail::make_for_each_task(beg, end, c, std::forward

(part)) + ); +} + +// ---------------------------------------------------------------------------- +// for_each_index +// ---------------------------------------------------------------------------- + +// Function: for_each_index +template +Task FlowBuilder::for_each_index(B beg, E end, S inc, C c, P&& part){ + return emplace( + detail::make_for_each_index_task(beg, end, inc, c, std::forward

(part)) + ); +} + + +} // end of namespace tf ----------------------------------------------------- + diff --git a/bundled/taskflow-3.6.0/include/algorithm/launch.hpp b/bundled/taskflow-3.6.0/include/algorithm/launch.hpp new file mode 100644 index 0000000000..363223e60e --- /dev/null +++ b/bundled/taskflow-3.6.0/include/algorithm/launch.hpp @@ -0,0 +1,58 @@ +#pragma once + +#include "../core/async.hpp" + +namespace tf { + +// Function: launch_loop +template +TF_FORCE_INLINE void launch_loop( + size_t N, + size_t W, + Runtime& rt, + std::atomic& next, + P&& part, + Loop&& loop +) { + + //static_assert(std::is_lvalue_reference_v, ""); + + using namespace std::string_literals; + + for(size_t w=0; w +TF_FORCE_INLINE void launch_loop( + size_t W, + size_t w, + Runtime& rt, + Loop&& loop +) { + using namespace std::string_literals; + if(w == W-1) { + loop(); + } + else { + rt.silent_async_unchecked("loop-"s + std::to_string(w), loop); + } +} + +} // end of namespace tf ----------------------------------------------------- diff --git a/bundled/taskflow-3.6.0/include/algorithm/partitioner.hpp b/bundled/taskflow-3.6.0/include/algorithm/partitioner.hpp new file mode 100644 index 0000000000..4a253fafe6 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/algorithm/partitioner.hpp @@ -0,0 +1,543 @@ +// reference: +// - gomp: https://github.com/gcc-mirror/gcc/blob/master/libgomp/iter.c +// - komp: https://github.com/llvm-mirror/openmp/blob/master/runtime/src/kmp_dispatch.cpp + +#pragma once + +/** +@file partitioner.hpp +@brief partitioner include file +*/ + +namespace tf { + +// ---------------------------------------------------------------------------- +// Partitioner Base +// ---------------------------------------------------------------------------- + +/** +@class PartitionerBase + +@brief class to derive a partitioner for scheduling parallel algorithms + +The class provides base methods to derive a partitioner that can be used +to schedule parallel iterations (e.g., tf::Taskflow::for_each). + +An partitioner defines the scheduling method for running parallel algorithms, +such tf::Taskflow::for_each, tf::Taskflow::reduce, and so on. +By default, we provide the following partitioners: + ++ tf::GuidedPartitioner to enable guided scheduling algorithm of adaptive chunk size ++ tf::DynamicPartitioner to enable dynamic scheduling algorithm of equal chunk size ++ tf::StaticPartitioner to enable static scheduling algorithm of static chunk size ++ tf::RandomPartitioner to enable random scheduling algorithm of random chunk size + +Depending on applications, partitioning algorithms can impact the performance +a lot. +For example, if a parallel-iteration workload contains a regular work unit per +iteration, tf::StaticPartitioner can deliver the best performance. +On the other hand, if the work unit per iteration is irregular and unbalanced, +tf::GuidedPartitioner or tf::DynamicPartitioner can outperform tf::StaticPartitioner. +In most situations, tf::GuidedPartitioner can deliver decent performance and +is thus used as our default partitioner. 
+*/ +class PartitionerBase { + + public: + + /** + @brief default constructor + */ + PartitionerBase() = default; + + /** + @brief construct a partitioner with the given chunk size + */ + explicit PartitionerBase(size_t chunk_size) : _chunk_size {chunk_size} {} + + /** + @brief query the chunk size of this partitioner + */ + size_t chunk_size() const { return _chunk_size; } + + /** + @brief update the chunk size of this partitioner + */ + void chunk_size(size_t cz) { _chunk_size = cz; } + + protected: + + /** + @brief chunk size + */ + size_t _chunk_size{0}; +}; + +// ---------------------------------------------------------------------------- +// Guided Partitioner +// ---------------------------------------------------------------------------- + +/** +@class GuidedPartitioner + +@brief class to construct a guided partitioner for scheduling parallel algorithms + +The size of a partition is proportional to the number of unassigned iterations +divided by the number of workers, +and the size will gradually decrease to the given chunk size. +The last partition may be smaller than the chunk size. +*/ +class GuidedPartitioner : public PartitionerBase { + + public: + + /** + @brief default constructor + */ + GuidedPartitioner() : PartitionerBase{1} {} + + /** + @brief construct a guided partitioner with the given chunk size + */ + explicit GuidedPartitioner(size_t sz) : PartitionerBase (sz) {} + + // -------------------------------------------------------------------------- + // scheduling methods + // -------------------------------------------------------------------------- + + /** + @private + */ + template , void>* = nullptr + > + void loop( + size_t N, + size_t W, + std::atomic& next, + F&& func + ) const { + + size_t chunk_size = (_chunk_size == 0) ? size_t{1} : _chunk_size; + + size_t p1 = 2 * W * (chunk_size + 1); + float p2 = 0.5f / static_cast(W); + size_t curr_b = next.load(std::memory_order_relaxed); + + while(curr_b < N) { + + size_t r = N - curr_b; + + // fine-grained + if(r < p1) { + while(1) { + curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed); + if(curr_b >= N) { + return; + } + func(curr_b, std::min(curr_b + chunk_size, N)); + } + break; + } + // coarse-grained + else { + size_t q = static_cast(p2 * r); + if(q < chunk_size) { + q = chunk_size; + } + //size_t curr_e = (q <= r) ? curr_b + q : N; + size_t curr_e = std::min(curr_b + q, N); + if(next.compare_exchange_strong(curr_b, curr_e, std::memory_order_relaxed, + std::memory_order_relaxed)) { + func(curr_b, curr_e); + curr_b = next.load(std::memory_order_relaxed); + } + } + } + } + + /** + @private + */ + template , void>* = nullptr + > + void loop_until( + size_t N, + size_t W, + std::atomic& next, + F&& func + ) const { + + size_t chunk_size = (_chunk_size == 0) ? size_t{1} : _chunk_size; + + size_t p1 = 2 * W * (chunk_size + 1); + float p2 = 0.5f / static_cast(W); + size_t curr_b = next.load(std::memory_order_relaxed); + + while(curr_b < N) { + + size_t r = N - curr_b; + + // fine-grained + if(r < p1) { + while(1) { + curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed); + if(curr_b >= N) { + return; + } + if(func(curr_b, std::min(curr_b + chunk_size, N))) { + return; + } + } + break; + } + // coarse-grained + else { + size_t q = static_cast(p2 * r); + if(q < chunk_size) { + q = chunk_size; + } + //size_t curr_e = (q <= r) ? 
curr_b + q : N; + size_t curr_e = std::min(curr_b + q, N); + if(next.compare_exchange_strong(curr_b, curr_e, std::memory_order_relaxed, + std::memory_order_relaxed)) { + if(func(curr_b, curr_e)) { + return; + } + curr_b = next.load(std::memory_order_relaxed); + } + } + } + } +}; + +// ---------------------------------------------------------------------------- +// Dynamic Partitioner +// ---------------------------------------------------------------------------- + +/** +@class DynamicPartitioner + +@brief class to construct a dynamic partitioner for scheduling parallel algorithms + +The partitioner splits iterations into many partitions each of size equal to +the given chunk size. +Different partitions are distributed dynamically to workers +without any specific order. +*/ +class DynamicPartitioner : public PartitionerBase { + + public: + + /** + @brief default constructor + */ + DynamicPartitioner() : PartitionerBase{1} {}; + + /** + @brief construct a dynamic partitioner with the given chunk size + */ + explicit DynamicPartitioner(size_t sz) : PartitionerBase (sz) {} + + // -------------------------------------------------------------------------- + // scheduling methods + // -------------------------------------------------------------------------- + + /** + @private + */ + template , void>* = nullptr + > + void loop( + size_t N, + size_t, + std::atomic& next, + F&& func + ) const { + + size_t chunk_size = (_chunk_size == 0) ? size_t{1} : _chunk_size; + size_t curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed); + + while(curr_b < N) { + func(curr_b, std::min(curr_b + chunk_size, N)); + curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed); + } + } + + /** + @private + */ + template , void>* = nullptr + > + void loop_until( + size_t N, + size_t, + std::atomic& next, + F&& func + ) const { + + size_t chunk_size = (_chunk_size == 0) ? size_t{1} : _chunk_size; + size_t curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed); + + while(curr_b < N) { + if(func(curr_b, std::min(curr_b + chunk_size, N))) { + return; + } + curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed); + } + } +}; + +// ---------------------------------------------------------------------------- +// Static Partitioner +// ---------------------------------------------------------------------------- + +/** +@class StaticPartitioner + +@brief class to construct a dynamic partitioner for scheduling parallel algorithms + +The partitioner divides iterations into chunks and distributes chunks +to workers in order. +If the chunk size is not specified (default @c 0), the partitioner resorts to a chunk size +that equally distributes iterations into workers. + +@code{.cpp} +std::vector data = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10} +taskflow.for_each( + data.begin(), data.end(), [](int i){}, StaticPartitioner(0) +); +executor.run(taskflow).run(); +@endcode +*/ +class StaticPartitioner : public PartitionerBase { + + public: + + /** + @brief default constructor + */ + StaticPartitioner() : PartitionerBase{0} {}; + + /** + @brief construct a dynamic partitioner with the given chunk size + */ + explicit StaticPartitioner(size_t sz) : PartitionerBase(sz) {} + + /** + @brief queries the adjusted chunk size + + Returns the given chunk size if it is not zero, or returns + N/W + (w < N%W), where @c N is the number of iterations, + @c W is the number of workers, and @c w is the worker ID. + */ + size_t adjusted_chunk_size(size_t N, size_t W, size_t w) const { + return _chunk_size ? 
_chunk_size : N/W + (w < N%W); + } + + // -------------------------------------------------------------------------- + // scheduling methods + // -------------------------------------------------------------------------- + + /** + @private + */ + template , void>* = nullptr + > + void loop( + size_t N, + size_t W, + size_t curr_b, + size_t chunk_size, + F&& func + ) { + size_t stride = W * chunk_size; + while(curr_b < N) { + size_t curr_e = std::min(curr_b + chunk_size, N); + func(curr_b, curr_e); + curr_b += stride; + } + } + + /** + @private + */ + template , void>* = nullptr + > + void loop_until( + size_t N, + size_t W, + size_t curr_b, + size_t chunk_size, + F&& func + ) { + size_t stride = W * chunk_size; + while(curr_b < N) { + size_t curr_e = std::min(curr_b + chunk_size, N); + if(func(curr_b, curr_e)) { + return; + } + curr_b += stride; + } + } +}; + +// ---------------------------------------------------------------------------- +// RandomPartitioner +// ---------------------------------------------------------------------------- + +/** +@class RandomPartitioner + +@brief class to construct a random partitioner for scheduling parallel algorithms + +Similar to tf::DynamicPartitioner, +the partitioner splits iterations into many partitions but each with a random +chunk size in the range, c = [alpha * N * W, beta * N * W]. +By default, @c alpha is 0.01 and @c beta is 0.5, respectively. + +*/ +class RandomPartitioner : public PartitionerBase { + + public: + + /** + @brief default constructor + */ + RandomPartitioner() = default; + + /** + @brief constructs a random partitioner + */ + RandomPartitioner(size_t cz) : PartitionerBase(cz) {} + + /** + @brief constructs a random partitioner with the given parameters + */ + RandomPartitioner(float alpha, float beta) : _alpha {alpha}, _beta {beta} {} + + /** + @brief queries the @c alpha value + */ + float alpha() const { return _alpha; } + + /** + @brief queries the @c beta value + */ + float beta() const { return _beta; } + + /** + @brief queries the range of chunk size + + @param N number of iterations + @param W number of workers + */ + std::pair chunk_size_range(size_t N, size_t W) const { + + size_t b1 = static_cast(_alpha * N * W); + size_t b2 = static_cast(_beta * N * W); + + if(b1 > b2) { + std::swap(b1, b2); + } + + b1 = std::max(b1, size_t{1}); + b2 = std::max(b2, b1 + 1); + + return {b1, b2}; + } + + // -------------------------------------------------------------------------- + // scheduling methods + // -------------------------------------------------------------------------- + + /** + @private + */ + template , void>* = nullptr + > + void loop( + size_t N, + size_t W, + std::atomic& next, + F&& func + ) const { + + auto [b1, b2] = chunk_size_range(N, W); + + std::default_random_engine engine {std::random_device{}()}; + std::uniform_int_distribution dist(b1, b2); + + size_t chunk_size = dist(engine); + size_t curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed); + + while(curr_b < N) { + func(curr_b, std::min(curr_b + chunk_size, N)); + chunk_size = dist(engine); + curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed); + } + } + + /** + @private + */ + template , void>* = nullptr + > + void loop_until( + size_t N, + size_t W, + std::atomic& next, + F&& func + ) const { + + auto [b1, b2] = chunk_size_range(N, W); + + std::default_random_engine engine {std::random_device{}()}; + std::uniform_int_distribution dist(b1, b2); + + size_t chunk_size = dist(engine); + size_t curr_b = next.fetch_add(chunk_size, 
std::memory_order_relaxed); + + while(curr_b < N) { + if(func(curr_b, std::min(curr_b + chunk_size, N))){ + return; + } + chunk_size = dist(engine); + curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed); + } + } + + private: + + float _alpha {0.01f}; + float _beta {0.5f}; + +}; + +/** +@brief default partitioner set to tf::GuidedPartitioner + +Guided partitioner can achieve decent performance for most parallel algorithms, +especially for those with irregular and unbalanced workload per iteration. +*/ +using DefaultPartitioner = GuidedPartitioner; + +/** +@brief determines if a type is a partitioner + +A partitioner is a derived type from tf::PartitionerBase. +*/ +template +inline constexpr bool is_partitioner_v = std::is_base_of::value; + +} // end of namespace tf ----------------------------------------------------- + + + diff --git a/bundled/taskflow-3.6.0/include/algorithm/pipeline.hpp b/bundled/taskflow-3.6.0/include/algorithm/pipeline.hpp new file mode 100644 index 0000000000..5442d56006 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/algorithm/pipeline.hpp @@ -0,0 +1,1663 @@ +#pragma once + +#include "../taskflow.hpp" + +/** +@file pipeline.hpp +@brief pipeline include file +*/ + +namespace tf { + + +// ---------------------------------------------------------------------------- +// Structure Definition: DeferredPipeflow +// ---------------------------------------------------------------------------- +// For example: +// 12.defer(7); 12.defer(16); +// _____ +// | | +// v | +// 7 12 16 +// | ^ +// |____ | +// +// DeferredPipeflow dpf of 12 : +// dpf._token = 12; +// dpf._num_deferrals = 1; +// dpf._dependents = std::list{7,16}; +// dpf._dependent_satellites has following two entries +// {key: 7, value: dpf._dependents.begin()} +// {key: 16, value: dpf._dependents.begin()+1} +// +/** @private */ +class DeferredPipeflow { + + template + friend class Pipeline; + + template + friend class ScalablePipeline; + + public: + + DeferredPipeflow() = default; + DeferredPipeflow(const DeferredPipeflow&) = delete; + DeferredPipeflow(DeferredPipeflow&&) = delete; + + DeferredPipeflow(size_t t, size_t n, std::unordered_set&& dep) : + _token{t}, _num_deferrals{n}, _dependents{std::move(dep)} { + } + + DeferredPipeflow& operator = (const DeferredPipeflow&) = delete; + DeferredPipeflow& operator = (DeferredPipeflow&&) = delete; + + private: + + // token id + size_t _token; + + // number of deferrals + size_t _num_deferrals; + + // dependents + // For example, + // 12.defer(7); 12.defer(16) + // _dependents = {7, 16} + std::unordered_set _dependents; +}; + + + +// ---------------------------------------------------------------------------- +// Class Definition: Pipeflow +// ---------------------------------------------------------------------------- + +/** +@class Pipeflow + +@brief class to create a pipeflow object used by the pipe callable + +Pipeflow represents a scheduling token in the pipeline scheduling +framework. A pipeflow is created by the pipeline scheduler at runtime to +pass to the pipe callable. Users can query the present statistics +of that scheduling token, including the line identifier, pipe identifier, +and token identifier, and build their application algorithms based on +these statistics. +At the first stage, users can explicitly call the stop method +to stop the pipeline scheduler. 
+ +@code{.cpp} +tf::Pipe{tf::PipeType::SERIAL, [](tf::Pipeflow& pf){ + std::cout << "token id=" << pf.token() + << " at line=" << pf.line() + << " at pipe=" << pf.pipe() + << '\n'; +}}; +@endcode + +Pipeflow can only be created privately by the tf::Pipeline and +be used through the pipe callable. +*/ +class Pipeflow { + + template + friend class Pipeline; + + template + friend class ScalablePipeline; + + template + friend class DataPipeline; + + public: + + /** + @brief default constructor + */ + Pipeflow() = default; + + /** + @brief queries the line identifier of the present token + */ + size_t line() const { + return _line; + } + + /** + @brief queries the pipe identifier of the present token + */ + size_t pipe() const { + return _pipe; + } + + /** + @brief queries the token identifier + */ + size_t token() const { + return _token; + } + + /** + @brief stops the pipeline scheduling + + Only the first pipe can call this method to stop the pipeline. + Calling stop from other pipes will throw exception. + */ + void stop() { + if(_pipe != 0) { + TF_THROW("only the first pipe can stop the token"); + } + _stop = true; + } + + /** + @brief queries the number of deferrals + */ + size_t num_deferrals() const { + return _num_deferrals; + } + + /** + @brief pushes token in _dependents + + Only the first pipe can call this method to defer the current + scheduling token to the given token. + */ + void defer(size_t token) { + if(_pipe != 0) { + TF_THROW("only the first pipe can defer the current scheduling token"); + } + _dependents.insert(token); + } + + private: + + // Regular data + size_t _line; + size_t _pipe; + size_t _token; + bool _stop; + + // Data field for token dependencies + size_t _num_deferrals; + std::unordered_set _dependents; + +}; + +// ---------------------------------------------------------------------------- +// Class Definition: PipeType +// ---------------------------------------------------------------------------- + +/** +@enum PipeType + +@brief enumeration of all pipe types +*/ +enum class PipeType : int { + /** @brief parallel type */ + PARALLEL = 1, + /** @brief serial type */ + SERIAL = 2 +}; + +// ---------------------------------------------------------------------------- +// Class Definition: Pipe +// ---------------------------------------------------------------------------- + +/** +@class Pipe + +@brief class to create a pipe object for a pipeline stage + +@tparam C callable type + +A pipe represents a stage of a pipeline. A pipe can be either +@em parallel direction or @em serial direction (specified by tf::PipeType) +and is coupled with a callable to invoke by the pipeline scheduler. +The callable must take a referenced tf::Pipeflow object in the first argument: + +@code{.cpp} +Pipe{PipeType::SERIAL, [](tf::Pipeflow&){}} +@endcode + +The pipeflow object is used to query the statistics of a scheduling token +in the pipeline, such as pipe, line, and token numbers. +*/ +template > +class Pipe { + + template + friend class Pipeline; + + template + friend class ScalablePipeline; + + public: + + /** + @brief alias of the callable type + */ + using callable_t = C; + + /** + @brief default constructor + */ + Pipe() = default; + + /** + @brief constructs the pipe object + + @param d pipe type (tf::PipeType) + @param callable callable type + + The constructor constructs a pipe with the given direction + (tf::PipeType::SERIAL or tf::PipeType::PARALLEL) and the given callable. + The callable must take a referenced tf::Pipeflow object in the first argument. 
+ + @code{.cpp} + Pipe{PipeType::SERIAL, [](tf::Pipeflow&){}} + @endcode + + When creating a pipeline, the direction of the first pipe must be serial + (tf::PipeType::SERIAL). + */ + Pipe(PipeType d, C&& callable) : + _type{d}, _callable{std::forward(callable)} { + } + + /** + @brief queries the type of the pipe + + Returns the type of the callable. + */ + PipeType type() const { + return _type; + } + + /** + @brief assigns a new type to the pipe + + @param type a tf::PipeType variable + */ + void type(PipeType type) { + _type = type; + } + + /** + @brief assigns a new callable to the pipe + + @tparam U callable type + @param callable a callable object constructible from std::function + + Assigns a new callable to the pipe with universal forwarding. + */ + template + void callable(U&& callable) { + _callable = std::forward(callable); + } + + private: + + PipeType _type; + + C _callable; +}; + +// ---------------------------------------------------------------------------- +// Class Definition: Pipeline +// ---------------------------------------------------------------------------- + +/** +@class Pipeline + +@brief class to create a pipeline scheduling framework + +@tparam Ps pipe types + +A pipeline is a composable graph object for users to create a +pipeline scheduling framework using a module task in a taskflow. +Unlike the conventional pipeline programming frameworks (e.g., Intel TBB), +%Taskflow's pipeline algorithm does not provide any data abstraction, +which often restricts users from optimizing data layouts in their applications, +but a flexible framework for users to customize their application data +atop our pipeline scheduling. +The following code creates a pipeline of four parallel lines to schedule +tokens through three serial pipes: + +@code{.cpp} +tf::Taskflow taskflow; +tf::Executor executor; + +const size_t num_lines = 4; +const size_t num_pipes = 3; + +// create a custom data buffer +std::array, num_lines> buffer; + +// create a pipeline graph of four concurrent lines and three serial pipes +tf::Pipeline pipeline(num_lines, + // first pipe must define a serial direction + tf::Pipe{tf::PipeType::SERIAL, [&buffer](tf::Pipeflow& pf) { + // generate only 5 scheduling tokens + if(pf.token() == 5) { + pf.stop(); + } + // save the token id into the buffer + else { + buffer[pf.line()][pf.pipe()] = pf.token(); + } + }}, + tf::Pipe{tf::PipeType::SERIAL, [&buffer] (tf::Pipeflow& pf) { + // propagate the previous result to this pipe by adding one + buffer[pf.line()][pf.pipe()] = buffer[pf.line()][pf.pipe()-1] + 1; + }}, + tf::Pipe{tf::PipeType::SERIAL, [&buffer](tf::Pipeflow& pf){ + // propagate the previous result to this pipe by adding one + buffer[pf.line()][pf.pipe()] = buffer[pf.line()][pf.pipe()-1] + 1; + }} +); + +// build the pipeline graph using composition +tf::Task init = taskflow.emplace([](){ std::cout << "ready\n"; }) + .name("starting pipeline"); +tf::Task task = taskflow.composed_of(pipeline) + .name("pipeline"); +tf::Task stop = taskflow.emplace([](){ std::cout << "stopped\n"; }) + .name("pipeline stopped"); + +// create task dependency +init.precede(task); +task.precede(stop); + +// run the pipeline +executor.run(taskflow).wait(); +@endcode + +The above example creates a pipeline graph that schedules five tokens over +four parallel lines in a circular fashion, as depicted below: + +@code{.shell-session} +o -> o -> o +| | | +v v v +o -> o -> o +| | | +v v v +o -> o -> o +| | | +v v v +o -> o -> o +@endcode + +At each pipe stage, the program propagates the result to 
the next pipe +by adding one to the result stored in a custom data storage, @c buffer. +The pipeline scheduler will generate five scheduling tokens and then stop. + +Internally, tf::Pipeline uses std::tuple to store the given sequence of pipes. +The definition of each pipe can be different, completely decided by the compiler +to optimize the object layout. +After a pipeline is constructed, it is not possible to change its pipes. +If applications need to change these pipes, please use tf::ScalablePipeline. +*/ +template +class Pipeline { + + static_assert(sizeof...(Ps)>0, "must have at least one pipe"); + + /** + @private + */ + struct Line { + std::atomic join_counter; + }; + + /** + @private + */ + struct PipeMeta { + PipeType type; + }; + + public: + + /** + @brief constructs a pipeline object + + @param num_lines the number of parallel lines + @param ps a list of pipes + + Constructs a pipeline of up to @c num_lines parallel lines to schedule + tokens through the given linear chain of pipes. + The first pipe must define a serial direction (tf::PipeType::SERIAL) + or an exception will be thrown. + */ + Pipeline(size_t num_lines, Ps&&... ps); + + /** + @brief constructs a pipeline object + + @param num_lines the number of parallel lines + @param ps a tuple of pipes + + Constructs a pipeline of up to @c num_lines parallel lines to schedule + tokens through the given linear chain of pipes. + The first pipe must define a serial direction (tf::PipeType::SERIAL) + or an exception will be thrown. + */ + Pipeline(size_t num_lines, std::tuple&& ps); + + /** + @brief queries the number of parallel lines + + The function returns the number of parallel lines given by the user + upon the construction of the pipeline. + The number of lines represents the maximum parallelism this pipeline + can achieve. + */ + size_t num_lines() const noexcept; + + /** + @brief queries the number of pipes + + The Function returns the number of pipes given by the user + upon the construction of the pipeline. + */ + constexpr size_t num_pipes() const noexcept; + + /** + @brief resets the pipeline + + Resetting the pipeline to the initial state. After resetting a pipeline, + its token identifier will start from zero as if the pipeline was just + constructed. + */ + void reset(); + + /** + @brief queries the number of generated tokens in the pipeline + + The number represents the total scheduling tokens that has been + generated by the pipeline so far. + */ + size_t num_tokens() const noexcept; + + /** + @brief obtains the graph object associated with the pipeline construct + + This method is primarily used as an opaque data structure for creating + a module task of the this pipeline. + */ + Graph& graph(); + + + private: + + Graph _graph; + + size_t _num_tokens; + + std::tuple _pipes; + std::array _meta; + std::vector> _lines; + std::vector _tasks; + std::vector _pipeflows; + + // queue of ready tokens (paired with their deferral times) + // For example, + // when 12 does not have any dependents, + // we put 12 in _ready_tokens queue + // Assume num_deferrals of 12 is 1, + // we push pair{12, 1} in the queue + std::queue> _ready_tokens; + + // unordered_map of token dependencies + // For example, + // 12.defer(16); 13.defer(16); + // _token_dependencies has the following entry + // {key: 16, value: std::vector{12, 13}}. 
+ std::unordered_map> _token_dependencies; + + // unordered_map of deferred tokens + // For example, + // 12.defer(16); 13.defer(16); + // _deferred_tokens has the following two entries + // {key: 12, DeferredPipeflow of 12} and + // {key: 13, DeferredPipeflow of 13} + std::unordered_map _deferred_tokens; + + // variable to keep track of the longest deferred tokens + // For example, + // 2.defer(16) + // 5.defer(19) + // 5.defer(17), + // _longest_deferral will be 19 - after token 19 the pipeline + // has almost zero cost on handling deferred pipeflow + size_t _longest_deferral = 0; + + template + auto _gen_meta(std::tuple&&, std::index_sequence); + + void _on_pipe(Pipeflow&, Runtime&); + void _build(); + void _check_dependents(Pipeflow&); + void _construct_deferred_tokens(Pipeflow&); + void _resolve_token_dependencies(Pipeflow&); +}; + +// constructor +template +Pipeline::Pipeline(size_t num_lines, Ps&&... ps) : + _pipes {std::make_tuple(std::forward(ps)...)}, + _meta {PipeMeta{ps.type()}...}, + _lines (num_lines), + _tasks (num_lines + 1), + _pipeflows (num_lines) { + + if(num_lines == 0) { + TF_THROW("must have at least one line"); + } + + if(std::get<0>(_pipes).type() != PipeType::SERIAL) { + TF_THROW("first pipe must be serial"); + } + + reset(); + _build(); +} + +// constructor +template +Pipeline::Pipeline(size_t num_lines, std::tuple&& ps) : + _pipes {std::forward>(ps)}, + _meta {_gen_meta( + std::forward>(ps), std::make_index_sequence{} + )}, + _lines (num_lines), + _tasks (num_lines + 1), + _pipeflows (num_lines) { + + if(num_lines == 0) { + TF_THROW("must have at least one line"); + } + + if(std::get<0>(_pipes).type() != PipeType::SERIAL) { + TF_THROW("first pipe must be serial"); + } + + reset(); + _build(); +} + +// Function: _get_meta +template +template +auto Pipeline::_gen_meta(std::tuple&& ps, std::index_sequence) { + return std::array{PipeMeta{std::get(ps).type()}...}; +} + +// Function: num_lines +template +size_t Pipeline::num_lines() const noexcept { + return _pipeflows.size(); +} + +// Function: num_pipes +template +constexpr size_t Pipeline::num_pipes() const noexcept { + return sizeof...(Ps); +} + +// Function: num_tokens +template +size_t Pipeline::num_tokens() const noexcept { + return _num_tokens; +} + +// Function: graph +template +Graph& Pipeline::graph() { + return _graph; +} + +// Function: reset +template +void Pipeline::reset() { + + _num_tokens = 0; + + for(size_t l = 0; l(_meta[f].type), std::memory_order_relaxed + ); + } + } + + for(size_t f=1; f(_meta[0].type) - 1, std::memory_order_relaxed + ); + } +} + +// Procedure: _on_pipe +template +void Pipeline::_on_pipe(Pipeflow& pf, Runtime& rt) { + visit_tuple([&](auto&& pipe){ + using callable_t = typename std::decay_t::callable_t; + if constexpr (std::is_invocable_v) { + pipe._callable(pf); + } + else if constexpr(std::is_invocable_v) { + pipe._callable(pf, rt); + } + else { + static_assert(dependent_false_v, "un-supported pipe callable type"); + } + }, _pipes, pf._pipe); +} + +// Procedure: _check_dependents +// Check and remove invalid dependents after on_pipe +// For example, users may defer a pipeflow to multiple tokens, +// and we need to remove invalid tokens. 
+// 12.defer(7); // valid only if 7 is deferred, or invalid otherwise +// 12.defer(16); // 16 is valid +template +void Pipeline::_check_dependents(Pipeflow& pf) { + //if (pf._dependents.size()) { + ++pf._num_deferrals; + + for (auto it = pf._dependents.begin(); it != pf._dependents.end();) { + + // valid (e.g., 12.defer(16)) + if (*it >= _num_tokens) { + _token_dependencies[*it].push_back(pf._token); + _longest_deferral = std::max(_longest_deferral, *it); + ++it; + } + // valid or invalid (e.g., 12.defer(7)) + else { + auto pit = _deferred_tokens.find(*it); + + // valid (e.g., 7 is deferred) + if (pit != _deferred_tokens.end()) { + _token_dependencies[*it].push_back(pf._token); + ++it; + } + + // invalid (e.g., 7 is finished - this this 12.defer(7) is dummy) + else { + it = pf._dependents.erase(it); + } + } + } +} + +// Procedure: _construct_deferred_tokens +// Construct a data structure for a deferred token +// +// For example, +// 12.defer(7); 12.defer(16); +// After _check_dependents, 12 needs to be deferred, +// so we will construct a data structure for 12 using hashmap: +// {key: 12, value: DeferredPipeflow of 12} +template +void Pipeline::_construct_deferred_tokens(Pipeflow& pf) { + + //auto res = _deferred_tokens.emplace( + // pf._token, DeferredPipeflow{pf._token, pf._num_deferrals, std::move(pf._dependents)} + //); + + // construct the deferred pipeflow with zero copy + //auto res = _deferred_tokens.emplace( + _deferred_tokens.emplace( + std::piecewise_construct, + std::forward_as_tuple(pf._token), + std::forward_as_tuple( + pf._token, pf._num_deferrals, std::move(pf._dependents) + ) + ); + + //assert(res.second == true); +} + +// Procedure: _resolve_token_dependencies +// Resolve dependencies for tokens that defer to current token +// +// For example, +// 12.defer(16); +// 13.defer(16); +// _token_dependencies will have the entry +// {key: 16, value: std::vector{12, 13}} +// +// When 16 finishes, we need to remove 16 from 12's and 13's +// individual_dependents +template +void Pipeline::_resolve_token_dependencies(Pipeflow& pf) { + + if (auto it = _token_dependencies.find(pf._token); + it != _token_dependencies.end()) { + + // iterate tokens that defer to pf._token + // (e.g., 12 and 13) + for(size_t target : it->second) { + + auto dpf = _deferred_tokens.find(target); + + assert(dpf != _deferred_tokens.end()); + + // erase pf._token from target's _dependents + // (e.g., remove 16 from 12's dependents) + dpf->second._dependents.erase(pf._token); + // dpf->second._dependent_satellites[pf._token] + //); + + // target has no dependents + if (dpf->second._dependents.empty()) { + + // push target into _ready_tokens queue + _ready_tokens.emplace(dpf->second._token, dpf->second._num_deferrals); + //_ready_tokens.push( + // std::make_pair(dpf->second._token, dpf->second._num_deferrals) + //); + + // erase target from _deferred_tokens + _deferred_tokens.erase(dpf); + } + } + + // remove pf._token from _token_dependencies + // (e.g., remove the entry + // {key: 16, value: std::vector{12, 13}} from _token_dependencies) + _token_dependencies.erase(it); + } +} + +// Procedure: _build +template +void Pipeline::_build() { + + using namespace std::literals::string_literals; + + FlowBuilder fb(_graph); + + // init task + _tasks[0] = fb.emplace([this]() { + return static_cast(_num_tokens % num_lines()); + }).name("cond"); + + // line task + for(size_t l = 0; l < num_lines(); l++) { + + _tasks[l + 1] = fb.emplace([this, l] (tf::Runtime& rt) mutable { + + auto pf = &_pipeflows[l]; + + pipeline: + + 
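+      // Re-arm the join counter of this (line, pipe) slot before running it.
+      // A SERIAL pipe is released by two predecessors (the preceding pipe on
+      // the same line and the same pipe on the preceding line), a PARALLEL
+      // pipe only by the preceding pipe on the same line, hence the counter
+      // is seeded from the numeric PipeType value (SERIAL = 2, PARALLEL = 1).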
_lines[pf->_line][pf->_pipe].join_counter.store( + static_cast(_meta[pf->_pipe].type), std::memory_order_relaxed + ); + + // First pipe does all jobs of initialization and token dependencies + if (pf->_pipe == 0) { + // _ready_tokens queue is not empty + // substitute pf with the token at the front of the queue + if (!_ready_tokens.empty()) { + pf->_token = _ready_tokens.front().first; + pf->_num_deferrals = _ready_tokens.front().second; + _ready_tokens.pop(); + } + else { + pf->_token = _num_tokens; + pf->_num_deferrals = 0; + } + + handle_token_dependency: + + if (pf->_stop = false, _on_pipe(*pf, rt); pf->_stop == true) { + // here, the pipeline is not stopped yet because other + // lines of tasks may still be running their last stages + return; + } + + if (_num_tokens == pf->_token) { + ++_num_tokens; + } + + if (pf->_dependents.empty() == false){ + // check if the pf->_dependents have valid dependents + _check_dependents(*pf); + + // tokens in pf->_dependents are all valid dependents + if (pf->_dependents.size()) { + + // construct a data structure for pf in _deferred_tokens + _construct_deferred_tokens(*pf); + goto pipeline; + } + + // tokens in pf->_dependents are invalid dependents + // directly goto on_pipe on the same line + else { + goto handle_token_dependency; + } + } + + // Every token within the deferral range needs to check + // if it can resolve dependencies on other tokens. + if (pf->_token <= _longest_deferral) { + _resolve_token_dependencies(*pf); + } + } + else { + _on_pipe(*pf, rt); + } + + size_t c_f = pf->_pipe; + size_t n_f = (pf->_pipe + 1) % num_pipes(); + size_t n_l = (pf->_line + 1) % num_lines(); + + pf->_pipe = n_f; + + // ---- scheduling starts here ---- + // Notice that the shared variable f must not be changed after this + // point because it can result in data race due to the following + // condition: + // + // a -> b + // | | + // v v + // c -> d + // + // d will be spawned by either c or b, so if c changes f but b spawns d + // then data race on f will happen + + std::array retval; + size_t n = 0; + + // downward dependency + if(_meta[c_f].type == PipeType::SERIAL && + _lines[n_l][c_f].join_counter.fetch_sub( + 1, std::memory_order_acq_rel) == 1 + ) { + retval[n++] = 1; + } + + // forward dependency + if(_lines[pf->_line][n_f].join_counter.fetch_sub( + 1, std::memory_order_acq_rel) == 1 + ) { + retval[n++] = 0; + } + + // notice that the task index starts from 1 + switch(n) { + case 2: { + rt.schedule(_tasks[n_l+1]); + goto pipeline; + } + case 1: { + // downward dependency + if (retval[0] == 1) { + pf = &_pipeflows[n_l]; + } + // forward dependency + goto pipeline; + } + } + }).name("rt-"s + std::to_string(l)); + + _tasks[0].precede(_tasks[l+1]); + } +} + +// ---------------------------------------------------------------------------- +// Class Definition: ScalablePipeline +// ---------------------------------------------------------------------------- + +/** +@class ScalablePipeline + +@brief class to create a scalable pipeline object + +@tparam P type of the iterator to a range of pipes + +A scalable pipeline is a composable graph object for users to create a +pipeline scheduling framework using a module task in a taskflow. +Unlike tf::Pipeline that instantiates all pipes upon the construction time, +tf::ScalablePipeline allows variable assignments of pipes using range iterators. +Users can also reset a scalable pipeline to a different range of pipes +between runs. 
The following code creates a scalable pipeline of four +parallel lines to schedule tokens through three serial pipes in a custom storage, +then resetting the pipeline to a new range of five serial pipes: + +@code{.cpp} +tf::Taskflow taskflow("pipeline"); +tf::Executor executor; + +const size_t num_lines = 4; + +// create data storage +std::array buffer; + +// define the pipe callable +auto pipe_callable = [&buffer] (tf::Pipeflow& pf) mutable { + switch(pf.pipe()) { + // first stage generates only 5 scheduling tokens and saves the + // token number into the buffer. + case 0: { + if(pf.token() == 5) { + pf.stop(); + } + else { + printf("stage 1: input token = %zu\n", pf.token()); + buffer[pf.line()] = pf.token(); + } + return; + } + break; + + // other stages propagate the previous result to this pipe and + // increment it by one + default: { + printf( + "stage %zu: input buffer[%zu] = %d\n", pf.pipe(), pf.line(), buffer[pf.line()] + ); + buffer[pf.line()] = buffer[pf.line()] + 1; + } + break; + } +}; + +// create a vector of three pipes +std::vector< tf::Pipe> > pipes; + +for(size_t i=0; i<3; i++) { + pipes.emplace_back(tf::PipeType::SERIAL, pipe_callable); +} + +// create a pipeline of four parallel lines based on the given vector of pipes +tf::ScalablePipeline pl(num_lines, pipes.begin(), pipes.end()); + +// build the pipeline graph using composition +tf::Task init = taskflow.emplace([](){ std::cout << "ready\n"; }) + .name("starting pipeline"); +tf::Task task = taskflow.composed_of(pl) + .name("pipeline"); +tf::Task stop = taskflow.emplace([](){ std::cout << "stopped\n"; }) + .name("pipeline stopped"); + +// create task dependency +init.precede(task); +task.precede(stop); + +// dump the pipeline graph structure (with composition) +taskflow.dump(std::cout); + +// run the pipeline +executor.run(taskflow).wait(); + +// reset the pipeline to a new range of five pipes and starts from +// the initial state (i.e., token counts from zero) +for(size_t i=0; i<2; i++) { + pipes.emplace_back(tf::PipeType::SERIAL, pipe_callable); +} +pl.reset(pipes.begin(), pipes.end()); + +executor.run(taskflow).wait(); +@endcode + +The above example creates a pipeline graph that schedules five tokens over +four parallel lines in a circular fashion, first going through three serial pipes +and then five serial pipes: + +@code{.shell-session} +# initial construction of three serial pipes +o -> o -> o +| | | +v v v +o -> o -> o +| | | +v v v +o -> o -> o +| | | +v v v +o -> o -> o + +# resetting to a new range of five serial pipes +o -> o -> o -> o -> o +| | | | | +v v v v v +o -> o -> o -> o -> o +| | | | | +v v v v v +o -> o -> o -> o -> o +| | | | | +v v v v v +o -> o -> o -> o -> o +@endcode + +Each pipe has the same type of `%tf::Pipe<%std::function>` +and is kept in a vector that is amenable to change. +We construct the scalable pipeline using two range iterators pointing to the +beginning and the end of the vector. +At each pipe stage, the program propagates the result to the next pipe +by adding one to the result stored in a custom data storage, @c buffer. +The pipeline scheduler will generate five scheduling tokens and then stop. + +A scalable pipeline is move-only. +*/ +template +class ScalablePipeline { + + /** + @private + */ + struct Line { + std::atomic join_counter; + }; + + public: + + /** + @brief pipe type + */ + using pipe_t = typename std::iterator_traits
<P>
::value_type; + + /** + @brief default constructor + */ + ScalablePipeline() = default; + + /** + @brief constructs an empty scalable pipeline object + + @param num_lines the number of parallel lines + + An empty scalable pipeline does not have any pipes. + The pipeline needs to be reset to a valid range of pipes + before running. + */ + ScalablePipeline(size_t num_lines); + + /** + @brief constructs a scalable pipeline object + + @param num_lines the number of parallel lines + @param first iterator to the beginning of the range + @param last iterator to the end of the range + + Constructs a pipeline from the given range of pipes specified in + [first, last) using @c num_lines parallel lines. + The first pipe must define a serial direction (tf::PipeType::SERIAL) + or an exception will be thrown. + + Internally, the scalable pipeline copies the iterators + from the specified range. Those pipe callables pointed to by + these iterators must remain valid during the execution of the pipeline. + */ + ScalablePipeline(size_t num_lines, P first, P last); + + /** + @brief disabled copy constructor + */ + ScalablePipeline(const ScalablePipeline&) = delete; + + /** + @brief move constructor + + Constructs a pipeline from the given @c rhs using move semantics + (i.e. the data in @c rhs is moved into this pipeline). + After the move, @c rhs is in a state as if it is just constructed. + The behavior is undefined if @c rhs is running during the move. + */ + ScalablePipeline(ScalablePipeline&& rhs); + + /** + @brief disabled copy assignment operator + */ + ScalablePipeline& operator = (const ScalablePipeline&) = delete; + + /** + @brief move constructor + + Replaces the contents with those of @c rhs using move semantics + (i.e. the data in @c rhs is moved into this pipeline). + After the move, @c rhs is in a state as if it is just constructed. + The behavior is undefined if @c rhs is running during the move. + */ + ScalablePipeline& operator = (ScalablePipeline&& rhs); + + /** + @brief queries the number of parallel lines + + The function returns the number of parallel lines given by the user + upon the construction of the pipeline. + The number of lines represents the maximum parallelism this pipeline + can achieve. + */ + size_t num_lines() const noexcept; + + /** + @brief queries the number of pipes + + The Function returns the number of pipes given by the user + upon the construction of the pipeline. + */ + size_t num_pipes() const noexcept; + + /** + @brief resets the pipeline + + Resets the pipeline to the initial state. After resetting a pipeline, + its token identifier will start from zero. + */ + void reset(); + + /** + @brief resets the pipeline with a new range of pipes + + @param first iterator to the beginning of the range + @param last iterator to the end of the range + + The member function assigns the pipeline to a new range of pipes + specified in [first, last) and resets the pipeline to the + initial state. After resetting a pipeline, its token identifier will + start from zero. + + Internally, the scalable pipeline copies the iterators + from the specified range. Those pipe callables pointed to by + these iterators must remain valid during the execution of the pipeline. 
+ */ + void reset(P first, P last); + + /** + @brief resets the pipeline to a new line number and a + new range of pipes + + @param num_lines number of parallel lines + @param first iterator to the beginning of the range + @param last iterator to the end of the range + + The member function resets the pipeline to a new number of + parallel lines and a new range of pipes specified in + [first, last), as if the pipeline is just constructed. + After resetting a pipeline, its token identifier will start from zero. + + Internally, the scalable pipeline copies the iterators + from the specified range. Those pipe callables pointed to by + these iterators must remain valid during the execution of the pipeline. + */ + void reset(size_t num_lines, P first, P last); + + /** + @brief queries the number of generated tokens in the pipeline + + The number represents the total scheduling tokens that has been + generated by the pipeline so far. + */ + size_t num_tokens() const noexcept; + + /** + @brief obtains the graph object associated with the pipeline construct + + This method is primarily used as an opaque data structure for creating + a module task of the this pipeline. + */ + Graph& graph(); + + private: + + Graph _graph; + + size_t _num_tokens{0}; + + std::vector
<P>
_pipes; + std::vector _tasks; + std::vector _pipeflows; + std::unique_ptr _lines; + + // chchiu + std::queue> _ready_tokens; + std::unordered_map> _token_dependencies; + std::unordered_map _deferred_tokens; + size_t _longest_deferral = 0; + + void _check_dependents(Pipeflow&); + void _construct_deferred_tokens(Pipeflow&); + void _resolve_token_dependencies(Pipeflow&); + // chchiu + + void _on_pipe(Pipeflow&, Runtime&); + void _build(); + + Line& _line(size_t, size_t); +}; + +// constructor +template +ScalablePipeline
<P>
::ScalablePipeline(size_t num_lines) : + _tasks (num_lines + 1), + _pipeflows (num_lines) { + + if(num_lines == 0) { + TF_THROW("must have at least one line"); + } + + _build(); +} + +// constructor +template +ScalablePipeline
<P>
::ScalablePipeline(size_t num_lines, P first, P last) : + _tasks (num_lines + 1), + _pipeflows (num_lines) { + + if(num_lines == 0) { + TF_THROW("must have at least one line"); + } + + reset(first, last); + _build(); +} + +// move constructor +template +ScalablePipeline
<P>
::ScalablePipeline(ScalablePipeline&& rhs) : + _graph {std::move(rhs._graph)}, + _num_tokens {rhs._num_tokens}, + _pipes {std::move(rhs._pipes)}, + _tasks {std::move(rhs._tasks)}, + _pipeflows {std::move(rhs._pipeflows)}, + _lines {std::move(rhs._lines)}, + _ready_tokens {std::move(rhs._ready_tokens)}, + _token_dependencies {std::move(rhs._token_dependencies)}, + _deferred_tokens {std::move(rhs._deferred_tokens)}, + _longest_deferral {rhs._longest_deferral}{ + + rhs._longest_deferral = 0; + rhs._num_tokens = 0; +} + +// move assignment operator +template +ScalablePipeline
<P>
& ScalablePipeline
<P>
::operator = (ScalablePipeline&& rhs) { + _graph = std::move(rhs._graph); + _num_tokens = rhs._num_tokens; + _pipes = std::move(rhs._pipes); + _tasks = std::move(rhs._tasks); + _pipeflows = std::move(rhs._pipeflows); + _lines = std::move(rhs._lines); + rhs._num_tokens = 0; + _ready_tokens = std::move(rhs._ready_tokens); + _token_dependencies = std::move(rhs._token_dependencies); + _deferred_tokens = std::move(rhs._deferred_tokens); + _longest_deferral = rhs._longest_deferral; + rhs._longest_deferral = 0; + return *this; +} + +// Function: num_lines +template +size_t ScalablePipeline
<P>
::num_lines() const noexcept { + return _pipeflows.size(); +} + +// Function: num_pipes +template +size_t ScalablePipeline
<P>
::num_pipes() const noexcept { + return _pipes.size(); +} + +// Function: num_tokens +template +size_t ScalablePipeline
<P>
::num_tokens() const noexcept { + return _num_tokens; +} + +// Function: graph +template +Graph& ScalablePipeline
<P>
::graph() { + return _graph; +} + +// Function: _line +template +typename ScalablePipeline
<P>
::Line& ScalablePipeline
<P>
::_line(size_t l, size_t p) { + return _lines[l*num_pipes() + p]; +} + +template +void ScalablePipeline
<P>
::reset(size_t num_lines, P first, P last) { + + if(num_lines == 0) { + TF_THROW("must have at least one line"); + } + + _graph.clear(); + _tasks.resize(num_lines + 1); + _pipeflows.resize(num_lines); + + reset(first, last); + + _build(); +} + +// Function: reset +template +void ScalablePipeline
<P>
::reset(P first, P last) { + + size_t num_pipes = static_cast(std::distance(first, last)); + + if(num_pipes == 0) { + TF_THROW("pipeline cannot be empty"); + } + + if(first->type() != PipeType::SERIAL) { + TF_THROW("first pipe must be serial"); + } + + _pipes.resize(num_pipes); + + size_t i=0; + for(auto itr = first; itr != last; itr++) { + _pipes[i++] = itr; + } + + _lines = std::make_unique(num_lines() * _pipes.size()); + + reset(); +} + +// Function: reset +template +void ScalablePipeline
<P>
::reset() { + + _num_tokens = 0; + + for(size_t l = 0; l(_pipes[f]->type()), std::memory_order_relaxed + ); + } + } + + for(size_t f=1; f(_pipes[0]->type()) - 1, std::memory_order_relaxed + ); + } + + assert(_ready_tokens.empty() == true); + _token_dependencies.clear(); + _deferred_tokens.clear(); +} + +// Procedure: _on_pipe +template +void ScalablePipeline
<P>
::_on_pipe(Pipeflow& pf, Runtime& rt) { + + using callable_t = typename pipe_t::callable_t; + + if constexpr (std::is_invocable_v) { + _pipes[pf._pipe]->_callable(pf); + } + else if constexpr(std::is_invocable_v) { + _pipes[pf._pipe]->_callable(pf, rt); + } + else { + static_assert(dependent_false_v, "un-supported pipe callable type"); + } +} + +template +void ScalablePipeline
<P>
::_check_dependents(Pipeflow& pf) { + ++pf._num_deferrals; + + for (auto it = pf._dependents.begin(); it != pf._dependents.end();) { + + // valid (e.g., 12.defer(16)) + if (*it >= _num_tokens) { + _token_dependencies[*it].push_back(pf._token); + _longest_deferral = std::max(_longest_deferral, *it); + ++it; + } + // valid or invalid (e.g., 12.defer(7)) + else { + auto pit = _deferred_tokens.find(*it); + + // valid (e.g., 7 is deferred) + if (pit != _deferred_tokens.end()) { + _token_dependencies[*it].push_back(pf._token); + ++it; + } + + else { + it = pf._dependents.erase(it); + } + } + } +} + +// Procedure: _construct_deferred_tokens +// Construct a data structure for a deferred token +template +void ScalablePipeline
<P>
::_construct_deferred_tokens(Pipeflow& pf) { + + // construct the deferred pipeflow with zero copy + _deferred_tokens.emplace( + std::piecewise_construct, + std::forward_as_tuple(pf._token), + std::forward_as_tuple( + pf._token, pf._num_deferrals, std::move(pf._dependents) + ) + ); +} + +// Procedure: _resolve_token_dependencies +// Resolve dependencies for tokens that defer to current token +template +void ScalablePipeline
<P>
::_resolve_token_dependencies(Pipeflow& pf) { + + if (auto it = _token_dependencies.find(pf._token); + it != _token_dependencies.end()) { + + // iterate tokens that defer to pf._token + for(size_t target : it->second) { + + auto dpf = _deferred_tokens.find(target); + + assert(dpf != _deferred_tokens.end()); + + // erase pf._token from target's _dependents + dpf->second._dependents.erase(pf._token); + + // target has no dependents + if (dpf->second._dependents.empty()) { + _ready_tokens.emplace(dpf->second._token, dpf->second._num_deferrals); + _deferred_tokens.erase(dpf); + } + } + + _token_dependencies.erase(it); + } +} + +// Procedure: _build +template +void ScalablePipeline
<P>
::_build() { + + using namespace std::literals::string_literals; + + FlowBuilder fb(_graph); + + // init task + _tasks[0] = fb.emplace([this]() { + return static_cast(_num_tokens % num_lines()); + }).name("cond"); + + // line task + for(size_t l = 0; l < num_lines(); l++) { + + _tasks[l + 1] = fb.emplace([this, l] (tf::Runtime& rt) mutable { + + auto pf = &_pipeflows[l]; + + pipeline: + + _line(pf->_line, pf->_pipe).join_counter.store( + static_cast(_pipes[pf->_pipe]->type()), std::memory_order_relaxed + ); + + // First pipe does all jobs of initialization and token dependencies + if (pf->_pipe == 0) { + // _ready_tokens queue is not empty + // substitute pf with the token at the front of the queue + if (!_ready_tokens.empty()) { + pf->_token = _ready_tokens.front().first; + pf->_num_deferrals = _ready_tokens.front().second; + _ready_tokens.pop(); + } + else { + pf->_token = _num_tokens; + pf->_num_deferrals = 0; + } + + handle_token_dependency: + + if (pf->_stop = false, _on_pipe(*pf, rt); pf->_stop == true) { + // here, the pipeline is not stopped yet because other + // lines of tasks may still be running their last stages + return; + } + + if (_num_tokens == pf->_token) { + ++_num_tokens; + } + + if (pf->_dependents.empty() == false){ + // check if the pf->_dependents have valid dependents + _check_dependents(*pf); + + // tokens in pf->_dependents are all valid dependents + if (pf->_dependents.size()) { + + // construct a data structure for pf in _deferred_tokens + _construct_deferred_tokens(*pf); + goto pipeline; + } + + // tokens in pf->_dependents are invalid dependents + // directly goto on_pipe on the same line + else { + goto handle_token_dependency; + } + } + + // Every token within the deferral range needs to check + // if it can resolve dependencies on other tokens. 
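+          // For example, if 12.defer(16) and 13.defer(16) were recorded
+          // earlier, then when token 16 passes the first pipe here,
+          // _resolve_token_dependencies erases 16 from the dependent sets of
+          // 12 and 13 and moves any token whose set becomes empty into
+          // _ready_tokens.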
+ if (pf->_token <= _longest_deferral) { + _resolve_token_dependencies(*pf); + } + } + else { + _on_pipe(*pf, rt); + } + + size_t c_f = pf->_pipe; + size_t n_f = (pf->_pipe + 1) % num_pipes(); + size_t n_l = (pf->_line + 1) % num_lines(); + + pf->_pipe = n_f; + + // ---- scheduling starts here ---- + // Notice that the shared variable f must not be changed after this + // point because it can result in data race due to the following + // condition: + // + // a -> b + // | | + // v v + // c -> d + // + // d will be spawned by either c or b, so if c changes f but b spawns d + // then data race on f will happen + + std::array retval; + size_t n = 0; + + // downward dependency + if(_pipes[c_f]->type() == PipeType::SERIAL && + _line(n_l, c_f).join_counter.fetch_sub( + 1, std::memory_order_acq_rel) == 1 + ) { + retval[n++] = 1; + } + + // forward dependency + if(_line(pf->_line, n_f).join_counter.fetch_sub( + 1, std::memory_order_acq_rel) == 1 + ) { + retval[n++] = 0; + } + + // notice that the task index starts from 1 + switch(n) { + case 2: { + rt.schedule(_tasks[n_l+1]); + goto pipeline; + } + case 1: { + if (retval[0] == 1) { + pf = &_pipeflows[n_l]; + } + goto pipeline; + } + } + }).name("rt-"s + std::to_string(l)); + + _tasks[0].precede(_tasks[l+1]); + } +} + +} // end of namespace tf ----------------------------------------------------- + + + + + diff --git a/bundled/taskflow-3.6.0/include/algorithm/reduce.hpp b/bundled/taskflow-3.6.0/include/algorithm/reduce.hpp new file mode 100644 index 0000000000..64869dc7cb --- /dev/null +++ b/bundled/taskflow-3.6.0/include/algorithm/reduce.hpp @@ -0,0 +1,295 @@ +#pragma once + +#include "launch.hpp" + +namespace tf { + +namespace detail { + +// Function: make_reduce_task +template +TF_FORCE_INLINE auto make_reduce_task(B beg, E end, T& init, O bop, P&& part) { + + using B_t = std::decay_t>; + using E_t = std::decay_t>; + using namespace std::string_literals; + + return + [b=beg, e=end, &r=init, bop, part=std::forward
<P>
(part)] + (Runtime& rt) mutable { + + // fetch the iterator values + B_t beg = b; + E_t end = e; + + size_t W = rt.executor().num_workers(); + size_t N = std::distance(beg, end); + + // only myself - no need to spawn another graph + if(W <= 1 || N <= part.chunk_size()) { + for(; beg!=end; r = bop(r, *beg++)); + return; + } + + if(N < W) { + W = N; + } + + std::mutex mtx; + + // static partitioner + if constexpr(std::is_same_v, StaticPartitioner>) { + + size_t chunk_size; + + for(size_t w=0, curr_b=0; w lock(mtx); + r = bop(r, *beg); + return; + } + + auto beg1 = beg++; + auto beg2 = beg++; + T sum = bop(*beg1, *beg2); + + // loop reduce + part.loop(N, W, curr_b, chunk_size, + [&, prev_e=curr_b+2](size_t curr_b, size_t curr_e) mutable { + + if(curr_b > prev_e) { + std::advance(beg, curr_b - prev_e); + } + else { + curr_b = prev_e; + } + + for(size_t x=curr_b; x lock(mtx); + r = bop(r, sum); + + }); + } + rt.join(); + } + // dynamic partitioner + else { + std::atomic next(0); + launch_loop(N, W, rt, next, part, [=, &bop, &mtx, &next, &r, &part] () mutable { + // pre-reduce + size_t s0 = next.fetch_add(2, std::memory_order_relaxed); + + if(s0 >= N) { + return; + } + + std::advance(beg, s0); + + if(N - s0 == 1) { + std::lock_guard lock(mtx); + r = bop(r, *beg); + return; + } + + auto beg1 = beg++; + auto beg2 = beg++; + + T sum = bop(*beg1, *beg2); + + // loop reduce + part.loop(N, W, next, + [&, prev_e=s0+2](size_t curr_b, size_t curr_e) mutable { + std::advance(beg, curr_b - prev_e); + for(size_t x=curr_b; x lock(mtx); + r = bop(r, sum); + }); + } + }; +} + +// Function: make_transform_reduce_task +template +TF_FORCE_INLINE auto make_transform_reduce_task( + B beg, E end, T& init, BOP bop, UOP uop, P&& part +) { + + using B_t = std::decay_t>; + using E_t = std::decay_t>; + using namespace std::string_literals; + + return + [b=beg, e=end, &r=init, bop, uop, part=std::forward
<P>
(part)] + (Runtime& rt) mutable { + + // fetch the iterator values + B_t beg = b; + E_t end = e; + + size_t W = rt.executor().num_workers(); + size_t N = std::distance(beg, end); + + // only myself - no need to spawn another graph + if(W <= 1 || N <= part.chunk_size()) { + for(; beg!=end; r = bop(std::move(r), uop(*beg++))); + return; + } + + if(N < W) { + W = N; + } + + std::mutex mtx; + + // static partitioner + if constexpr(std::is_same_v, StaticPartitioner>) { + + size_t chunk_size; + + for(size_t w=0, curr_b=0; w lock(mtx); + r = bop(std::move(r), uop(*beg)); + return; + } + + //auto beg1 = beg++; + //auto beg2 = beg++; + //T sum = bop(uop(*beg1), uop(*beg2)); + + T sum = (chunk_size == 1) ? uop(*beg++) : bop(uop(*beg++), uop(*beg++)); + + // loop reduce + part.loop(N, W, curr_b, chunk_size, + [&, prev_e=curr_b+(chunk_size == 1 ? 1 : 2)] + (size_t curr_b, size_t curr_e) mutable { + if(curr_b > prev_e) { + std::advance(beg, curr_b - prev_e); + } + else { + curr_b = prev_e; + } + for(size_t x=curr_b; x lock(mtx); + r = bop(std::move(r), std::move(sum)); + + }); + } + + rt.join(); + } + // dynamic partitioner + else { + std::atomic next(0); + + launch_loop(N, W, rt, next, part, [=, &bop, &uop, &mtx, &next, &r, &part] () mutable { + + // pre-reduce + size_t s0 = next.fetch_add(2, std::memory_order_relaxed); + + if(s0 >= N) { + return; + } + + std::advance(beg, s0); + + if(N - s0 == 1) { + std::lock_guard lock(mtx); + r = bop(std::move(r), uop(*beg)); + return; + } + + auto beg1 = beg++; + auto beg2 = beg++; + + T sum = bop(uop(*beg1), uop(*beg2)); + + // loop reduce + part.loop(N, W, next, + [&, prev_e=s0+2](size_t curr_b, size_t curr_e) mutable { + std::advance(beg, curr_b - prev_e); + for(size_t x=curr_b; x lock(mtx); + r = bop(std::move(r), std::move(sum)); + }); + } + }; +} + +} // end of namespace detail ------------------------------------------------- + +// ---------------------------------------------------------------------------- +// default reduction +// ---------------------------------------------------------------------------- + +// Function: reduce +template +Task FlowBuilder::reduce(B beg, E end, T& init, O bop, P&& part) { + return emplace(detail::make_reduce_task( + beg, end, init, bop, std::forward
<P>
(part) + )); +} + +// ---------------------------------------------------------------------------- +// default transform and reduction +// ---------------------------------------------------------------------------- + +// Function: transform_reduce +template +Task FlowBuilder::transform_reduce( + B beg, E end, T& init, BOP bop, UOP uop, P&& part +) { + return emplace(detail::make_transform_reduce_task( + beg, end, init, bop, uop, std::forward
<P>
(part) + )); +} + +} // end of namespace tf ----------------------------------------------------- + + + + diff --git a/bundled/taskflow-3.6.0/include/algorithm/scan.hpp b/bundled/taskflow-3.6.0/include/algorithm/scan.hpp new file mode 100644 index 0000000000..cccb2057dd --- /dev/null +++ b/bundled/taskflow-3.6.0/include/algorithm/scan.hpp @@ -0,0 +1,614 @@ +#pragma once + +#include "launch.hpp" + +namespace tf { + +namespace detail { + +// Function: scan_loop +template +TF_FORCE_INLINE void scan_loop( + tf::Runtime& rt, + std::atomic& counter, + BufferT& buf, + B&& bop, + Iterator d_beg, + size_t W, + size_t w, + size_t chunk_size +){ + // whoever finishes the last performs global scan + if(counter.fetch_add(1, std::memory_order_acq_rel) == W-1) { + for(size_t i=1; i +TF_FORCE_INLINE auto make_inclusive_scan_task(B first, E last, D d_first, BOP bop) { + + using B_t = std::decay_t>; + using E_t = std::decay_t>; + using D_t = std::decay_t>; + using value_type = typename std::iterator_traits::value_type; + using namespace std::string_literals; + + return [=] (Runtime& rt) mutable { + + // fetch the stateful values + B_t s_beg = first; + E_t s_end = last; + D_t d_beg = d_first; + + if(s_beg == s_end) { + return; + } + + size_t W = rt.executor().num_workers(); + size_t N = std::distance(s_beg, s_end); + + // only myself - no need to spawn another graph + if(W <= 1 || N <= 2) { + std::inclusive_scan(s_beg, s_end, d_beg, bop); + return; + } + + if(N < W) { + W = N; + } + + std::vector> buf(W); + std::atomic counter(0); + + size_t Q = N/W; + size_t R = N%W; + + //auto orig_d_beg = d_beg; + //ExecutionPolicy policy; + + for(size_t w=0, curr_b=0, chunk_size; w +TF_FORCE_INLINE auto make_inclusive_scan_task(B first, E last, D d_first, BOP bop, T init) { + + using B_t = std::decay_t>; + using E_t = std::decay_t>; + using D_t = std::decay_t>; + using value_type = typename std::iterator_traits::value_type; + using namespace std::string_literals; + + return [=] (Runtime& rt) mutable { + + // fetch the stateful values + B_t s_beg = first; + E_t s_end = last; + D_t d_beg = d_first; + + if(s_beg == s_end) { + return; + } + + size_t W = rt.executor().num_workers(); + size_t N = std::distance(s_beg, s_end); + + // only myself - no need to spawn another graph + if(W <= 1 || N <= 2) { + std::inclusive_scan(s_beg, s_end, d_beg, bop, init); + return; + } + + if(N < W) { + W = N; + } + + std::vector> buf(W); + std::atomic counter(0); + + // set up the initial value for the first worker + buf[0].data = std::move(init); + + size_t Q = N/W; + size_t R = N%W; + + for(size_t w=0, curr_b=0, chunk_size; w +TF_FORCE_INLINE auto make_transform_inclusive_scan_task( + B first, E last, D d_first, BOP bop, UOP uop +) { + + using B_t = std::decay_t>; + using E_t = std::decay_t>; + using D_t = std::decay_t>; + using value_type = typename std::iterator_traits::value_type; + using namespace std::string_literals; + + return [=] (Runtime& rt) mutable { + + // fetch the stateful values + B_t s_beg = first; + E_t s_end = last; + D_t d_beg = d_first; + + if(s_beg == s_end) { + return; + } + + size_t W = rt.executor().num_workers(); + size_t N = std::distance(s_beg, s_end); + + // only myself - no need to spawn another graph + if(W <= 1 || N <= 2) { + std::transform_inclusive_scan(s_beg, s_end, d_beg, bop, uop); + return; + } + + if(N < W) { + W = N; + } + + std::vector> buf(W); + std::atomic counter(0); + + size_t Q = N/W; + size_t R = N%W; + + for(size_t w=0, curr_b=0, chunk_size; w +TF_FORCE_INLINE auto 
make_transform_inclusive_scan_task( + B first, E last, D d_first, BOP bop, UOP uop, T init +) { + + using B_t = std::decay_t>; + using E_t = std::decay_t>; + using D_t = std::decay_t>; + using value_type = typename std::iterator_traits::value_type; + using namespace std::string_literals; + + return [=] (Runtime& rt) mutable { + + // fetch the stateful values + B_t s_beg = first; + E_t s_end = last; + D_t d_beg = d_first; + + if(s_beg == s_end) { + return; + } + + size_t W = rt.executor().num_workers(); + size_t N = std::distance(s_beg, s_end); + + // only myself - no need to spawn another graph + if(W <= 1 || N <= 2) { + std::transform_inclusive_scan(s_beg, s_end, d_beg, bop, uop, init); + return; + } + + if(N < W) { + W = N; + } + + std::vector> buf(W); + std::atomic counter(0); + + // set up the initial value for the first worker + buf[0].data = std::move(init); + + size_t Q = N/W; + size_t R = N%W; + + for(size_t w=0, curr_b=0, chunk_size; w +TF_FORCE_INLINE auto make_exclusive_scan_task( + B first, E last, D d_first, T init, BOP bop +) { + + using B_t = std::decay_t>; + using E_t = std::decay_t>; + using D_t = std::decay_t>; + using value_type = typename std::iterator_traits::value_type; + using namespace std::string_literals; + + return [=] (Runtime& rt) mutable { + + // fetch the stateful values + B_t s_beg = first; + E_t s_end = last; + D_t d_beg = d_first; + + if(s_beg == s_end) { + return; + } + + size_t W = rt.executor().num_workers(); + size_t N = std::distance(s_beg, s_end); + + // only myself - no need to spawn another graph + if(W <= 1 || N <= 2) { + std::exclusive_scan(s_beg, s_end, d_beg, init, bop); + return; + } + + if(N < W) { + W = N; + } + + std::vector> buf(W); + std::atomic counter(0); + + size_t Q = N/W; + size_t R = N%W; + + // fetch the init value + auto s_beg_temp = s_beg; + for(size_t w=0, curr_b=0, chunk_size; w +TF_FORCE_INLINE auto make_transform_exclusive_scan_task( + B first, E last, D d_first, T init, BOP bop, UOP uop +) { + + using B_t = std::decay_t>; + using E_t = std::decay_t>; + using D_t = std::decay_t>; + using value_type = typename std::iterator_traits::value_type; + using namespace std::string_literals; + + return [=] (Runtime& rt) mutable { + + // fetch the stateful values + B_t s_beg = first; + E_t s_end = last; + D_t d_beg = d_first; + + if(s_beg == s_end) { + return; + } + + size_t W = rt.executor().num_workers(); + size_t N = std::distance(s_beg, s_end); + + // only myself - no need to spawn another graph + if(W <= 1 || N <= 2) { + std::transform_exclusive_scan(s_beg, s_end, d_beg, init, bop, uop); + return; + } + + if(N < W) { + W = N; + } + + std::vector> buf(W); + std::atomic counter(0); + + size_t Q = N/W; + size_t R = N%W; + + // fetch the init value + auto s_beg_temp = s_beg; + for(size_t w=0, curr_b=0, chunk_size; w +Task FlowBuilder::inclusive_scan(B first, E last, D d_first, BOP bop) { + return emplace(detail::make_inclusive_scan_task( + first, last, d_first, bop + )); +} + +// Function: inclusive_scan +template +Task FlowBuilder::inclusive_scan(B first, E last, D d_first, BOP bop, T init) { + return emplace(detail::make_inclusive_scan_task( + first, last, d_first, bop, init + )); +} + +// ---------------------------------------------------------------------------- +// Transform Inclusive Scan +// ---------------------------------------------------------------------------- + +// Function: transform_inclusive_scan +template +Task FlowBuilder::transform_inclusive_scan( + B first, E last, D d_first, BOP bop, UOP uop +) { + return 
emplace(detail::make_transform_inclusive_scan_task( + first, last, d_first, bop, uop + )); +} + +// Function: transform_inclusive_scan +template +Task FlowBuilder::transform_inclusive_scan( + B first, E last, D d_first, BOP bop, UOP uop, T init +) { + return emplace(detail::make_transform_inclusive_scan_task( + first, last, d_first, bop, uop, init + )); +} + +// ---------------------------------------------------------------------------- +// Exclusive Scan +// ---------------------------------------------------------------------------- + +// Function: exclusive_scan +template +Task FlowBuilder::exclusive_scan(B first, E last, D d_first, T init, BOP bop) { + return emplace(detail::make_exclusive_scan_task( + first, last, d_first, init, bop + )); +} + +// ---------------------------------------------------------------------------- +// Transform Exclusive Scan +// ---------------------------------------------------------------------------- + +// Function: transform_exclusive_scan +template +Task FlowBuilder::transform_exclusive_scan( + B first, E last, D d_first, T init, BOP bop, UOP uop +) { + return emplace(detail::make_transform_exclusive_scan_task( + first, last, d_first, init, bop, uop + )); +} + +} // end of namespace tf ----------------------------------------------------- diff --git a/bundled/taskflow-3.6.0/include/algorithm/sort.hpp b/bundled/taskflow-3.6.0/include/algorithm/sort.hpp new file mode 100644 index 0000000000..a4fdf3ce76 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/algorithm/sort.hpp @@ -0,0 +1,648 @@ +#pragma once + +#include "../core/async.hpp" + +namespace tf { + +// threshold whether or not to perform parallel sort +template +constexpr size_t parallel_sort_cutoff() { + + //using value_type = std::decay_t())>; + using value_type = typename std::iterator_traits::value_type; + + constexpr size_t object_size = sizeof(value_type); + + if constexpr(std::is_same_v) { + return 65536 / sizeof(std::string); + } + else { + if constexpr(object_size < 16) return 4096; + else if constexpr(object_size < 32) return 2048; + else if constexpr(object_size < 64) return 1024; + else if constexpr(object_size < 128) return 768; + else if constexpr(object_size < 256) return 512; + else if constexpr(object_size < 512) return 256; + else return 128; + } +} + +// ---------------------------------------------------------------------------- +// pattern-defeating quick sort (pdqsort) +// https://github.com/orlp/pdqsort/ +// ---------------------------------------------------------------------------- + +template +inline T* align_cacheline(T* p) { +#if defined(UINTPTR_MAX) && __cplusplus >= 201103L + std::uintptr_t ip = reinterpret_cast(p); +#else + std::size_t ip = reinterpret_cast(p); +#endif + ip = (ip + cacheline_size - 1) & -cacheline_size; + return reinterpret_cast(ip); +} + +template +inline void swap_offsets( + Iter first, Iter last, + unsigned char* offsets_l, unsigned char* offsets_r, + size_t num, bool use_swaps +) { + typedef typename std::iterator_traits::value_type T; + if (use_swaps) { + // This case is needed for the descending distribution, where we need + // to have proper swapping for pdqsort to remain O(n). 
+ for (size_t i = 0; i < num; ++i) { + std::iter_swap(first + offsets_l[i], last - offsets_r[i]); + } + } else if (num > 0) { + Iter l = first + offsets_l[0]; Iter r = last - offsets_r[0]; + T tmp(std::move(*l)); *l = std::move(*r); + for (size_t i = 1; i < num; ++i) { + l = first + offsets_l[i]; *r = std::move(*l); + r = last - offsets_r[i]; *l = std::move(*r); + } + *r = std::move(tmp); + } +} + +// Sorts [begin, end) using insertion sort with the given comparison function. +template +void insertion_sort(RandItr begin, RandItr end, Compare comp) { + + using T = typename std::iterator_traits::value_type; + + if (begin == end) { + return; + } + + for (RandItr cur = begin + 1; cur != end; ++cur) { + + RandItr shift = cur; + RandItr shift_1 = cur - 1; + + // Compare first to avoid 2 moves for an element + // already positioned correctly. + if (comp(*shift, *shift_1)) { + T tmp = std::move(*shift); + do { + *shift-- = std::move(*shift_1); + }while (shift != begin && comp(tmp, *--shift_1)); + *shift = std::move(tmp); + } + } +} + +// Sorts [begin, end) using insertion sort with the given comparison function. +// Assumes *(begin - 1) is an element smaller than or equal to any element +// in [begin, end). +template +void unguarded_insertion_sort(RandItr begin, RandItr end, Compare comp) { + + using T = typename std::iterator_traits::value_type; + + if (begin == end) { + return; + } + + for (RandItr cur = begin + 1; cur != end; ++cur) { + RandItr shift = cur; + RandItr shift_1 = cur - 1; + + // Compare first so we can avoid 2 moves + // for an element already positioned correctly. + if (comp(*shift, *shift_1)) { + T tmp = std::move(*shift); + + do { + *shift-- = std::move(*shift_1); + }while (comp(tmp, *--shift_1)); + + *shift = std::move(tmp); + } + } +} + +// Attempts to use insertion sort on [begin, end). +// Will return false if more than +// partial_insertion_sort_limit elements were moved, +// and abort sorting. Otherwise it will successfully sort and return true. +template +bool partial_insertion_sort(RandItr begin, RandItr end, Compare comp) { + + using T = typename std::iterator_traits::value_type; + using D = typename std::iterator_traits::difference_type; + + // When we detect an already sorted partition, attempt an insertion sort + // that allows this amount of element moves before giving up. + constexpr auto partial_insertion_sort_limit = D{8}; + + if (begin == end) return true; + + auto limit = D{0}; + + for (RandItr cur = begin + 1; cur != end; ++cur) { + + if (limit > partial_insertion_sort_limit) { + return false; + } + + RandItr shift = cur; + RandItr shift_1 = cur - 1; + + // Compare first so we can avoid 2 moves + // for an element already positioned correctly. + if (comp(*shift, *shift_1)) { + T tmp = std::move(*shift); + + do { + *shift-- = std::move(*shift_1); + }while (shift != begin && comp(tmp, *--shift_1)); + + *shift = std::move(tmp); + limit += cur - shift; + } + } + + return true; +} + +// Partitions [begin, end) around pivot *begin using comparison function comp. Elements equal +// to the pivot are put in the right-hand partition. Returns the position of the pivot after +// partitioning and whether the passed sequence already was correctly partitioned. Assumes the +// pivot is a median of at least 3 elements and that [begin, end) is at least +// insertion_sort_threshold long. Uses branchless partitioning. 
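+// The branchless part below records, per 64-element block, the offsets of
+// elements that sit on the wrong side of the pivot using data-independent
+// increments (num_l += !comp(*first, pivot)), and then exchanges the recorded
+// pairs in bulk via swap_offsets instead of branching on every comparison.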
+template +std::pair partition_right_branchless(Iter begin, Iter end, Compare comp) { + + typedef typename std::iterator_traits::value_type T; + + constexpr size_t block_size = 64; + constexpr size_t cacheline_size = 64; + + // Move pivot into local for speed. + T pivot(std::move(*begin)); + Iter first = begin; + Iter last = end; + + // Find the first element greater than or equal than the pivot (the median of 3 guarantees + // this exists). + while (comp(*++first, pivot)); + + // Find the first element strictly smaller than the pivot. We have to guard this search if + // there was no element before *first. + if (first - 1 == begin) while (first < last && !comp(*--last, pivot)); + else while ( !comp(*--last, pivot)); + + // If the first pair of elements that should be swapped to partition are the same element, + // the passed in sequence already was correctly partitioned. + bool already_partitioned = first >= last; + if (!already_partitioned) { + std::iter_swap(first, last); + ++first; + + // The following branchless partitioning is derived from "BlockQuicksort: How Branch + // Mispredictions don't affect Quicksort" by Stefan Edelkamp and Armin Weiss, but + // heavily micro-optimized. + unsigned char offsets_l_storage[block_size + cacheline_size]; + unsigned char offsets_r_storage[block_size + cacheline_size]; + unsigned char* offsets_l = align_cacheline(offsets_l_storage); + unsigned char* offsets_r = align_cacheline(offsets_r_storage); + + Iter offsets_l_base = first; + Iter offsets_r_base = last; + size_t num_l, num_r, start_l, start_r; + num_l = num_r = start_l = start_r = 0; + + while (first < last) { + // Fill up offset blocks with elements that are on the wrong side. + // First we determine how much elements are considered for each offset block. + size_t num_unknown = last - first; + size_t left_split = num_l == 0 ? (num_r == 0 ? num_unknown / 2 : num_unknown) : 0; + size_t right_split = num_r == 0 ? (num_unknown - left_split) : 0; + + // Fill the offset blocks. + if (left_split >= block_size) { + for (size_t i = 0; i < block_size;) { + offsets_l[num_l] = i++; num_l += !comp(*first, pivot); ++first; + offsets_l[num_l] = i++; num_l += !comp(*first, pivot); ++first; + offsets_l[num_l] = i++; num_l += !comp(*first, pivot); ++first; + offsets_l[num_l] = i++; num_l += !comp(*first, pivot); ++first; + offsets_l[num_l] = i++; num_l += !comp(*first, pivot); ++first; + offsets_l[num_l] = i++; num_l += !comp(*first, pivot); ++first; + offsets_l[num_l] = i++; num_l += !comp(*first, pivot); ++first; + offsets_l[num_l] = i++; num_l += !comp(*first, pivot); ++first; + } + } else { + for (size_t i = 0; i < left_split;) { + offsets_l[num_l] = i++; num_l += !comp(*first, pivot); ++first; + } + } + + if (right_split >= block_size) { + for (size_t i = 0; i < block_size;) { + offsets_r[num_r] = ++i; num_r += comp(*--last, pivot); + offsets_r[num_r] = ++i; num_r += comp(*--last, pivot); + offsets_r[num_r] = ++i; num_r += comp(*--last, pivot); + offsets_r[num_r] = ++i; num_r += comp(*--last, pivot); + offsets_r[num_r] = ++i; num_r += comp(*--last, pivot); + offsets_r[num_r] = ++i; num_r += comp(*--last, pivot); + offsets_r[num_r] = ++i; num_r += comp(*--last, pivot); + offsets_r[num_r] = ++i; num_r += comp(*--last, pivot); + } + } else { + for (size_t i = 0; i < right_split;) { + offsets_r[num_r] = ++i; num_r += comp(*--last, pivot); + } + } + + // Swap elements and update block sizes and first/last boundaries. 
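+      // Only min(num_l, num_r) recorded pairs can be exchanged this round;
+      // the unconsumed offsets are kept (start_l/start_r advance past the
+      // consumed entries) and the exhausted side re-bases its block on the
+      // next pass.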
+ size_t num = std::min(num_l, num_r); + swap_offsets( + offsets_l_base, offsets_r_base, + offsets_l + start_l, offsets_r + start_r, + num, num_l == num_r + ); + num_l -= num; num_r -= num; + start_l += num; start_r += num; + + if (num_l == 0) { + start_l = 0; + offsets_l_base = first; + } + + if (num_r == 0) { + start_r = 0; + offsets_r_base = last; + } + } + + // We have now fully identified [first, last)'s proper position. Swap the last elements. + if (num_l) { + offsets_l += start_l; + while (num_l--) std::iter_swap(offsets_l_base + offsets_l[num_l], --last); + first = last; + } + if (num_r) { + offsets_r += start_r; + while (num_r--) std::iter_swap(offsets_r_base - offsets_r[num_r], first), ++first; + last = first; + } + } + + // Put the pivot in the right place. + Iter pivot_pos = first - 1; + *begin = std::move(*pivot_pos); + *pivot_pos = std::move(pivot); + + return std::make_pair(pivot_pos, already_partitioned); +} + +// Partitions [begin, end) around pivot *begin using comparison function comp. +// Elements equal to the pivot are put in the right-hand partition. +// Returns the position of the pivot after partitioning and whether the passed +// sequence already was correctly partitioned. +// Assumes the pivot is a median of at least 3 elements and that [begin, end) +// is at least insertion_sort_threshold long. +template +std::pair partition_right(Iter begin, Iter end, Compare comp) { + + using T = typename std::iterator_traits::value_type; + + // Move pivot into local for speed. + T pivot(std::move(*begin)); + + Iter first = begin; + Iter last = end; + + // Find the first element greater than or equal than the pivot + // (the median of 3 guarantees/ this exists). + while (comp(*++first, pivot)); + + // Find the first element strictly smaller than the pivot. + // We have to guard this search if there was no element before *first. + if (first - 1 == begin) while (first < last && !comp(*--last, pivot)); + else while (!comp(*--last, pivot)); + + // If the first pair of elements that should be swapped to partition + // are the same element, the passed in sequence already was correctly + // partitioned. + bool already_partitioned = first >= last; + + // Keep swapping pairs of elements that are on the wrong side of the pivot. + // Previously swapped pairs guard the searches, + // which is why the first iteration is special-cased above. + while (first < last) { + std::iter_swap(first, last); + while (comp(*++first, pivot)); + while (!comp(*--last, pivot)); + } + + // Put the pivot in the right place. + Iter pivot_pos = first - 1; + *begin = std::move(*pivot_pos); + *pivot_pos = std::move(pivot); + + return std::make_pair(pivot_pos, already_partitioned); +} + +// Similar function to the one above, except elements equal to the pivot +// are put to the left of the pivot and it doesn't check or return +// if the passed sequence already was partitioned. +// Since this is rarely used (the many equal case), +// and in that case pdqsort already has O(n) performance, +// no block quicksort is applied here for simplicity. 
+template +RandItr partition_left(RandItr begin, RandItr end, Compare comp) { + + using T = typename std::iterator_traits::value_type; + + T pivot(std::move(*begin)); + + RandItr first = begin; + RandItr last = end; + + while (comp(pivot, *--last)); + + if (last + 1 == end) { + while (first < last && !comp(pivot, *++first)); + } + else { + while (!comp(pivot, *++first)); + } + + while (first < last) { + std::iter_swap(first, last); + while (comp(pivot, *--last)); + while (!comp(pivot, *++first)); + } + + RandItr pivot_pos = last; + *begin = std::move(*pivot_pos); + *pivot_pos = std::move(pivot); + + return pivot_pos; +} + +template +void parallel_pdqsort( + tf::Runtime& rt, + Iter begin, Iter end, Compare comp, + int bad_allowed, bool leftmost = true +) { + + // Partitions below this size are sorted sequentially + constexpr auto cutoff = parallel_sort_cutoff(); + + // Partitions below this size are sorted using insertion sort + constexpr auto insertion_sort_threshold = 24; + + // Partitions above this size use Tukey's ninther to select the pivot. + constexpr auto ninther_threshold = 128; + + //using diff_t = typename std::iterator_traits::difference_type; + + // Use a while loop for tail recursion elimination. + while (true) { + + //diff_t size = end - begin; + size_t size = end - begin; + + // Insertion sort is faster for small arrays. + if (size < insertion_sort_threshold) { + if (leftmost) { + insertion_sort(begin, end, comp); + } + else { + unguarded_insertion_sort(begin, end, comp); + } + return; + } + + if(size <= cutoff) { + std::sort(begin, end, comp); + return; + } + + // Choose pivot as median of 3 or pseudomedian of 9. + //diff_t s2 = size / 2; + size_t s2 = size >> 1; + if (size > ninther_threshold) { + sort3(begin, begin + s2, end - 1, comp); + sort3(begin + 1, begin + (s2 - 1), end - 2, comp); + sort3(begin + 2, begin + (s2 + 1), end - 3, comp); + sort3(begin + (s2 - 1), begin + s2, begin + (s2 + 1), comp); + std::iter_swap(begin, begin + s2); + } + else { + sort3(begin + s2, begin, end - 1, comp); + } + + // If *(begin - 1) is the end of the right partition + // of a previous partition operation, there is no element in [begin, end) + // that is smaller than *(begin - 1). + // Then if our pivot compares equal to *(begin - 1) we change strategy, + // putting equal elements in the left partition, + // greater elements in the right partition. + // We do not have to recurse on the left partition, + // since it's sorted (all equal). + if (!leftmost && !comp(*(begin - 1), *begin)) { + begin = partition_left(begin, end, comp) + 1; + continue; + } + + // Partition and get results. + const auto pair = Branchless ? partition_right_branchless(begin, end, comp) : + partition_right(begin, end, comp); + + const auto pivot_pos = pair.first; + const auto already_partitioned = pair.second; + + // Check for a highly unbalanced partition. + //diff_t l_size = pivot_pos - begin; + //diff_t r_size = end - (pivot_pos + 1); + const size_t l_size = pivot_pos - begin; + const size_t r_size = end - (pivot_pos + 1); + const bool highly_unbalanced = l_size < size / 8 || r_size < size / 8; + + // If we got a highly unbalanced partition we shuffle elements + // to break many patterns. + if (highly_unbalanced) { + // If we had too many bad partitions, switch to heapsort + // to guarantee O(n log n). 
+ if (--bad_allowed == 0) { + std::make_heap(begin, end, comp); + std::sort_heap(begin, end, comp); + return; + } + + if (l_size >= insertion_sort_threshold) { + std::iter_swap(begin, begin + l_size / 4); + std::iter_swap(pivot_pos - 1, pivot_pos - l_size / 4); + if (l_size > ninther_threshold) { + std::iter_swap(begin + 1, begin + (l_size / 4 + 1)); + std::iter_swap(begin + 2, begin + (l_size / 4 + 2)); + std::iter_swap(pivot_pos - 2, pivot_pos - (l_size / 4 + 1)); + std::iter_swap(pivot_pos - 3, pivot_pos - (l_size / 4 + 2)); + } + } + + if (r_size >= insertion_sort_threshold) { + std::iter_swap(pivot_pos + 1, pivot_pos + (1 + r_size / 4)); + std::iter_swap(end - 1, end - r_size / 4); + if (r_size > ninther_threshold) { + std::iter_swap(pivot_pos + 2, pivot_pos + (2 + r_size / 4)); + std::iter_swap(pivot_pos + 3, pivot_pos + (3 + r_size / 4)); + std::iter_swap(end - 2, end - (1 + r_size / 4)); + std::iter_swap(end - 3, end - (2 + r_size / 4)); + } + } + } + // decently balanced + else { + // sequence try to use insertion sort. + if (already_partitioned && + partial_insertion_sort(begin, pivot_pos, comp) && + partial_insertion_sort(pivot_pos + 1, end, comp) + ) { + return; + } + } + + // Sort the left partition first using recursion and + // do tail recursion elimination for the right-hand partition. + rt.silent_async( + [&rt, begin, pivot_pos, comp, bad_allowed, leftmost] () mutable { + parallel_pdqsort( + rt, begin, pivot_pos, comp, bad_allowed, leftmost + ); + } + ); + begin = pivot_pos + 1; + leftmost = false; + } +} + +// ---------------------------------------------------------------------------- +// 3-way quick sort +// ---------------------------------------------------------------------------- + +// 3-way quick sort +template +void parallel_3wqsort(tf::Runtime& rt, RandItr first, RandItr last, C compare) { + + using namespace std::string_literals; + + constexpr auto cutoff = parallel_sort_cutoff(); + + sort_partition: + + if(static_cast(last - first) < cutoff) { + std::sort(first, last+1, compare); + return; + } + + auto m = pseudo_median_of_nine(first, last, compare); + + if(m != first) { + std::iter_swap(first, m); + } + + auto l = first; + auto r = last; + auto f = std::next(first, 1); + bool is_swapped_l = false; + bool is_swapped_r = false; + + while(f <= r) { + if(compare(*f, *l)) { + is_swapped_l = true; + std::iter_swap(l, f); + l++; + f++; + } + else if(compare(*l, *f)) { + is_swapped_r = true; + std::iter_swap(r, f); + r--; + } + else { + f++; + } + } + + if(l - first > 1 && is_swapped_l) { + //rt.emplace([&](tf::Runtime& rtl) mutable { + // parallel_3wqsort(rtl, first, l-1, compare); + //}); + rt.silent_async([&rt, first, l, &compare] () mutable { + parallel_3wqsort(rt, first, l-1, compare); + }); + } + + if(last - r > 1 && is_swapped_r) { + //rt.emplace([&](tf::Runtime& rtr) mutable { + // parallel_3wqsort(rtr, r+1, last, compare); + //}); + //rt.silent_async([&rt, r, last, &compare] () mutable { + // parallel_3wqsort(rt, r+1, last, compare); + //}); + first = r+1; + goto sort_partition; + } + + //rt.join(); +} + +// ---------------------------------------------------------------------------- +// tf::Taskflow::sort +// ---------------------------------------------------------------------------- + +// Function: sort +template +Task FlowBuilder::sort(B beg, E end, C cmp) { + + Task task = emplace([b=beg, e=end, cmp] (Runtime& rt) mutable { + + using B_t = std::decay_t>; + using E_t = std::decay_t>; + + // fetch the iterator values + B_t beg = b; + E_t end = e; + + 
if(beg == end) { + return; + } + + size_t W = rt._executor.num_workers(); + size_t N = std::distance(beg, end); + + // only myself - no need to spawn another graph + if(W <= 1 || N <= parallel_sort_cutoff()) { + std::sort(beg, end, cmp); + return; + } + + //parallel_3wqsort(rt, beg, end-1, cmp); + parallel_pdqsort> && + std::is_arithmetic_v::value_type> + >(rt, beg, end, cmp, log2(end - beg)); + + rt.join(); + }); + + return task; +} + +// Function: sort +template +Task FlowBuilder::sort(B beg, E end) { + using value_type = std::decay_t())>; + return sort(beg, end, std::less{}); +} + +} // namespace tf ------------------------------------------------------------ + diff --git a/bundled/taskflow-3.6.0/include/algorithm/transform.hpp b/bundled/taskflow-3.6.0/include/algorithm/transform.hpp new file mode 100644 index 0000000000..4c87887707 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/algorithm/transform.hpp @@ -0,0 +1,199 @@ +#pragma once + +#include "launch.hpp" + +namespace tf { + +namespace detail { + +// Function: make_transform_task +template +TF_FORCE_INLINE auto make_transform_task( + B first1, E last1, O d_first, C c, P&& part +) { + + using namespace std::string_literals; + + using B_t = std::decay_t>; + using E_t = std::decay_t>; + using O_t = std::decay_t>; + + return + [first1, last1, d_first, c, part=std::forward
<P>
(part)] + (Runtime& rt) mutable { + + // fetch the stateful values + B_t beg = first1; + E_t end = last1; + O_t d_beg = d_first; + + size_t W = rt.executor().num_workers(); + size_t N = std::distance(beg, end); + + // only myself - no need to spawn another graph + if(W <= 1 || N <= part.chunk_size()) { + std::transform(beg, end, d_beg, c); + return; + } + + if(N < W) { + W = N; + } + + // static partitioner + if constexpr(std::is_same_v, StaticPartitioner>) { + size_t chunk_size; + for(size_t w=0, curr_b=0; w next(0); + + launch_loop(N, W, rt, next, part, [=, &next, &part] () mutable { + part.loop(N, W, next, + [&, prev_e=size_t{0}](size_t curr_b, size_t curr_e) mutable { + std::advance(beg, curr_b - prev_e); + std::advance(d_beg, curr_b - prev_e); + for(size_t x = curr_b; x>, void>* = nullptr +> +TF_FORCE_INLINE auto make_transform_task( + B1 first1, E1 last1, B2 first2, O d_first, C c, P&& part +) { + + using namespace std::string_literals; + + using B1_t = std::decay_t>; + using E1_t = std::decay_t>; + using B2_t = std::decay_t>; + using O_t = std::decay_t>; + + return + [first1, last1, first2, d_first, c, part=std::forward
<P>
(part)] + (Runtime& rt) mutable { + + // fetch the stateful values + B1_t beg1 = first1; + E1_t end1 = last1; + B2_t beg2 = first2; + O_t d_beg = d_first; + + size_t W = rt.executor().num_workers(); + size_t N = std::distance(beg1, end1); + + // only myself - no need to spawn another graph + if(W <= 1 || N <= part.chunk_size()) { + std::transform(beg1, end1, beg2, d_beg, c); + return; + } + + if(N < W) { + W = N; + } + + // static partitioner + if constexpr(std::is_same_v, StaticPartitioner>) { + size_t chunk_size; + for(size_t w=0, curr_b=0; w next(0); + launch_loop(N, W, rt, next, part, [=, &c, &next, &part] () mutable { + part.loop(N, W, next, + [&, prev_e=size_t{0}](size_t curr_b, size_t curr_e) mutable { + std::advance(beg1, curr_b - prev_e); + std::advance(beg2, curr_b - prev_e); + std::advance(d_beg, curr_b - prev_e); + for(size_t x = curr_b; x +Task FlowBuilder::transform(B first1, E last1, O d_first, C c, P&& part) { + return emplace( + detail::make_transform_task(first1, last1, d_first, c, std::forward
<P>
(part)) + ); +} + +// ---------------------------------------------------------------------------- +// transform2 +// ---------------------------------------------------------------------------- + +// Function: transform +template < + typename B1, typename E1, typename B2, typename O, typename C, typename P, + std::enable_if_t>, void>* +> +Task FlowBuilder::transform( + B1 first1, E1 last1, B2 first2, O d_first, C c, P&& part +) { + + return emplace(detail::make_transform_task( + first1, last1, first2, d_first, c, std::forward
<P>
(part) + )); +} + + +} // end of namespace tf ----------------------------------------------------- + + + diff --git a/bundled/taskflow-3.6.0/include/core/async.hpp b/bundled/taskflow-3.6.0/include/core/async.hpp new file mode 100644 index 0000000000..69788c64b8 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/core/async.hpp @@ -0,0 +1,396 @@ +#pragma once + +#include "executor.hpp" + +// https://hackmd.io/@sysprog/concurrency-atomics + +namespace tf { + +// ---------------------------------------------------------------------------- +// Async +// ---------------------------------------------------------------------------- + +// Function: async +template +auto Executor::async(const std::string& name, F&& f) { + + _increment_topology(); + + using R = std::invoke_result_t>; + + std::promise p; + auto fu{p.get_future()}; + + auto node = node_pool.animate( + name, 0, nullptr, nullptr, 0, + std::in_place_type_t{}, + _make_promised_async(std::move(p), std::forward(f)) + ); + + _schedule_async_task(node); + + return fu; +} + +// Function: async +template +auto Executor::async(F&& f) { + return async("", std::forward(f)); +} + +// ---------------------------------------------------------------------------- +// Silent Async +// ---------------------------------------------------------------------------- + +// Function: silent_async +template +void Executor::silent_async(const std::string& name, F&& f) { + + _increment_topology(); + + auto node = node_pool.animate( + name, 0, nullptr, nullptr, 0, + std::in_place_type_t{}, std::forward(f) + ); + + _schedule_async_task(node); +} + +// Function: silent_async +template +void Executor::silent_async(F&& f) { + silent_async("", std::forward(f)); +} + +// ---------------------------------------------------------------------------- +// Async Helper Methods +// ---------------------------------------------------------------------------- + +// Function: _make_promised_async +template +auto Executor::_make_promised_async(std::promise&& p, F&& func) { + return [p=make_moc(std::move(p)), func=std::forward(func)]() mutable { + if constexpr(std::is_same_v) { + func(); + p.object.set_value(); + } + else { + p.object.set_value(func()); + } + }; +} + +// Procedure: _schedule_async_task +inline void Executor::_schedule_async_task(Node* node) { + if(auto w = _this_worker(); w) { + _schedule(*w, node); + } + else{ + _schedule(node); + } +} + +// Procedure: _tear_down_async +inline void Executor::_tear_down_async(Node* node) { + // from runtime + if(node->_parent) { + node->_parent->_join_counter.fetch_sub(1, std::memory_order_release); + } + // from executor + else { + _decrement_topology_and_notify(); + } + node_pool.recycle(node); +} + +// ---------------------------------------------------------------------------- +// Silent Dependent Async +// ---------------------------------------------------------------------------- + +// Function: silent_dependent_async +template ...>, void>* +> +tf::AsyncTask Executor::silent_dependent_async(F&& func, Tasks&&... tasks) { + return silent_dependent_async("", std::forward(func), std::forward(tasks)...); +} + +// Function: silent_dependent_async +template ...>, void>* +> +tf::AsyncTask Executor::silent_dependent_async( + const std::string& name, F&& func, Tasks&&... 
tasks +){ + + _increment_topology(); + + size_t num_dependents = sizeof...(Tasks); + + std::shared_ptr node( + node_pool.animate( + name, 0, nullptr, nullptr, num_dependents, + std::in_place_type_t{}, std::forward(func) + ), + [&](Node* ptr){ node_pool.recycle(ptr); } + ); + + { + std::scoped_lock lock(_asyncs_mutex); + _asyncs.insert(node); + } + + if constexpr(sizeof...(Tasks) > 0) { + (_process_async_dependent(node.get(), tasks, num_dependents), ...); + } + + if(num_dependents == 0) { + _schedule_async_task(node.get()); + } + + return AsyncTask(std::move(node)); +} + +// Function: silent_dependent_async +template , AsyncTask>, void>* +> +tf::AsyncTask Executor::silent_dependent_async(F&& func, I first, I last) { + return silent_dependent_async("", std::forward(func), first, last); +} + +// Function: silent_dependent_async +template , AsyncTask>, void>* +> +tf::AsyncTask Executor::silent_dependent_async( + const std::string& name, F&& func, I first, I last +) { + + _increment_topology(); + + size_t num_dependents = std::distance(first, last); + + std::shared_ptr node( + node_pool.animate( + name, 0, nullptr, nullptr, num_dependents, + std::in_place_type_t{}, std::forward(func) + ), + [&](Node* ptr){ node_pool.recycle(ptr); } + ); + + { + std::scoped_lock lock(_asyncs_mutex); + _asyncs.insert(node); + } + + for(; first != last; first++){ + _process_async_dependent(node.get(), *first, num_dependents); + } + + if(num_dependents == 0) { + _schedule_async_task(node.get()); + } + + return AsyncTask(std::move(node)); +} + +// ---------------------------------------------------------------------------- +// Dependent Async +// ---------------------------------------------------------------------------- + +// Function: dependent_async +template ...>, void>* +> +auto Executor::dependent_async(F&& func, Tasks&&... tasks) { + return dependent_async("", std::forward(func), std::forward(tasks)...); +} + +// Function: dependent_async +template ...>, void>* +> +auto Executor::dependent_async( + const std::string& name, F&& func, Tasks&&... 
tasks +) { + + _increment_topology(); + + using R = std::invoke_result_t>; + + std::promise p; + auto fu{p.get_future()}; + + size_t num_dependents = sizeof...(tasks); + + std::shared_ptr node( + node_pool.animate( + name, 0, nullptr, nullptr, num_dependents, + std::in_place_type_t{}, + _make_promised_async(std::move(p), std::forward(func)) + ), + [&](Node* ptr){ node_pool.recycle(ptr); } + ); + + { + std::scoped_lock lock(_asyncs_mutex); + _asyncs.insert(node); + } + + if constexpr(sizeof...(Tasks) > 0) { + (_process_async_dependent(node.get(), tasks, num_dependents), ...); + } + + if(num_dependents == 0) { + _schedule_async_task(node.get()); + } + + return std::make_pair(AsyncTask(std::move(node)), std::move(fu)); +} + +// Function: dependent_async +template , AsyncTask>, void>* +> +auto Executor::dependent_async(F&& func, I first, I last) { + return dependent_async("", std::forward(func), first, last); +} + +// Function: dependent_async +template , AsyncTask>, void>* +> +auto Executor::dependent_async( + const std::string& name, F&& func, I first, I last +) { + + _increment_topology(); + + using R = std::invoke_result_t>; + + std::promise p; + auto fu{p.get_future()}; + + size_t num_dependents = std::distance(first, last); + + std::shared_ptr node( + node_pool.animate( + name, 0, nullptr, nullptr, num_dependents, + std::in_place_type_t{}, + _make_promised_async(std::move(p), std::forward(func)) + ), + [&](Node* ptr){ node_pool.recycle(ptr); } + ); + + { + std::scoped_lock lock(_asyncs_mutex); + _asyncs.insert(node); + } + + for(; first != last; first++) { + _process_async_dependent(node.get(), *first, num_dependents); + } + + if(num_dependents == 0) { + _schedule_async_task(node.get()); + } + + return std::make_pair(AsyncTask(std::move(node)), std::move(fu)); +} + +// ---------------------------------------------------------------------------- +// Dependent Async Helper Functions +// ---------------------------------------------------------------------------- + +// Procedure: _process_async_dependent +inline void Executor::_process_async_dependent( + Node* node, tf::AsyncTask& task, size_t& num_dependents +) { + + std::shared_ptr dep; + { + std::scoped_lock lock(_asyncs_mutex); + if(auto itr = _asyncs.find(task._node); itr != _asyncs.end()){ + dep = *itr; + } + } + + // if the dependent task exists + if(dep) { + auto& state = std::get_if(&(dep->_handle))->state; + + add_dependent: + + auto target = Node::AsyncState::UNFINISHED; + + // acquires the lock + if(state.compare_exchange_weak(target, Node::AsyncState::LOCKED, + std::memory_order_acq_rel, + std::memory_order_acquire)) { + dep->_successors.push_back(node); + state.store(Node::AsyncState::UNFINISHED, std::memory_order_release); + } + // dep's state is FINISHED, which means dep finished its callable already + // thus decrement the node's join counter by 1 + else if (target == Node::AsyncState::FINISHED) { + // decrement the counter needs to be the order of acquire and release + // to synchronize with the worker + num_dependents = node->_join_counter.fetch_sub(1, std::memory_order_acq_rel) - 1; + } + // another worker adding an async task that shares the same dependent + else { + goto add_dependent; + } + } + else { + num_dependents = node->_join_counter.fetch_sub(1, std::memory_order_acq_rel) - 1; + } +} + +// Procedure: _tear_down_dependent_async +inline void Executor::_tear_down_dependent_async(Worker& worker, Node* node) { + + // this async task comes from Executor + auto& state = std::get_if(&(node->_handle))->state; + auto 
target = Node::AsyncState::UNFINISHED; + + while(!state.compare_exchange_weak(target, Node::AsyncState::FINISHED, + std::memory_order_acq_rel, + std::memory_order_relaxed)) { + target = Node::AsyncState::UNFINISHED; + } + + // spaw successors whenever their dependencies are resolved + worker._cache = nullptr; + for(size_t i=0; i_successors.size(); ++i) { + //if(auto s = node->_successors[i]; --(s->_join_counter) == 0) { + if(auto s = node->_successors[i]; + s->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1 + ) { + if(worker._cache) { + _schedule(worker, worker._cache); + } + worker._cache = s; + } + } + + // remove myself from the asyncs using extraction to avoid calling + // ~Node inside the lock + typename std::unordered_set>::node_type extracted; + { + std::shared_ptr ptr(node, [](Node*){}); + std::scoped_lock lock(_asyncs_mutex); + extracted = _asyncs.extract(ptr); + // assert(extracted.empty() == false); + } + + _decrement_topology_and_notify(); +} + + + + + +} // end of namespace tf ----------------------------------------------------- + diff --git a/bundled/taskflow-3.6.0/include/core/async_task.hpp b/bundled/taskflow-3.6.0/include/core/async_task.hpp new file mode 100644 index 0000000000..7c92d8e557 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/core/async_task.hpp @@ -0,0 +1,125 @@ +#pragma once + +#include "graph.hpp" + +/** +@file async_task.hpp +@brief asynchronous task include file +*/ + +namespace tf { + +// ---------------------------------------------------------------------------- +// AsyncTask +// ---------------------------------------------------------------------------- + +/** +@brief class to create a dependent asynchronous task + +A tf::AsyncTask is a lightweight handle that retains @em shared ownership +of a dependent async task created by an executor. +This shared ownership ensures that the async task remains alive when +adding it to the dependency list of another async task, +thus avoiding the classical [ABA problem](https://en.wikipedia.org/wiki/ABA_problem). + +@code{.cpp} +// main thread retains shared ownership of async task A +tf::AsyncTask A = executor.silent_dependent_async([](){}); + +// task A remains alive (i.e., at least one ref count by the main thread) +// when being added to the dependency list of async task B +tf::AsyncTask B = executor.silent_dependent_async([](){}, A); +@endcode + +Currently, tf::AsyncTask is implemented based on C++ smart pointer std::shared_ptr and +is considered cheap to copy or move as long as only a handful of objects +own it. +When a worker completes an async task, it will remove the task from the executor, +decrementing the number of shared owners by one. +If that counter reaches zero, the task is destroyed. 
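+
+For illustration, a minimal sketch using only the interface declared below:
+releasing a handle gives up this owner's reference but does not cancel a task
+the executor still owns.
+
+@code{.cpp}
+tf::AsyncTask A = executor.silent_dependent_async([](){});
+tf::AsyncTask B = executor.silent_dependent_async([](){}, A);
+
+A.reset();                // drop the main thread's reference to A
+assert(A.empty());        // A no longer refers to a task
+
+executor.wait_for_all();  // both tasks still run to completion
+@endcode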
+*/ +class AsyncTask { + + friend class FlowBuilder; + friend class Runtime; + friend class Taskflow; + friend class TaskView; + friend class Executor; + + public: + + /** + @brief constructs an empty task handle + */ + AsyncTask() = default; + + /** + @brief destroys the managed asynchronous task if this is the last owner + */ + ~AsyncTask() = default; + + /** + @brief constructs an task that shares ownership of @c rhs + */ + AsyncTask(const AsyncTask& rhs) = default; + + /** + @brief move-constructs an task from @c rhs + */ + AsyncTask(AsyncTask&& rhs) = default; + + /** + @brief shares ownership of the task managed by @c rhs + */ + AsyncTask& operator = (const AsyncTask& rhs) = default; + + /** + @brief move-assigns the task from @c rhs + */ + AsyncTask& operator = (AsyncTask&& rhs) = default; + + /** + @brief checks if the task stores a non-null shared pointer + */ + bool empty() const; + + /** + @brief release the ownership + */ + void reset(); + + /** + @brief obtains a hash value of the underlying node + */ + size_t hash_value() const; + + private: + + AsyncTask(std::shared_ptr); + + std::shared_ptr _node; +}; + +// Constructor +inline AsyncTask::AsyncTask(std::shared_ptr ptr) : _node {std::move(ptr)} { +} + +// Function: empty +inline bool AsyncTask::empty() const { + return _node == nullptr; +} + +// Function: reset +inline void AsyncTask::reset() { + _node.reset(); +} + +// Function: hash_value +inline size_t AsyncTask::hash_value() const { + return std::hash>{}(_node); +} + +} // end of namespace tf ---------------------------------------------------- + + + diff --git a/bundled/taskflow-3.6.0/include/core/declarations.hpp b/bundled/taskflow-3.6.0/include/core/declarations.hpp new file mode 100644 index 0000000000..dd89ab3b21 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/core/declarations.hpp @@ -0,0 +1,60 @@ +#pragma once + +namespace tf { + +// ---------------------------------------------------------------------------- +// taskflow +// ---------------------------------------------------------------------------- +class AsyncTopology; +class Node; +class Graph; +class FlowBuilder; +class Semaphore; +class Subflow; +class Runtime; +class Task; +class TaskView; +class Taskflow; +class Topology; +class TopologyBase; +class Executor; +class Worker; +class WorkerView; +class ObserverInterface; +class ChromeTracingObserver; +class TFProfObserver; +class TFProfManager; + +template +class Future; + +template +class Pipeline; + +// ---------------------------------------------------------------------------- +// cudaFlow +// ---------------------------------------------------------------------------- +class cudaFlowNode; +class cudaFlowGraph; +class cudaTask; +class cudaFlow; +class cudaFlowCapturer; +class cudaFlowOptimizerBase; +class cudaFlowLinearOptimizer; +class cudaFlowSequentialOptimizer; +class cudaFlowRoundRobinOptimizer; + +// ---------------------------------------------------------------------------- +// syclFlow +// ---------------------------------------------------------------------------- +class syclNode; +class syclGraph; +class syclTask; +class syclFlow; + + +} // end of namespace tf ----------------------------------------------------- + + + + diff --git a/bundled/taskflow-3.6.0/include/core/environment.hpp b/bundled/taskflow-3.6.0/include/core/environment.hpp new file mode 100644 index 0000000000..f9013b6e55 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/core/environment.hpp @@ -0,0 +1,8 @@ +#pragma once + +#define TF_ENABLE_PROFILER "TF_ENABLE_PROFILER" + 
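+
+// Illustrative usage note: the executor checks this variable at construction via
+// has_env(TF_ENABLE_PROFILER). Launching a program with the variable set to a file
+// path (e.g. TF_ENABLE_PROFILER=profile.json ./app, a hypothetical file name) is
+// intended to attach the built-in profiling observer and dump its data to that file.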
+namespace tf { + +} // end of namespace tf ----------------------------------------------------- + diff --git a/bundled/taskflow-2.5.0/include/taskflow/core/error.hpp b/bundled/taskflow-3.6.0/include/core/error.hpp similarity index 84% rename from bundled/taskflow-2.5.0/include/taskflow/core/error.hpp rename to bundled/taskflow-3.6.0/include/core/error.hpp index 67391215c0..6a68bea169 100644 --- a/bundled/taskflow-2.5.0/include/taskflow/core/error.hpp +++ b/bundled/taskflow-3.6.0/include/core/error.hpp @@ -4,7 +4,7 @@ #include #include -#include "../utility/stringify.hpp" +#include "../utility/stream.hpp" namespace tf { @@ -15,8 +15,8 @@ template void throw_re(const char* fname, const size_t line, ArgsT&&... args) { std::ostringstream oss; oss << "[" << fname << ":" << line << "] "; - ostreamize(oss, std::forward(args)...); - //(oss << ... << args); + //ostreamize(oss, std::forward(args)...); + (oss << ... << args); throw std::runtime_error(oss.str()); } diff --git a/bundled/taskflow-3.6.0/include/core/executor-module-opt.hpp b/bundled/taskflow-3.6.0/include/core/executor-module-opt.hpp new file mode 100644 index 0000000000..0e2b1ee6f7 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/core/executor-module-opt.hpp @@ -0,0 +1,2025 @@ +#pragma once + +#include "observer.hpp" +#include "taskflow.hpp" + +/** +@file executor.hpp +@brief executor include file +*/ + +namespace tf { + +// ---------------------------------------------------------------------------- +// Executor Definition +// ---------------------------------------------------------------------------- + +/** @class Executor + +@brief class to create an executor for running a taskflow graph + +An executor manages a set of worker threads to run one or multiple taskflows +using an efficient work-stealing scheduling algorithm. + +@code{.cpp} +// Declare an executor and a taskflow +tf::Executor executor; +tf::Taskflow taskflow; + +// Add three tasks into the taskflow +tf::Task A = taskflow.emplace([] () { std::cout << "This is TaskA\n"; }); +tf::Task B = taskflow.emplace([] () { std::cout << "This is TaskB\n"; }); +tf::Task C = taskflow.emplace([] () { std::cout << "This is TaskC\n"; }); + +// Build precedence between tasks +A.precede(B, C); + +tf::Future fu = executor.run(taskflow); +fu.wait(); // block until the execution completes + +executor.run(taskflow, [](){ std::cout << "end of 1 run"; }).wait(); +executor.run_n(taskflow, 4); +executor.wait_for_all(); // block until all associated executions finish +executor.run_n(taskflow, 4, [](){ std::cout << "end of 4 runs"; }).wait(); +executor.run_until(taskflow, [cnt=0] () mutable { return ++cnt == 10; }); +@endcode + +All the @c run methods are @em thread-safe. You can submit multiple +taskflows at the same time to an executor from different threads. +*/ +class Executor { + + friend class FlowBuilder; + friend class Subflow; + friend class Runtime; + + public: + + /** + @brief constructs the executor with @c N worker threads + + The constructor spawns @c N worker threads to run tasks in a + work-stealing loop. The number of workers must be greater than zero + or an exception will be thrown. + By default, the number of worker threads is equal to the maximum + hardware concurrency returned by std::thread::hardware_concurrency. 
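+
+  For example:
+
+  @code{.cpp}
+  tf::Executor executor;        // one worker per hardware thread
+  tf::Executor four_workers(4); // exactly four worker threads
+  @endcode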
+ */ + explicit Executor(size_t N = std::thread::hardware_concurrency()); + + /** + @brief destructs the executor + + The destructor calls Executor::wait_for_all to wait for all submitted + taskflows to complete and then notifies all worker threads to stop + and join these threads. + */ + ~Executor(); + + /** + @brief runs a taskflow once + + @param taskflow a tf::Taskflow object + + @return a tf::Future that holds the result of the execution + + This member function executes the given taskflow once and returns a tf::Future + object that eventually holds the result of the execution. + + @code{.cpp} + tf::Future future = executor.run(taskflow); + // do something else + future.wait(); + @endcode + + This member function is thread-safe. + + @attention + The executor does not own the given taskflow. It is your responsibility to + ensure the taskflow remains alive during its execution. + */ + tf::Future run(Taskflow& taskflow); + + /** + @brief runs a moved taskflow once + + @param taskflow a moved tf::Taskflow object + + @return a tf::Future that holds the result of the execution + + This member function executes a moved taskflow once and returns a tf::Future + object that eventually holds the result of the execution. + The executor will take care of the lifetime of the moved taskflow. + + @code{.cpp} + tf::Future future = executor.run(std::move(taskflow)); + // do something else + future.wait(); + @endcode + + This member function is thread-safe. + */ + tf::Future run(Taskflow&& taskflow); + + /** + @brief runs a taskflow once and invoke a callback upon completion + + @param taskflow a tf::Taskflow object + @param callable a callable object to be invoked after this run + + @return a tf::Future that holds the result of the execution + + This member function executes the given taskflow once and invokes the given + callable when the execution completes. + This member function returns a tf::Future object that + eventually holds the result of the execution. + + @code{.cpp} + tf::Future future = executor.run(taskflow, [](){ std::cout << "done"; }); + // do something else + future.wait(); + @endcode + + This member function is thread-safe. + + @attention + The executor does not own the given taskflow. It is your responsibility to + ensure the taskflow remains alive during its execution. + */ + template + tf::Future run(Taskflow& taskflow, C&& callable); + + /** + @brief runs a moved taskflow once and invoke a callback upon completion + + @param taskflow a moved tf::Taskflow object + @param callable a callable object to be invoked after this run + + @return a tf::Future that holds the result of the execution + + This member function executes a moved taskflow once and invokes the given + callable when the execution completes. + This member function returns a tf::Future object that + eventually holds the result of the execution. + The executor will take care of the lifetime of the moved taskflow. + + @code{.cpp} + tf::Future future = executor.run( + std::move(taskflow), [](){ std::cout << "done"; } + ); + // do something else + future.wait(); + @endcode + + This member function is thread-safe. + */ + template + tf::Future run(Taskflow&& taskflow, C&& callable); + + /** + @brief runs a taskflow for @c N times + + @param taskflow a tf::Taskflow object + @param N number of runs + + @return a tf::Future that holds the result of the execution + + This member function executes the given taskflow @c N times and returns a tf::Future + object that eventually holds the result of the execution. 
+ + @code{.cpp} + tf::Future future = executor.run_n(taskflow, 2); // run taskflow 2 times + // do something else + future.wait(); + @endcode + + This member function is thread-safe. + + @attention + The executor does not own the given taskflow. It is your responsibility to + ensure the taskflow remains alive during its execution. + */ + tf::Future run_n(Taskflow& taskflow, size_t N); + + /** + @brief runs a moved taskflow for @c N times + + @param taskflow a moved tf::Taskflow object + @param N number of runs + + @return a tf::Future that holds the result of the execution + + This member function executes a moved taskflow @c N times and returns a tf::Future + object that eventually holds the result of the execution. + The executor will take care of the lifetime of the moved taskflow. + + @code{.cpp} + tf::Future future = executor.run_n( + std::move(taskflow), 2 // run the moved taskflow 2 times + ); + // do something else + future.wait(); + @endcode + + This member function is thread-safe. + */ + tf::Future run_n(Taskflow&& taskflow, size_t N); + + /** + @brief runs a taskflow for @c N times and then invokes a callback + + @param taskflow a tf::Taskflow + @param N number of runs + @param callable a callable object to be invoked after this run + + @return a tf::Future that holds the result of the execution + + This member function executes the given taskflow @c N times and invokes the given + callable when the execution completes. + This member function returns a tf::Future object that + eventually holds the result of the execution. + + @code{.cpp} + tf::Future future = executor.run( + taskflow, 2, [](){ std::cout << "done"; } // runs taskflow 2 times and invoke + // the lambda to print "done" + ); + // do something else + future.wait(); + @endcode + + This member function is thread-safe. + + @attention + The executor does not own the given taskflow. It is your responsibility to + ensure the taskflow remains alive during its execution. + */ + template + tf::Future run_n(Taskflow& taskflow, size_t N, C&& callable); + + /** + @brief runs a moved taskflow for @c N times and then invokes a callback + + @param taskflow a moved tf::Taskflow + @param N number of runs + @param callable a callable object to be invoked after this run + + @return a tf::Future that holds the result of the execution + + This member function executes a moved taskflow @c N times and invokes the given + callable when the execution completes. + This member function returns a tf::Future object that + eventually holds the result of the execution. + + @code{.cpp} + tf::Future future = executor.run( + // run the moved taskflow 2 times and invoke the lambda to print "done" + std::move(taskflow), 2, [](){ std::cout << "done"; } + ); + // do something else + future.wait(); + @endcode + + This member function is thread-safe. + */ + template + tf::Future run_n(Taskflow&& taskflow, size_t N, C&& callable); + + /** + @brief runs a taskflow multiple times until the predicate becomes true + + @param taskflow a tf::Taskflow + @param pred a boolean predicate to return @c true for stop + + @return a tf::Future that holds the result of the execution + + This member function executes the given taskflow multiple times until + the predicate returns @c true. + This member function returns a tf::Future object that + eventually holds the result of the execution. 
+ + @code{.cpp} + tf::Future future = executor.run( + taskflow, [](){ return rand()%10 == 0 } + ); + // do something else + future.wait(); + @endcode + + This member function is thread-safe. + + @attention + The executor does not own the given taskflow. It is your responsibility to + ensure the taskflow remains alive during its execution. + */ + template + tf::Future run_until(Taskflow& taskflow, P&& pred); + + /** + @brief runs a moved taskflow and keeps running it + until the predicate becomes true + + @param taskflow a moved tf::Taskflow object + @param pred a boolean predicate to return @c true for stop + + @return a tf::Future that holds the result of the execution + + This member function executes a moved taskflow multiple times until + the predicate returns @c true. + This member function returns a tf::Future object that + eventually holds the result of the execution. + The executor will take care of the lifetime of the moved taskflow. + + @code{.cpp} + tf::Future future = executor.run( + std::move(taskflow), [](){ return rand()%10 == 0 } + ); + // do something else + future.wait(); + @endcode + + This member function is thread-safe. + */ + template + tf::Future run_until(Taskflow&& taskflow, P&& pred); + + /** + @brief runs a taskflow multiple times until the predicate becomes true and + then invokes the callback + + @param taskflow a tf::Taskflow + @param pred a boolean predicate to return @c true for stop + @param callable a callable object to be invoked after this run completes + + @return a tf::Future that holds the result of the execution + + This member function executes the given taskflow multiple times until + the predicate returns @c true and then invokes the given callable when + the execution completes. + This member function returns a tf::Future object that + eventually holds the result of the execution. + + @code{.cpp} + tf::Future future = executor.run( + taskflow, [](){ return rand()%10 == 0 }, [](){ std::cout << "done"; } + ); + // do something else + future.wait(); + @endcode + + This member function is thread-safe. + + @attention + The executor does not own the given taskflow. It is your responsibility to + ensure the taskflow remains alive during its execution. + */ + template + tf::Future run_until(Taskflow& taskflow, P&& pred, C&& callable); + + /** + @brief runs a moved taskflow and keeps running + it until the predicate becomes true and then invokes the callback + + @param taskflow a moved tf::Taskflow + @param pred a boolean predicate to return @c true for stop + @param callable a callable object to be invoked after this run completes + + @return a tf::Future that holds the result of the execution + + This member function executes a moved taskflow multiple times until + the predicate returns @c true and then invokes the given callable when + the execution completes. + This member function returns a tf::Future object that + eventually holds the result of the execution. + The executor will take care of the lifetime of the moved taskflow. + + @code{.cpp} + tf::Future future = executor.run( + std::move(taskflow), + [](){ return rand()%10 == 0 }, [](){ std::cout << "done"; } + ); + // do something else + future.wait(); + @endcode + + This member function is thread-safe. + */ + template + tf::Future run_until(Taskflow&& taskflow, P&& pred, C&& callable); + + /** + @brief wait for all tasks to complete + + This member function waits until all submitted tasks + (e.g., taskflows, asynchronous tasks) to finish. 
+ + @code{.cpp} + executor.run(taskflow1); + executor.run_n(taskflow2, 10); + executor.run_n(taskflow3, 100); + executor.wait_for_all(); // wait until the above submitted taskflows finish + @endcode + */ + void wait_for_all(); + + /** + @brief queries the number of worker threads + + Each worker represents one unique thread spawned by an executor + upon its construction time. + + @code{.cpp} + tf::Executor executor(4); + std::cout << executor.num_workers(); // 4 + @endcode + */ + size_t num_workers() const noexcept; + + /** + @brief queries the number of running topologies at the time of this call + + When a taskflow is submitted to an executor, a topology is created to store + runtime metadata of the running taskflow. + When the execution of the submitted taskflow finishes, + its corresponding topology will be removed from the executor. + + @code{.cpp} + executor.run(taskflow); + std::cout << executor.num_topologies(); // 0 or 1 (taskflow still running) + @endcode + */ + size_t num_topologies() const; + + /** + @brief queries the number of running taskflows with moved ownership + + @code{.cpp} + executor.run(std::move(taskflow)); + std::cout << executor.num_taskflows(); // 0 or 1 (taskflow still running) + @endcode + */ + size_t num_taskflows() const; + + /** + @brief queries the id of the caller thread in this executor + + Each worker has an unique id in the range of @c 0 to @c N-1 associated with + its parent executor. + If the caller thread does not belong to the executor, @c -1 is returned. + + @code{.cpp} + tf::Executor executor(4); // 4 workers in the executor + executor.this_worker_id(); // -1 (main thread is not a worker) + + taskflow.emplace([&](){ + std::cout << executor.this_worker_id(); // 0, 1, 2, or 3 + }); + executor.run(taskflow); + @endcode + */ + int this_worker_id() const; + + /** + @brief runs a given function asynchronously + + @tparam F callable type + @tparam ArgsT parameter types + + @param f callable object to call + @param args parameters to pass to the callable + + @return a tf::Future that will holds the result of the execution + + The method creates an asynchronous task to launch the given + function on the given arguments. + Unlike std::async, the return here is a @em tf::Future that holds + an optional object to the result. + If the asynchronous task is cancelled before it runs, the return is + a @c std::nullopt, or the value returned by the callable. + + @code{.cpp} + tf::Future> future = executor.async([](){ + std::cout << "create an asynchronous task and returns 1\n"; + return 1; + }); + @endcode + + This member function is thread-safe. + */ + template + auto async(F&& f, ArgsT&&... args); + + /** + @brief runs a given function asynchronously and gives a name to this task + + @tparam F callable type + @tparam ArgsT parameter types + + @param name name of the asynchronous task + @param f callable object to call + @param args parameters to pass to the callable + + @return a tf::Future that will holds the result of the execution + + The method creates a named asynchronous task to launch the given + function on the given arguments. + Naming an asynchronous task is primarily used for profiling and visualizing + the task execution timeline. + Unlike std::async, the return here is a tf::Future that holds + an optional object to the result. + If the asynchronous task is cancelled before it runs, the return is + a @c std::nullopt, or the value returned by the callable. 
+ + @code{.cpp} + tf::Future> future = executor.named_async("name", [](){ + std::cout << "create an asynchronous task with a name and returns 1\n"; + return 1; + }); + @endcode + + This member function is thread-safe. + */ + template + auto named_async(const std::string& name, F&& f, ArgsT&&... args); + + /** + @brief similar to tf::Executor::async but does not return a future object + + This member function is more efficient than tf::Executor::async + and is encouraged to use when there is no data returned. + + @code{.cpp} + executor.silent_async([](){ + std::cout << "create an asynchronous task with no return\n"; + }); + @endcode + + This member function is thread-safe. + */ + template + void silent_async(F&& f, ArgsT&&... args); + + /** + @brief similar to tf::Executor::named_async but does not return a future object + + This member function is more efficient than tf::Executor::named_async + and is encouraged to use when there is no data returned. + + @code{.cpp} + executor.named_silent_async("name", [](){ + std::cout << "create an asynchronous task with a name and no return\n"; + }); + @endcode + + This member function is thread-safe. + */ + template + void named_silent_async(const std::string& name, F&& f, ArgsT&&... args); + + /** + @brief constructs an observer to inspect the activities of worker threads + + @tparam Observer observer type derived from tf::ObserverInterface + @tparam ArgsT argument parameter pack + + @param args arguments to forward to the constructor of the observer + + @return a shared pointer to the created observer + + Each executor manages a list of observers with shared ownership with callers. + For each of these observers, the two member functions, + tf::ObserverInterface::on_entry and tf::ObserverInterface::on_exit + will be called before and after the execution of a task. + + This member function is not thread-safe. + */ + template + std::shared_ptr make_observer(ArgsT&&... args); + + /** + @brief removes an observer from the executor + + This member function is not thread-safe. 
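+
+  A minimal sketch, assuming a user-defined @c MyObserver derived from
+  tf::ObserverInterface (a hypothetical name):
+
+  @code{.cpp}
+  auto observer = executor.make_observer<MyObserver>();
+  // ... run taskflows and collect observer data ...
+  executor.remove_observer(observer);
+  @endcode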
+ */ + template + void remove_observer(std::shared_ptr observer); + + /** + @brief queries the number of observers + */ + size_t num_observers() const noexcept; + + private: + + std::condition_variable _topology_cv; + std::mutex _taskflow_mutex; + std::mutex _topology_mutex; + std::mutex _wsq_mutex; + + size_t _num_topologies {0}; + + std::unordered_map _wids; + std::vector _workers; + std::vector _threads; + std::list _taskflows; + + Notifier _notifier; + + TaskQueue _wsq; + + std::atomic _num_actives {0}; + std::atomic _num_thieves {0}; + std::atomic _done {0}; + + std::unordered_set> _observers; + + Worker* _this_worker(); + + bool _wait_for_task(Worker&, Node*&); + + void _observer_prologue(Worker&, Node*); + void _observer_epilogue(Worker&, Node*); + void _spawn(size_t); + void _worker_loop(Worker&); + void _exploit_task(Worker&, Node*&); + void _explore_task(Worker&, Node*&); + void _consume_task(Worker&, Node*); + void _schedule(Worker&, Node*); + void _schedule(Node*); + void _schedule(Worker&, const SmallVector&); + void _schedule(const SmallVector&); + void _set_up_topology(Worker*, Topology*); + void _tear_down_topology(Worker&, Topology*); + void _tear_down_async(Node*); + void _tear_down_invoke(Worker&, Node*); + void _cancel_invoke(Worker&, Node*); + void _increment_topology(); + void _decrement_topology(); + void _decrement_topology_and_notify(); + void _invoke(Worker&, Node*); + void _invoke_static_task(Worker&, Node*); + void _invoke_dynamic_task(Worker&, Node*); + void _invoke_dynamic_task_external(Worker&, Node*, Graph&, bool); + void _invoke_dynamic_task_internal(Worker&, Node*, Graph&); + void _invoke_condition_task(Worker&, Node*, SmallVector&); + void _invoke_multi_condition_task(Worker&, Node*, SmallVector&); + void _invoke_module_task(Worker&, Node*, bool&); + void _invoke_module_task_internal(Worker&, Node*, Graph&, bool&); + void _invoke_async_task(Worker&, Node*); + void _invoke_silent_async_task(Worker&, Node*); + void _invoke_cudaflow_task(Worker&, Node*); + void _invoke_syclflow_task(Worker&, Node*); + void _invoke_runtime_task(Worker&, Node*); + + template , void>* = nullptr + > + void _invoke_cudaflow_task_entry(Node*, C&&); + + template , void>* = nullptr + > + void _invoke_syclflow_task_entry(Node*, C&&, Q&); +}; + +// Constructor +inline Executor::Executor(size_t N) : + _workers {N}, + _notifier {N} { + + if(N == 0) { + TF_THROW("no cpu workers to execute taskflows"); + } + + _spawn(N); + + // instantite the default observer if requested + if(has_env(TF_ENABLE_PROFILER)) { + TFProfManager::get()._manage(make_observer()); + } +} + +// Destructor +inline Executor::~Executor() { + + // wait for all topologies to complete + wait_for_all(); + + // shut down the scheduler + _done = true; + + _notifier.notify(true); + + for(auto& t : _threads){ + t.join(); + } +} + +// Function: num_workers +inline size_t Executor::num_workers() const noexcept { + return _workers.size(); +} + +// Function: num_topologies +inline size_t Executor::num_topologies() const { + return _num_topologies; +} + +// Function: num_taskflows +inline size_t Executor::num_taskflows() const { + return _taskflows.size(); +} + +// Function: _this_worker +inline Worker* Executor::_this_worker() { + auto itr = _wids.find(std::this_thread::get_id()); + return itr == _wids.end() ? nullptr : &_workers[itr->second]; +} + +// Function: named_async +template +auto Executor::named_async(const std::string& name, F&& f, ArgsT&&... 
args) { + + _increment_topology(); + + using T = std::invoke_result_t; + using R = std::conditional_t, void, std::optional>; + + std::promise p; + + auto tpg = std::make_shared(); + + Future fu(p.get_future(), tpg); + + auto node = node_pool.animate( + std::in_place_type_t{}, + [p=make_moc(std::move(p)), f=std::forward(f), args...] + (bool cancel) mutable { + if constexpr(std::is_same_v) { + if(!cancel) { + f(args...); + } + p.object.set_value(); + } + else { + p.object.set_value(cancel ? std::nullopt : std::make_optional(f(args...))); + } + }, + std::move(tpg) + ); + + node->_name = name; + + if(auto w = _this_worker(); w) { + _schedule(*w, node); + } + else{ + _schedule(node); + } + + return fu; +} + +// Function: async +template +auto Executor::async(F&& f, ArgsT&&... args) { + return named_async("", std::forward(f), std::forward(args)...); +} + +// Function: named_silent_async +template +void Executor::named_silent_async( + const std::string& name, F&& f, ArgsT&&... args +) { + + _increment_topology(); + + Node* node = node_pool.animate( + std::in_place_type_t{}, + [f=std::forward(f), args...] () mutable { + f(args...); + } + ); + + node->_name = name; + + if(auto w = _this_worker(); w) { + _schedule(*w, node); + } + else { + _schedule(node); + } +} + +// Function: silent_async +template +void Executor::silent_async(F&& f, ArgsT&&... args) { + named_silent_async("", std::forward(f), std::forward(args)...); +} + +// Function: this_worker_id +inline int Executor::this_worker_id() const { + auto i = _wids.find(std::this_thread::get_id()); + return i == _wids.end() ? -1 : static_cast(_workers[i->second]._id); +} + +// Procedure: _spawn +inline void Executor::_spawn(size_t N) { + + std::mutex mutex; + std::condition_variable cond; + size_t n=0; + + for(size_t id=0; id void { + + // enables the mapping + { + std::scoped_lock lock(mutex); + _wids[std::this_thread::get_id()] = w._id; + if(n++; n == num_workers()) { + cond.notify_one(); + } + } + + //this_worker().worker = &w; + + Node* t = nullptr; + + // must use 1 as condition instead of !done + while(1) { + + // execute the tasks. + _exploit_task(w, t); + + // wait for tasks + if(_wait_for_task(w, t) == false) { + break; + } + } + + }, std::ref(_workers[id]), std::ref(mutex), std::ref(cond), std::ref(n)); + } + + std::unique_lock lock(mutex); + cond.wait(lock, [&](){ return n==N; }); +} + +// Function: _consume_task +inline void Executor::_consume_task(Worker& w, Node* p) { + + std::uniform_int_distribution rdvtm(0, _workers.size()-1); + + while(p->_join_counter != 0) { + exploit: + if(auto t = w._wsq.pop(); t) { + _invoke(w, t); + } + else { + size_t num_steals = 0; + //size_t num_pauses = 0; + size_t max_steals = ((_workers.size() + 1) << 1); + + explore: + + t = (w._id == w._vtm) ? _wsq.steal() : _workers[w._vtm]._wsq.steal(); + if(t) { + _invoke(w, t); + goto exploit; + } + else if(p->_join_counter != 0){ + + if(num_steals++ > max_steals) { + std::this_thread::yield(); + } + + //std::this_thread::yield(); + w._vtm = rdvtm(w._rdgen); + goto explore; + } + else { + break; + } + } + } +} + +// Function: _explore_task +inline void Executor::_explore_task(Worker& w, Node*& t) { + + //assert(_workers[w].wsq.empty()); + //assert(!t); + + size_t num_steals = 0; + size_t num_yields = 0; + size_t max_steals = ((_workers.size() + 1) << 1); + + std::uniform_int_distribution rdvtm(0, _workers.size()-1); + + do { + t = (w._id == w._vtm) ? 
_wsq.steal() : _workers[w._vtm]._wsq.steal(); + + if(t) { + break; + } + + if(num_steals++ > max_steals) { + std::this_thread::yield(); + if(num_yields++ > 100) { + break; + } + } + + w._vtm = rdvtm(w._rdgen); + } while(!_done); + +} + +// Procedure: _exploit_task +inline void Executor::_exploit_task(Worker& w, Node*& t) { + + if(t) { + + if(_num_actives.fetch_add(1) == 0 && _num_thieves == 0) { + _notifier.notify(false); + } + + while(t) { + _invoke(w, t); + t = w._wsq.pop(); + } + + --_num_actives; + } +} + +// Function: _wait_for_task +inline bool Executor::_wait_for_task(Worker& worker, Node*& t) { + + wait_for_task: + + //assert(!t); + + ++_num_thieves; + + explore_task: + + _explore_task(worker, t); + + if(t) { + if(_num_thieves.fetch_sub(1) == 1) { + _notifier.notify(false); + } + return true; + } + + _notifier.prepare_wait(worker._waiter); + + //if(auto vtm = _find_vtm(me); vtm != _workers.size()) { + if(!_wsq.empty()) { + + _notifier.cancel_wait(worker._waiter); + //t = (vtm == me) ? _wsq.steal() : _workers[vtm].wsq.steal(); + + t = _wsq.steal(); // must steal here + if(t) { + if(_num_thieves.fetch_sub(1) == 1) { + _notifier.notify(false); + } + return true; + } + else { + worker._vtm = worker._id; + goto explore_task; + } + } + + if(_done) { + _notifier.cancel_wait(worker._waiter); + _notifier.notify(true); + --_num_thieves; + return false; + } + + if(_num_thieves.fetch_sub(1) == 1) { + if(_num_actives) { + _notifier.cancel_wait(worker._waiter); + goto wait_for_task; + } + // check all queues again + for(auto& w : _workers) { + if(!w._wsq.empty()) { + worker._vtm = w._id; + _notifier.cancel_wait(worker._waiter); + goto wait_for_task; + } + } + } + + // Now I really need to relinguish my self to others + _notifier.commit_wait(worker._waiter); + + return true; +} + +// Function: make_observer +template +std::shared_ptr Executor::make_observer(ArgsT&&... args) { + + static_assert( + std::is_base_of_v, + "Observer must be derived from ObserverInterface" + ); + + // use a local variable to mimic the constructor + auto ptr = std::make_shared(std::forward(args)...); + + ptr->set_up(_workers.size()); + + _observers.emplace(std::static_pointer_cast(ptr)); + + return ptr; +} + +// Procedure: remove_observer +template +void Executor::remove_observer(std::shared_ptr ptr) { + + static_assert( + std::is_base_of_v, + "Observer must be derived from ObserverInterface" + ); + + _observers.erase(std::static_pointer_cast(ptr)); +} + +// Function: num_observers +inline size_t Executor::num_observers() const noexcept { + return _observers.size(); +} + +// Procedure: _schedule +inline void Executor::_schedule(Worker& worker, Node* node) { + + node->_state.fetch_or(Node::READY, std::memory_order_release); + + // caller is a worker to this pool + if(worker._executor == this) { + worker._wsq.push(node); + return; + } + + { + std::lock_guard lock(_wsq_mutex); + _wsq.push(node); + } + + _notifier.notify(false); +} + +// Procedure: _schedule +inline void Executor::_schedule(Node* node) { + + node->_state.fetch_or(Node::READY, std::memory_order_release); + + { + std::lock_guard lock(_wsq_mutex); + _wsq.push(node); + } + + _notifier.notify(false); +} + +// Procedure: _schedule +inline void Executor::_schedule( + Worker& worker, const SmallVector& nodes +) { + + // We need to cacth the node count to avoid accessing the nodes + // vector while the parent topology is removed! 
+ const auto num_nodes = nodes.size(); + + if(num_nodes == 0) { + return; + } + + // make the node ready + for(size_t i=0; i_state.fetch_or(Node::READY, std::memory_order_release); + } + + if(worker._executor == this) { + for(size_t i=0; i lock(_wsq_mutex); + for(size_t k=0; k& nodes) { + + // parent topology may be removed! + const auto num_nodes = nodes.size(); + + if(num_nodes == 0) { + return; + } + + // make the node ready + for(size_t i=0; i_state.fetch_or(Node::READY, std::memory_order_release); + } + + { + std::lock_guard lock(_wsq_mutex); + for(size_t k=0; k conds; + + // synchronize all outstanding memory operations caused by reordering + do { + state = node->_state.load(std::memory_order_acquire); + } while(! (state & Node::READY)); + + // unwind stack for deferred node + if(state & Node::DEFERRED) { + node->_state.fetch_and(~Node::DEFERRED, std::memory_order_relaxed); + goto invoke_epilogue; + } + + //while(!(node->_state.load(std::memory_order_acquire) & Node::READY)); + + invoke_prologue: + + // no need to do other things if the topology is cancelled + if(node->_is_cancelled()) { + _cancel_invoke(worker, node); + return; + } + + // if acquiring semaphore(s) exists, acquire them first + if(node->_semaphores && !node->_semaphores->to_acquire.empty()) { + SmallVector nodes; + if(!node->_acquire_all(nodes)) { + _schedule(worker, nodes); + return; + } + node->_state.fetch_or(Node::ACQUIRED, std::memory_order_release); + } + + // condition task + //int cond = -1; + //SmallVector conds = { -1 }; + + // switch is faster than nested if-else due to jump table + switch(node->_handle.index()) { + // static task + case Node::STATIC:{ + _invoke_static_task(worker, node); + } + break; + + // dynamic task + case Node::DYNAMIC: { + _invoke_dynamic_task(worker, node); + } + break; + + // condition task + case Node::CONDITION: { + _invoke_condition_task(worker, node, conds); + } + break; + + // multi-condition task + case Node::MULTI_CONDITION: { + _invoke_multi_condition_task(worker, node, conds); + } + break; + + // module task + case Node::MODULE: { + bool deferred = false; + _invoke_module_task(worker, node, deferred); + if(deferred) { + return; + } + } + break; + + // async task + case Node::ASYNC: { + _invoke_async_task(worker, node); + _tear_down_async(node); + return ; + } + break; + + // silent async task + case Node::SILENT_ASYNC: { + _invoke_silent_async_task(worker, node); + _tear_down_async(node); + return ; + } + break; + + // cudaflow task + case Node::CUDAFLOW: { + _invoke_cudaflow_task(worker, node); + } + break; + + // syclflow task + case Node::SYCLFLOW: { + _invoke_syclflow_task(worker, node); + } + break; + + // runtime task + case Node::RUNTIME: { + _invoke_runtime_task(worker, node); + } + break; + + // monostate (placeholder) + default: + break; + } + + invoke_epilogue: + + // if releasing semaphores exist, release them + if(node->_semaphores && !node->_semaphores->to_release.empty()) { + _schedule(worker, node->_release_all()); + } + + // We MUST recover the dependency since the graph may have cycles. + // This must be done before scheduling the successors, otherwise this might cause + // race condition on the _dependents + if((node->_state.load(std::memory_order_relaxed) & Node::CONDITIONED)) { + node->_join_counter = node->num_strong_dependents(); + } + else { + node->_join_counter = node->num_dependents(); + } + + // acquire the parent flow counter + auto& j = (node->_parent) ? 
node->_parent->_join_counter : + node->_topology->_join_counter; + + Node* cache {nullptr}; + + // At this point, the node storage might be destructed (to be verified) + // case 1: non-condition task + switch(node->_handle.index()) { + + // condition and multi-condition tasks + case Node::CONDITION: + case Node::MULTI_CONDITION: { + for(auto cond : conds) { + if(cond >= 0 && static_cast(cond) < node->_successors.size()) { + auto s = node->_successors[cond]; + // zeroing the join counter for invariant + s->_join_counter.store(0, std::memory_order_relaxed); + j.fetch_add(1); + if(cache) { + _schedule(worker, cache); + } + cache = s; + } + } + } + break; + + // non-condition task + default: { + for(size_t i=0; i_successors.size(); ++i) { + if(--(node->_successors[i]->_join_counter) == 0) { + j.fetch_add(1); + if(cache) { + _schedule(worker, cache); + } + cache = node->_successors[i]; + } + } + } + break; + } + + // tear_down the invoke + _tear_down_invoke(worker, node); + + // perform tail recursion elimination for the right-most child to reduce + // the number of expensive pop/push operations through the task queue + if(cache) { + node = cache; + //node->_state.fetch_or(Node::READY, std::memory_order_release); + goto invoke_prologue; + } +} + +// Procedure: _tear_down_async +inline void Executor::_tear_down_async(Node* node) { + if(node->_parent) { + node->_parent->_join_counter.fetch_sub(1); + } + else { + _decrement_topology_and_notify(); + } + node_pool.recycle(node); +} + +// Proecdure: _tear_down_invoke +inline void Executor::_tear_down_invoke(Worker& worker, Node* node) { + // we must check parent first before substracting the join counter, + // or it can introduce data race + if(auto parent = node->_parent; parent == nullptr) { + if(node->_topology->_join_counter.fetch_sub(1) == 1) { + _tear_down_topology(worker, node->_topology); + } + } + else { + // prefetch the deferred status, as subtracting the join counter can + // immediately cause the other worker to release the subflow + auto deferred = parent->_state.load(std::memory_order_relaxed) & Node::DEFERRED; + if(parent->_join_counter.fetch_sub(1) == 1 && deferred) { + _schedule(worker, parent); + } + } +} + +// Procedure: _cancel_invoke +inline void Executor::_cancel_invoke(Worker& worker, Node* node) { + + switch(node->_handle.index()) { + // async task needs to carry out the promise + case Node::ASYNC: + std::get_if(&(node->_handle))->work(true); + _tear_down_async(node); + break; + + // silent async doesn't need to carry out the promise + case Node::SILENT_ASYNC: + _tear_down_async(node); + break; + + // tear down topology if the node is the last leaf + default: { + _tear_down_invoke(worker, node); + } + break; + } +} + +// Procedure: _observer_prologue +inline void Executor::_observer_prologue(Worker& worker, Node* node) { + for(auto& observer : _observers) { + observer->on_entry(WorkerView(worker), TaskView(*node)); + } +} + +// Procedure: _observer_epilogue +inline void Executor::_observer_epilogue(Worker& worker, Node* node) { + for(auto& observer : _observers) { + observer->on_exit(WorkerView(worker), TaskView(*node)); + } +} + +// Procedure: _invoke_static_task +inline void Executor::_invoke_static_task(Worker& worker, Node* node) { + _observer_prologue(worker, node); + std::get_if(&node->_handle)->work(); + _observer_epilogue(worker, node); +} + +// Procedure: _invoke_dynamic_task +inline void Executor::_invoke_dynamic_task(Worker& w, Node* node) { + + _observer_prologue(w, node); + + auto handle = 
std::get_if(&node->_handle); + + handle->subgraph._clear(); + + Subflow sf(*this, w, node, handle->subgraph); + + handle->work(sf); + + if(sf._joinable) { + _invoke_dynamic_task_internal(w, node, handle->subgraph); + } + + _observer_epilogue(w, node); +} + +// Procedure: _invoke_dynamic_task_external +inline void Executor::_invoke_dynamic_task_external( + Worker& w, Node* p, Graph& g, bool detach +) { + + // graph is empty and has no async tasks + if(g.empty() && p->_join_counter == 0) { + return; + } + + SmallVector src; + + for(auto n : g._nodes) { + + n->_topology = p->_topology; + n->_state.store(0, std::memory_order_relaxed); + n->_set_up_join_counter(); + + if(detach) { + n->_parent = nullptr; + n->_state.fetch_or(Node::DETACHED, std::memory_order_relaxed); + } + else { + n->_parent = p; + } + + if(n->num_dependents() == 0) { + src.push_back(n); + } + } + + // detach here + if(detach) { + + { + std::lock_guard lock(p->_topology->_taskflow._mutex); + p->_topology->_taskflow._graph._merge(std::move(g)); + } + + p->_topology->_join_counter.fetch_add(src.size()); + _schedule(w, src); + } + // join here + else { + p->_join_counter.fetch_add(src.size()); + _schedule(w, src); + _consume_task(w, p); + } +} + +// Procedure: _invoke_dynamic_task_internal +inline void Executor::_invoke_dynamic_task_internal( + Worker& w, Node* p, Graph& g +) { + + // graph is empty and has no async tasks + if(g.empty() && p->_join_counter == 0) { + return; + } + + SmallVector src; + + for(auto n : g._nodes) { + n->_topology = p->_topology; + n->_state.store(0, std::memory_order_relaxed); + n->_set_up_join_counter(); + n->_parent = p; + if(n->num_dependents() == 0) { + src.push_back(n); + } + } + p->_join_counter.fetch_add(src.size()); + _schedule(w, src); + _consume_task(w, p); +} + +// Procedure: _invoke_module_task_internal +inline void Executor::_invoke_module_task_internal( + Worker& w, Node* p, Graph& g, bool& deferred +) { + + // graph is empty and has no async tasks + if(g.empty()) { + return; + } + + // set deferred + deferred = true; + p->_state.fetch_or(Node::DEFERRED, std::memory_order_relaxed); + + SmallVector src; + + for(auto n : g._nodes) { + n->_topology = p->_topology; + n->_state.store(0, std::memory_order_relaxed); + n->_set_up_join_counter(); + n->_parent = p; + if(n->num_dependents() == 0) { + src.push_back(n); + } + } + p->_join_counter.fetch_add(src.size()); + _schedule(w, src); +} + +// Procedure: _invoke_condition_task +inline void Executor::_invoke_condition_task( + Worker& worker, Node* node, SmallVector& conds +) { + _observer_prologue(worker, node); + conds = { std::get_if(&node->_handle)->work() }; + _observer_epilogue(worker, node); +} + +// Procedure: _invoke_multi_condition_task +inline void Executor::_invoke_multi_condition_task( + Worker& worker, Node* node, SmallVector& conds +) { + _observer_prologue(worker, node); + conds = std::get_if(&node->_handle)->work(); + _observer_epilogue(worker, node); +} + +// Procedure: _invoke_cudaflow_task +inline void Executor::_invoke_cudaflow_task(Worker& worker, Node* node) { + _observer_prologue(worker, node); + std::get_if(&node->_handle)->work(*this, node); + _observer_epilogue(worker, node); +} + +// Procedure: _invoke_syclflow_task +inline void Executor::_invoke_syclflow_task(Worker& worker, Node* node) { + _observer_prologue(worker, node); + std::get_if(&node->_handle)->work(*this, node); + _observer_epilogue(worker, node); +} + +// Procedure: _invoke_module_task +inline void Executor::_invoke_module_task(Worker& w, Node* node, bool& 
deferred) { + _observer_prologue(w, node); + _invoke_module_task_internal( + w, node, std::get_if(&node->_handle)->graph, deferred + ); + _observer_epilogue(w, node); +} + +// Procedure: _invoke_async_task +inline void Executor::_invoke_async_task(Worker& w, Node* node) { + _observer_prologue(w, node); + std::get_if(&node->_handle)->work(false); + _observer_epilogue(w, node); +} + +// Procedure: _invoke_silent_async_task +inline void Executor::_invoke_silent_async_task(Worker& w, Node* node) { + _observer_prologue(w, node); + std::get_if(&node->_handle)->work(); + _observer_epilogue(w, node); +} + +// Procedure: _invoke_runtime_task +inline void Executor::_invoke_runtime_task(Worker& w, Node* node) { + _observer_prologue(w, node); + Runtime rt(*this, w, node); + std::get_if(&node->_handle)->work(rt); + _observer_epilogue(w, node); +} + +// Function: run +inline tf::Future Executor::run(Taskflow& f) { + return run_n(f, 1, [](){}); +} + +// Function: run +inline tf::Future Executor::run(Taskflow&& f) { + return run_n(std::move(f), 1, [](){}); +} + +// Function: run +template +tf::Future Executor::run(Taskflow& f, C&& c) { + return run_n(f, 1, std::forward(c)); +} + +// Function: run +template +tf::Future Executor::run(Taskflow&& f, C&& c) { + return run_n(std::move(f), 1, std::forward(c)); +} + +// Function: run_n +inline tf::Future Executor::run_n(Taskflow& f, size_t repeat) { + return run_n(f, repeat, [](){}); +} + +// Function: run_n +inline tf::Future Executor::run_n(Taskflow&& f, size_t repeat) { + return run_n(std::move(f), repeat, [](){}); +} + +// Function: run_n +template +tf::Future Executor::run_n(Taskflow& f, size_t repeat, C&& c) { + return run_until( + f, [repeat]() mutable { return repeat-- == 0; }, std::forward(c) + ); +} + +// Function: run_n +template +tf::Future Executor::run_n(Taskflow&& f, size_t repeat, C&& c) { + return run_until( + std::move(f), [repeat]() mutable { return repeat-- == 0; }, std::forward(c) + ); +} + +// Function: run_until +template +tf::Future Executor::run_until(Taskflow& f, P&& pred) { + return run_until(f, std::forward
<P>
(pred), [](){}); +} + +// Function: run_until +template +tf::Future Executor::run_until(Taskflow&& f, P&& pred) { + return run_until(std::move(f), std::forward
<P>
(pred), [](){}); +} + +// Function: run_until +template +tf::Future Executor::run_until(Taskflow& f, P&& p, C&& c) { + + _increment_topology(); + + // Need to check the empty under the lock since dynamic task may + // define detached blocks that modify the taskflow at the same time + bool empty; + { + std::lock_guard lock(f._mutex); + empty = f.empty(); + } + + // No need to create a real topology but returns an dummy future + if(empty || p()) { + c(); + std::promise promise; + promise.set_value(); + _decrement_topology_and_notify(); + return tf::Future(promise.get_future(), std::monostate{}); + } + + // create a topology for this run + auto t = std::make_shared(f, std::forward
<P>
(p), std::forward(c)); + + // need to create future before the topology got torn down quickly + tf::Future future(t->_promise.get_future(), t); + + // modifying topology needs to be protected under the lock + { + std::lock_guard lock(f._mutex); + f._topologies.push(t); + if(f._topologies.size() == 1) { + _set_up_topology(_this_worker(), t.get()); + } + } + + return future; +} + +// Function: run_until +template +tf::Future Executor::run_until(Taskflow&& f, P&& pred, C&& c) { + + std::list::iterator itr; + + { + std::scoped_lock lock(_taskflow_mutex); + itr = _taskflows.emplace(_taskflows.end(), std::move(f)); + itr->_satellite = itr; + } + + return run_until(*itr, std::forward
<P>
(pred), std::forward(c)); +} + +// Procedure: _increment_topology +inline void Executor::_increment_topology() { + std::lock_guard lock(_topology_mutex); + ++_num_topologies; +} + +// Procedure: _decrement_topology_and_notify +inline void Executor::_decrement_topology_and_notify() { + std::lock_guard lock(_topology_mutex); + if(--_num_topologies == 0) { + _topology_cv.notify_all(); + } +} + +// Procedure: _decrement_topology +inline void Executor::_decrement_topology() { + std::lock_guard lock(_topology_mutex); + --_num_topologies; +} + +// Procedure: wait_for_all +inline void Executor::wait_for_all() { + std::unique_lock lock(_topology_mutex); + _topology_cv.wait(lock, [&](){ return _num_topologies == 0; }); +} + +// Function: _set_up_topology +inline void Executor::_set_up_topology(Worker* worker, Topology* tpg) { + + // ---- under taskflow lock ---- + + tpg->_sources.clear(); + tpg->_taskflow._graph._clear_detached(); + + // scan each node in the graph and build up the links + for(auto node : tpg->_taskflow._graph._nodes) { + + node->_topology = tpg; + node->_state.store(0, std::memory_order_relaxed); + + if(node->num_dependents() == 0) { + tpg->_sources.push_back(node); + } + + node->_set_up_join_counter(); + } + + tpg->_join_counter = tpg->_sources.size(); + + if(worker) { + _schedule(*worker, tpg->_sources); + } + else { + _schedule(tpg->_sources); + } +} + +// Function: _tear_down_topology +inline void Executor::_tear_down_topology(Worker& worker, Topology* tpg) { + + auto &f = tpg->_taskflow; + + //assert(&tpg == &(f._topologies.front())); + + // case 1: we still need to run the topology again + if(!tpg->_is_cancelled && !tpg->_pred()) { + //assert(tpg->_join_counter == 0); + std::lock_guard lock(f._mutex); + tpg->_join_counter = tpg->_sources.size(); + _schedule(worker, tpg->_sources); + } + // case 2: the final run of this topology + else { + + // TODO: if the topology is cancelled, need to release all semaphores + + if(tpg->_call != nullptr) { + tpg->_call(); + } + + // If there is another run (interleave between lock) + if(std::unique_lock lock(f._mutex); f._topologies.size()>1) { + //assert(tpg->_join_counter == 0); + + // Set the promise + tpg->_promise.set_value(); + f._topologies.pop(); + tpg = f._topologies.front().get(); + + // decrement the topology but since this is not the last we don't notify + _decrement_topology(); + + // set up topology needs to be under the lock or it can + // introduce memory order error with pop + _set_up_topology(&worker, tpg); + } + else { + //assert(f._topologies.size() == 1); + + // Need to back up the promise first here becuz taskflow might be + // destroy soon after calling get + auto p {std::move(tpg->_promise)}; + + // Back up lambda capture in case it has the topology pointer, + // to avoid it releasing on pop_front ahead of _mutex.unlock & + // _promise.set_value. Released safely when leaving scope. + auto c {std::move(tpg->_call)}; + + // Get the satellite if any + auto s {f._satellite}; + + // Now we remove the topology from this taskflow + f._topologies.pop(); + + //f._mutex.unlock(); + lock.unlock(); + + // We set the promise in the end in case taskflow leaves the scope. 
+ // After set_value, the caller will return from wait + p.set_value(); + + _decrement_topology_and_notify(); + + // remove the taskflow if it is managed by the executor + // TODO: in the future, we may need to synchronize on wait + // (which means the following code should the moved before set_value) + if(s) { + std::scoped_lock lock(_taskflow_mutex); + _taskflows.erase(*s); + } + } + } +} + +// ############################################################################ +// Forward Declaration: Subflow +// ############################################################################ + +inline void Subflow::join() { + + // assert(this_worker().worker == &_worker); + + if(!_joinable) { + TF_THROW("subflow not joinable"); + } + + // only the parent worker can join the subflow + _executor._invoke_dynamic_task_external(_worker, _parent, _graph, false); + _joinable = false; +} + +inline void Subflow::detach() { + + // assert(this_worker().worker == &_worker); + + if(!_joinable) { + TF_THROW("subflow already joined or detached"); + } + + // only the parent worker can detach the subflow + _executor._invoke_dynamic_task_external(_worker, _parent, _graph, true); + _joinable = false; +} + +// Function: named_async +template +auto Subflow::named_async(const std::string& name, F&& f, ArgsT&&... args) { + return _named_async( + *_executor._this_worker(), name, std::forward(f), std::forward(args)... + ); +} + +// Function: _named_async +template +auto Subflow::_named_async( + Worker& w, + const std::string& name, + F&& f, + ArgsT&&... args +) { + + _parent->_join_counter.fetch_add(1); + + using T = std::invoke_result_t; + using R = std::conditional_t, void, std::optional>; + + std::promise p; + + auto tpg = std::make_shared(); + + Future fu(p.get_future(), tpg); + + auto node = node_pool.animate( + std::in_place_type_t{}, + [p=make_moc(std::move(p)), f=std::forward(f), args...] + (bool cancel) mutable { + if constexpr(std::is_same_v) { + if(!cancel) { + f(args...); + } + p.object.set_value(); + } + else { + p.object.set_value(cancel ? std::nullopt : std::make_optional(f(args...))); + } + }, + std::move(tpg) + ); + + node->_name = name; + node->_topology = _parent->_topology; + node->_parent = _parent; + + _executor._schedule(w, node); + + return fu; +} + +// Function: async +template +auto Subflow::async(F&& f, ArgsT&&... args) { + return named_async("", std::forward(f), std::forward(args)...); +} + +// Function: _named_silent_async +template +void Subflow::_named_silent_async( + Worker& w, const std::string& name, F&& f, ArgsT&&... args +) { + + _parent->_join_counter.fetch_add(1); + + auto node = node_pool.animate( + std::in_place_type_t{}, + [f=std::forward(f), args...] () mutable { + f(args...); + } + ); + + node->_name = name; + node->_topology = _parent->_topology; + node->_parent = _parent; + + _executor._schedule(w, node); +} + +// Function: silent_async +template +void Subflow::named_silent_async(const std::string& name, F&& f, ArgsT&&... args) { + _named_silent_async( + *_executor._this_worker(), name, std::forward(f), std::forward(args)... + ); +} + +// Function: named_silent_async +template +void Subflow::silent_async(F&& f, ArgsT&&... 
args) { + named_silent_async("", std::forward(f), std::forward(args)...); +} + +// ############################################################################ +// Forward Declaration: Runtime +// ############################################################################ + +// Procedure: schedule +inline void Runtime::schedule(Task task) { + auto node = task._node; + auto& j = node->_parent ? node->_parent->_join_counter : + node->_topology->_join_counter; + j.fetch_add(1); + _executor._schedule(_worker, node); +} + +// Procedure: run +template +void Runtime::run(C&& callable) { + + // dynamic task (subflow) + if constexpr(is_dynamic_task_v) { + Graph graph; + Subflow sf(_executor, _worker, _parent, graph); + callable(sf); + if(sf._joinable) { + _executor._invoke_dynamic_task_internal(_worker, _parent, graph); + } + } + else { + static_assert(dependent_false_v, "unsupported task callable to run"); + } +} + +} // end of namespace tf ----------------------------------------------------- + + + + + + + + diff --git a/bundled/taskflow-3.6.0/include/core/executor.hpp b/bundled/taskflow-3.6.0/include/core/executor.hpp new file mode 100644 index 0000000000..a5607e044b --- /dev/null +++ b/bundled/taskflow-3.6.0/include/core/executor.hpp @@ -0,0 +1,2313 @@ +#pragma once + +#include "observer.hpp" +#include "taskflow.hpp" +#include "async_task.hpp" + +/** +@file executor.hpp +@brief executor include file +*/ + +namespace tf { + +// ---------------------------------------------------------------------------- +// Executor Definition +// ---------------------------------------------------------------------------- + +/** @class Executor + +@brief class to create an executor for running a taskflow graph + +An executor manages a set of worker threads to run one or multiple taskflows +using an efficient work-stealing scheduling algorithm. + +@code{.cpp} +// Declare an executor and a taskflow +tf::Executor executor; +tf::Taskflow taskflow; + +// Add three tasks into the taskflow +tf::Task A = taskflow.emplace([] () { std::cout << "This is TaskA\n"; }); +tf::Task B = taskflow.emplace([] () { std::cout << "This is TaskB\n"; }); +tf::Task C = taskflow.emplace([] () { std::cout << "This is TaskC\n"; }); + +// Build precedence between tasks +A.precede(B, C); + +tf::Future fu = executor.run(taskflow); +fu.wait(); // block until the execution completes + +executor.run(taskflow, [](){ std::cout << "end of 1 run"; }).wait(); +executor.run_n(taskflow, 4); +executor.wait_for_all(); // block until all associated executions finish +executor.run_n(taskflow, 4, [](){ std::cout << "end of 4 runs"; }).wait(); +executor.run_until(taskflow, [cnt=0] () mutable { return ++cnt == 10; }); +@endcode + +All the @c run methods are @em thread-safe. You can submit multiple +taskflows at the same time to an executor from different threads. +*/ +class Executor { + + friend class FlowBuilder; + friend class Subflow; + friend class Runtime; + + public: + + /** + @brief constructs the executor with @c N worker threads + + + @param N number of workers (default std::thread::hardware_concurrency) + @param wix worker interface class to alter worker (thread) behaviors + + The constructor spawns @c N worker threads to run tasks in a + work-stealing loop. The number of workers must be greater than zero + or an exception will be thrown. + By default, the number of worker threads is equal to the maximum + hardware concurrency returned by std::thread::hardware_concurrency. 
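+
+  A minimal construction sketch (illustrative only, not part of the upstream
+  documentation; it merely restates the constraints described above):
+
+  @code{.cpp}
+  tf::Executor e1;      // one worker per hardware thread (the default)
+  tf::Executor e2(4);   // exactly four worker threads
+  //tf::Executor e3(0); // would throw: the worker count must be positive
+  @endcode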
+ + Users can alter the worker behavior, such as changing thread affinity, + via deriving an instance from tf::WorkerInterface. + */ + explicit Executor( + size_t N = std::thread::hardware_concurrency(), + std::shared_ptr wix = nullptr + ); + + /** + @brief destructs the executor + + The destructor calls Executor::wait_for_all to wait for all submitted + taskflows to complete and then notifies all worker threads to stop + and join these threads. + */ + ~Executor(); + + /** + @brief runs a taskflow once + + @param taskflow a tf::Taskflow object + + @return a tf::Future that holds the result of the execution + + This member function executes the given taskflow once and returns a tf::Future + object that eventually holds the result of the execution. + + @code{.cpp} + tf::Future future = executor.run(taskflow); + // do something else + future.wait(); + @endcode + + This member function is thread-safe. + + @attention + The executor does not own the given taskflow. It is your responsibility to + ensure the taskflow remains alive during its execution. + */ + tf::Future run(Taskflow& taskflow); + + /** + @brief runs a moved taskflow once + + @param taskflow a moved tf::Taskflow object + + @return a tf::Future that holds the result of the execution + + This member function executes a moved taskflow once and returns a tf::Future + object that eventually holds the result of the execution. + The executor will take care of the lifetime of the moved taskflow. + + @code{.cpp} + tf::Future future = executor.run(std::move(taskflow)); + // do something else + future.wait(); + @endcode + + This member function is thread-safe. + */ + tf::Future run(Taskflow&& taskflow); + + /** + @brief runs a taskflow once and invoke a callback upon completion + + @param taskflow a tf::Taskflow object + @param callable a callable object to be invoked after this run + + @return a tf::Future that holds the result of the execution + + This member function executes the given taskflow once and invokes the given + callable when the execution completes. + This member function returns a tf::Future object that + eventually holds the result of the execution. + + @code{.cpp} + tf::Future future = executor.run(taskflow, [](){ std::cout << "done"; }); + // do something else + future.wait(); + @endcode + + This member function is thread-safe. + + @attention + The executor does not own the given taskflow. It is your responsibility to + ensure the taskflow remains alive during its execution. + */ + template + tf::Future run(Taskflow& taskflow, C&& callable); + + /** + @brief runs a moved taskflow once and invoke a callback upon completion + + @param taskflow a moved tf::Taskflow object + @param callable a callable object to be invoked after this run + + @return a tf::Future that holds the result of the execution + + This member function executes a moved taskflow once and invokes the given + callable when the execution completes. + This member function returns a tf::Future object that + eventually holds the result of the execution. + The executor will take care of the lifetime of the moved taskflow. + + @code{.cpp} + tf::Future future = executor.run( + std::move(taskflow), [](){ std::cout << "done"; } + ); + // do something else + future.wait(); + @endcode + + This member function is thread-safe. 
+ */ + template + tf::Future run(Taskflow&& taskflow, C&& callable); + + /** + @brief runs a taskflow for @c N times + + @param taskflow a tf::Taskflow object + @param N number of runs + + @return a tf::Future that holds the result of the execution + + This member function executes the given taskflow @c N times and returns a tf::Future + object that eventually holds the result of the execution. + + @code{.cpp} + tf::Future future = executor.run_n(taskflow, 2); // run taskflow 2 times + // do something else + future.wait(); + @endcode + + This member function is thread-safe. + + @attention + The executor does not own the given taskflow. It is your responsibility to + ensure the taskflow remains alive during its execution. + */ + tf::Future run_n(Taskflow& taskflow, size_t N); + + /** + @brief runs a moved taskflow for @c N times + + @param taskflow a moved tf::Taskflow object + @param N number of runs + + @return a tf::Future that holds the result of the execution + + This member function executes a moved taskflow @c N times and returns a tf::Future + object that eventually holds the result of the execution. + The executor will take care of the lifetime of the moved taskflow. + + @code{.cpp} + tf::Future future = executor.run_n( + std::move(taskflow), 2 // run the moved taskflow 2 times + ); + // do something else + future.wait(); + @endcode + + This member function is thread-safe. + */ + tf::Future run_n(Taskflow&& taskflow, size_t N); + + /** + @brief runs a taskflow for @c N times and then invokes a callback + + @param taskflow a tf::Taskflow + @param N number of runs + @param callable a callable object to be invoked after this run + + @return a tf::Future that holds the result of the execution + + This member function executes the given taskflow @c N times and invokes the given + callable when the execution completes. + This member function returns a tf::Future object that + eventually holds the result of the execution. + + @code{.cpp} + tf::Future future = executor.run( + taskflow, 2, [](){ std::cout << "done"; } // runs taskflow 2 times and invoke + // the lambda to print "done" + ); + // do something else + future.wait(); + @endcode + + This member function is thread-safe. + + @attention + The executor does not own the given taskflow. It is your responsibility to + ensure the taskflow remains alive during its execution. + */ + template + tf::Future run_n(Taskflow& taskflow, size_t N, C&& callable); + + /** + @brief runs a moved taskflow for @c N times and then invokes a callback + + @param taskflow a moved tf::Taskflow + @param N number of runs + @param callable a callable object to be invoked after this run + + @return a tf::Future that holds the result of the execution + + This member function executes a moved taskflow @c N times and invokes the given + callable when the execution completes. + This member function returns a tf::Future object that + eventually holds the result of the execution. + + @code{.cpp} + tf::Future future = executor.run_n( + // run the moved taskflow 2 times and invoke the lambda to print "done" + std::move(taskflow), 2, [](){ std::cout << "done"; } + ); + // do something else + future.wait(); + @endcode + + This member function is thread-safe. 
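+
+  A behavioral note derived from the implementation bundled here (not
+  upstream wording): if @c N is zero or the taskflow is empty, no topology is
+  created; the callable is invoked right away and an already-satisfied
+  future is returned.
+
+  @code{.cpp}
+  // callable runs immediately; wait() returns without blocking
+  executor.run_n(std::move(taskflow), 0, [](){ std::cout << "done"; }).wait();
+  @endcode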
+ */ + template + tf::Future run_n(Taskflow&& taskflow, size_t N, C&& callable); + + /** + @brief runs a taskflow multiple times until the predicate becomes true + + @param taskflow a tf::Taskflow + @param pred a boolean predicate to return @c true for stop + + @return a tf::Future that holds the result of the execution + + This member function executes the given taskflow multiple times until + the predicate returns @c true. + This member function returns a tf::Future object that + eventually holds the result of the execution. + + @code{.cpp} + tf::Future future = executor.run_until( + taskflow, [](){ return rand()%10 == 0 } + ); + // do something else + future.wait(); + @endcode + + This member function is thread-safe. + + @attention + The executor does not own the given taskflow. It is your responsibility to + ensure the taskflow remains alive during its execution. + */ + template + tf::Future run_until(Taskflow& taskflow, P&& pred); + + /** + @brief runs a moved taskflow and keeps running it + until the predicate becomes true + + @param taskflow a moved tf::Taskflow object + @param pred a boolean predicate to return @c true for stop + + @return a tf::Future that holds the result of the execution + + This member function executes a moved taskflow multiple times until + the predicate returns @c true. + This member function returns a tf::Future object that + eventually holds the result of the execution. + The executor will take care of the lifetime of the moved taskflow. + + @code{.cpp} + tf::Future future = executor.run_until( + std::move(taskflow), [](){ return rand()%10 == 0 } + ); + // do something else + future.wait(); + @endcode + + This member function is thread-safe. + */ + template + tf::Future run_until(Taskflow&& taskflow, P&& pred); + + /** + @brief runs a taskflow multiple times until the predicate becomes true and + then invokes the callback + + @param taskflow a tf::Taskflow + @param pred a boolean predicate to return @c true for stop + @param callable a callable object to be invoked after this run completes + + @return a tf::Future that holds the result of the execution + + This member function executes the given taskflow multiple times until + the predicate returns @c true and then invokes the given callable when + the execution completes. + This member function returns a tf::Future object that + eventually holds the result of the execution. + + @code{.cpp} + tf::Future future = executor.run_until( + taskflow, [](){ return rand()%10 == 0 }, [](){ std::cout << "done"; } + ); + // do something else + future.wait(); + @endcode + + This member function is thread-safe. + + @attention + The executor does not own the given taskflow. It is your responsibility to + ensure the taskflow remains alive during its execution. + */ + template + tf::Future run_until(Taskflow& taskflow, P&& pred, C&& callable); + + /** + @brief runs a moved taskflow and keeps running + it until the predicate becomes true and then invokes the callback + + @param taskflow a moved tf::Taskflow + @param pred a boolean predicate to return @c true for stop + @param callable a callable object to be invoked after this run completes + + @return a tf::Future that holds the result of the execution + + This member function executes a moved taskflow multiple times until + the predicate returns @c true and then invokes the given callable when + the execution completes. + This member function returns a tf::Future object that + eventually holds the result of the execution. 
+ The executor will take care of the lifetime of the moved taskflow. + + @code{.cpp} + tf::Future future = executor.run_until( + std::move(taskflow), + [](){ return rand()%10 == 0 }, [](){ std::cout << "done"; } + ); + // do something else + future.wait(); + @endcode + + This member function is thread-safe. + */ + template + tf::Future run_until(Taskflow&& taskflow, P&& pred, C&& callable); + + /** + @brief runs a target graph and waits until it completes using + an internal worker of this executor + + @tparam T target type which has `tf::Graph& T::graph()` defined + @param target the target task graph object + + The method runs a target graph which has `tf::Graph& T::graph()` defined + and waits until the execution completes. + Unlike the typical flow of calling `tf::Executor::run` series + plus waiting on the result, this method must be called by an internal + worker of this executor. The caller worker will participate in + the work-stealing loop of the scheduler, therby avoiding potential + deadlock caused by blocked waiting. + + @code{.cpp} + tf::Executor executor(2); + tf::Taskflow taskflow; + std::array others; + + std::atomic counter{0}; + + for(size_t n=0; n<1000; n++) { + for(size_t i=0; i<1000; i++) { + others[n].emplace([&](){ counter++; }); + } + taskflow.emplace([&executor, &tf=others[n]](){ + executor.corun(tf); + //executor.run(tf).wait(); <- blocking the worker without doing anything + // will introduce deadlock + }); + } + executor.run(taskflow).wait(); + @endcode + + The method is thread-safe as long as the target is not concurrently + ran by two or more threads. + + @attention + You must call tf::Executor::corun from a worker of the calling executor + or an exception will be thrown. + */ + template + void corun(T& target); + + /** + @brief keeps running the work-stealing loop until the predicate becomes true + + @tparam P predicate type + @param predicate a boolean predicate to indicate when to stop the loop + + The method keeps the caller worker running in the work-stealing loop + until the stop predicate becomes true. + + @code{.cpp} + taskflow.emplace([&](){ + std::future fu = std::async([](){ std::sleep(100s); }); + executor.corun_until([](){ + return fu.wait_for(std::chrono::seconds(0)) == future_status::ready; + }); + }); + @endcode + + @attention + You must call tf::Executor::corun_until from a worker of the calling executor + or an exception will be thrown. + */ + template + void corun_until(P&& predicate); + + /** + @brief waits for all tasks to complete + + This member function waits until all submitted tasks + (e.g., taskflows, asynchronous tasks) to finish. + + @code{.cpp} + executor.run(taskflow1); + executor.run_n(taskflow2, 10); + executor.run_n(taskflow3, 100); + executor.wait_for_all(); // wait until the above submitted taskflows finish + @endcode + */ + void wait_for_all(); + + /** + @brief queries the number of worker threads + + Each worker represents one unique thread spawned by an executor + upon its construction time. + + @code{.cpp} + tf::Executor executor(4); + std::cout << executor.num_workers(); // 4 + @endcode + */ + size_t num_workers() const noexcept; + + /** + @brief queries the number of running topologies at the time of this call + + When a taskflow is submitted to an executor, a topology is created to store + runtime metadata of the running taskflow. + When the execution of the submitted taskflow finishes, + its corresponding topology will be removed from the executor. 
+ + @code{.cpp} + executor.run(taskflow); + std::cout << executor.num_topologies(); // 0 or 1 (taskflow still running) + @endcode + */ + size_t num_topologies() const; + + /** + @brief queries the number of running taskflows with moved ownership + + @code{.cpp} + executor.run(std::move(taskflow)); + std::cout << executor.num_taskflows(); // 0 or 1 (taskflow still running) + @endcode + */ + size_t num_taskflows() const; + + /** + @brief queries the id of the caller thread in this executor + + Each worker has an unique id in the range of @c 0 to @c N-1 associated with + its parent executor. + If the caller thread does not belong to the executor, @c -1 is returned. + + @code{.cpp} + tf::Executor executor(4); // 4 workers in the executor + executor.this_worker_id(); // -1 (main thread is not a worker) + + taskflow.emplace([&](){ + std::cout << executor.this_worker_id(); // 0, 1, 2, or 3 + }); + executor.run(taskflow); + @endcode + */ + int this_worker_id() const; + + // -------------------------------------------------------------------------- + // Observer methods + // -------------------------------------------------------------------------- + + /** + @brief constructs an observer to inspect the activities of worker threads + + @tparam Observer observer type derived from tf::ObserverInterface + @tparam ArgsT argument parameter pack + + @param args arguments to forward to the constructor of the observer + + @return a shared pointer to the created observer + + Each executor manages a list of observers with shared ownership with callers. + For each of these observers, the two member functions, + tf::ObserverInterface::on_entry and tf::ObserverInterface::on_exit + will be called before and after the execution of a task. + + This member function is not thread-safe. + */ + template + std::shared_ptr make_observer(ArgsT&&... args); + + /** + @brief removes an observer from the executor + + This member function is not thread-safe. + */ + template + void remove_observer(std::shared_ptr observer); + + /** + @brief queries the number of observers + */ + size_t num_observers() const noexcept; + + // -------------------------------------------------------------------------- + // Async Task Methods + // -------------------------------------------------------------------------- + + /** + @brief runs a given function asynchronously + + @tparam F callable type + + @param func callable object + + @return a @std_future that will hold the result of the execution + + The method creates an asynchronous task to run the given function + and return a @std_future object that eventually will hold the result + of the return value. + + @code{.cpp} + std::future future = executor.async([](){ + std::cout << "create an asynchronous task and returns 1\n"; + return 1; + }); + future.get(); + @endcode + + This member function is thread-safe. + */ + template + auto async(F&& func); + + /** + @brief runs a given function asynchronously and gives a name to this task + + @tparam F callable type + + @param name name of the asynchronous task + @param func callable object + + @return a @std_future that will hold the result of the execution + + The method creates and assigns a name to an asynchronous task + to run the given function, + returning @std_future object that eventually will hold the result + Assigned task names will appear in the observers of the executor. 
+ + @code{.cpp} + std::future future = executor.async("name", [](){ + std::cout << "create an asynchronous task with a name and returns 1\n"; + return 1; + }); + future.get(); + @endcode + + This member function is thread-safe. + */ + template + auto async(const std::string& name, F&& func); + + /** + @brief similar to tf::Executor::async but does not return a future object + + @tparam F callable type + + @param func callable object + + This member function is more efficient than tf::Executor::async + and is encouraged to use when you do not want a @std_future to + acquire the result or synchronize the execution. + + @code{.cpp} + executor.silent_async([](){ + std::cout << "create an asynchronous task with no return\n"; + }); + executor.wait_for_all(); + @endcode + + This member function is thread-safe. + */ + template + void silent_async(F&& func); + + /** + @brief similar to tf::Executor::async but does not return a future object + + @tparam F callable type + + @param name assigned name to the task + @param func callable object + + This member function is more efficient than tf::Executor::async + and is encouraged to use when you do not want a @std_future to + acquire the result or synchronize the execution. + Assigned task names will appear in the observers of the executor. + + @code{.cpp} + executor.silent_async("name", [](){ + std::cout << "create an asynchronous task with a name and no return\n"; + }); + executor.wait_for_all(); + @endcode + + This member function is thread-safe. + */ + template + void silent_async(const std::string& name, F&& func); + + // -------------------------------------------------------------------------- + // Silent Dependent Async Methods + // -------------------------------------------------------------------------- + + /** + @brief runs the given function asynchronously + when the given dependents finish + + @tparam F callable type + @tparam Tasks task types convertible to tf::AsyncTask + + @param func callable object + @param tasks asynchronous tasks on which this execution depends + + @return a tf::AsyncTask handle + + This member function is more efficient than tf::Executor::dependent_async + and is encouraged to use when you do not want a @std_future to + acquire the result or synchronize the execution. + The example below creates three asynchronous tasks, @c A, @c B, and @c C, + in which task @c C runs after task @c A and task @c B. + + @code{.cpp} + tf::AsyncTask A = executor.silent_dependent_async([](){ printf("A\n"); }); + tf::AsyncTask B = executor.silent_dependent_async([](){ printf("B\n"); }); + executor.silent_dependent_async([](){ printf("C runs after A and B\n"); }, A, B); + executor.wait_for_all(); + @endcode + + This member function is thread-safe. + */ + template ...>, void>* = nullptr + > + tf::AsyncTask silent_dependent_async(F&& func, Tasks&&... tasks); + + /** + @brief names and runs the given function asynchronously + when the given dependents finish + + @tparam F callable type + @tparam Tasks task types convertible to tf::AsyncTask + + @param name assigned name to the task + @param func callable object + @param tasks asynchronous tasks on which this execution depends + + @return a tf::AsyncTask handle + + This member function is more efficient than tf::Executor::dependent_async + and is encouraged to use when you do not want a @std_future to + acquire the result or synchronize the execution. + The example below creates three asynchronous tasks, @c A, @c B, and @c C, + in which task @c C runs after task @c A and task @c B. 
+ Assigned task names will appear in the observers of the executor. + + @code{.cpp} + tf::AsyncTask A = executor.silent_dependent_async("A", [](){ printf("A\n"); }); + tf::AsyncTask B = executor.silent_dependent_async("B", [](){ printf("B\n"); }); + executor.silent_dependent_async( + "C", [](){ printf("C runs after A and B\n"); }, A, B + ); + executor.wait_for_all(); + @endcode + + This member function is thread-safe. + */ + template ...>, void>* = nullptr + > + tf::AsyncTask silent_dependent_async(const std::string& name, F&& func, Tasks&&... tasks); + + /** + @brief runs the given function asynchronously + when the given range of dependents finish + + @tparam F callable type + @tparam I iterator type + + @param func callable object + @param first iterator to the beginning (inclusive) + @param last iterator to the end (exclusive) + + @return a tf::AsyncTask handle + + This member function is more efficient than tf::Executor::dependent_async + and is encouraged to use when you do not want a @std_future to + acquire the result or synchronize the execution. + The example below creates three asynchronous tasks, @c A, @c B, and @c C, + in which task @c C runs after task @c A and task @c B. + + @code{.cpp} + std::array array { + executor.silent_dependent_async([](){ printf("A\n"); }), + executor.silent_dependent_async([](){ printf("B\n"); }) + }; + executor.silent_dependent_async( + [](){ printf("C runs after A and B\n"); }, array.begin(), array.end() + ); + executor.wait_for_all(); + @endcode + + This member function is thread-safe. + */ + template , AsyncTask>, void>* = nullptr + > + tf::AsyncTask silent_dependent_async(F&& func, I first, I last); + + /** + @brief names and runs the given function asynchronously + when the given range of dependents finish + + @tparam F callable type + @tparam I iterator type + + @param name assigned name to the task + @param func callable object + @param first iterator to the beginning (inclusive) + @param last iterator to the end (exclusive) + + @return a tf::AsyncTask handle + + This member function is more efficient than tf::Executor::dependent_async + and is encouraged to use when you do not want a @std_future to + acquire the result or synchronize the execution. + The example below creates three asynchronous tasks, @c A, @c B, and @c C, + in which task @c C runs after task @c A and task @c B. + Assigned task names will appear in the observers of the executor. + + @code{.cpp} + std::array array { + executor.silent_dependent_async("A", [](){ printf("A\n"); }), + executor.silent_dependent_async("B", [](){ printf("B\n"); }) + }; + executor.silent_dependent_async( + "C", [](){ printf("C runs after A and B\n"); }, array.begin(), array.end() + ); + executor.wait_for_all(); + @endcode + + This member function is thread-safe. 
+ */ + template , AsyncTask>, void>* = nullptr + > + tf::AsyncTask silent_dependent_async(const std::string& name, F&& func, I first, I last); + + // -------------------------------------------------------------------------- + // Dependent Async Methods + // -------------------------------------------------------------------------- + + /** + @brief runs the given function asynchronously + when the given dependents finish + + @tparam F callable type + @tparam Tasks task types convertible to tf::AsyncTask + + @param func callable object + @param tasks asynchronous tasks on which this execution depends + + @return a pair of a tf::AsyncTask handle and + a @std_future that holds the result of the execution + + The example below creates three asynchronous tasks, @c A, @c B, and @c C, + in which task @c C runs after task @c A and task @c B. + Task @c C returns a pair of its tf::AsyncTask handle and a std::future + that eventually will hold the result of the execution. + + @code{.cpp} + tf::AsyncTask A = executor.silent_dependent_async([](){ printf("A\n"); }); + tf::AsyncTask B = executor.silent_dependent_async([](){ printf("B\n"); }); + auto [C, fuC] = executor.dependent_async( + [](){ + printf("C runs after A and B\n"); + return 1; + }, + A, B + ); + fuC.get(); // C finishes, which in turns means both A and B finish + @endcode + + You can mixed the use of tf::AsyncTask handles + returned by Executor::dependent_async and Executor::silent_dependent_async + when specifying task dependencies. + + This member function is thread-safe. + */ + template ...>, void>* = nullptr + > + auto dependent_async(F&& func, Tasks&&... tasks); + + /** + @brief names and runs the given function asynchronously + when the given dependents finish + + @tparam F callable type + @tparam Tasks task types convertible to tf::AsyncTask + + @param name assigned name to the task + @param func callable object + @param tasks asynchronous tasks on which this execution depends + + @return a pair of a tf::AsyncTask handle and + a @std_future that holds the result of the execution + + The example below creates three named asynchronous tasks, @c A, @c B, and @c C, + in which task @c C runs after task @c A and task @c B. + Task @c C returns a pair of its tf::AsyncTask handle and a std::future + that eventually will hold the result of the execution. + Assigned task names will appear in the observers of the executor. + + @code{.cpp} + tf::AsyncTask A = executor.silent_dependent_async("A", [](){ printf("A\n"); }); + tf::AsyncTask B = executor.silent_dependent_async("B", [](){ printf("B\n"); }); + auto [C, fuC] = executor.dependent_async( + "C", + [](){ + printf("C runs after A and B\n"); + return 1; + }, + A, B + ); + assert(fuC.get()==1); // C finishes, which in turns means both A and B finish + @endcode + + You can mixed the use of tf::AsyncTask handles + returned by Executor::dependent_async and Executor::silent_dependent_async + when specifying task dependencies. + + This member function is thread-safe. + */ + template ...>, void>* = nullptr + > + auto dependent_async(const std::string& name, F&& func, Tasks&&... 
tasks); + + /** + @brief runs the given function asynchronously + when the given range of dependents finish + + @tparam F callable type + @tparam I iterator type + + @param func callable object + @param first iterator to the beginning (inclusive) + @param last iterator to the end (exclusive) + + @return a pair of a tf::AsyncTask handle and + a @std_future that holds the result of the execution + + The example below creates three asynchronous tasks, @c A, @c B, and @c C, + in which task @c C runs after task @c A and task @c B. + Task @c C returns a pair of its tf::AsyncTask handle and a std::future + that eventually will hold the result of the execution. + + @code{.cpp} + std::array array { + executor.silent_dependent_async([](){ printf("A\n"); }), + executor.silent_dependent_async([](){ printf("B\n"); }) + }; + auto [C, fuC] = executor.dependent_async( + [](){ + printf("C runs after A and B\n"); + return 1; + }, + array.begin(), array.end() + ); + assert(fuC.get()==1); // C finishes, which in turns means both A and B finish + @endcode + + You can mixed the use of tf::AsyncTask handles + returned by Executor::dependent_async and Executor::silent_dependent_async + when specifying task dependencies. + + This member function is thread-safe. + */ + template , AsyncTask>, void>* = nullptr + > + auto dependent_async(F&& func, I first, I last); + + /** + @brief names and runs the given function asynchronously + when the given range of dependents finish + + @tparam F callable type + @tparam I iterator type + + @param name assigned name to the task + @param func callable object + @param first iterator to the beginning (inclusive) + @param last iterator to the end (exclusive) + + @return a pair of a tf::AsyncTask handle and + a @std_future that holds the result of the execution + + The example below creates three named asynchronous tasks, @c A, @c B, and @c C, + in which task @c C runs after task @c A and task @c B. + Task @c C returns a pair of its tf::AsyncTask handle and a std::future + that eventually will hold the result of the execution. + Assigned task names will appear in the observers of the executor. + + @code{.cpp} + std::array array { + executor.silent_dependent_async("A", [](){ printf("A\n"); }), + executor.silent_dependent_async("B", [](){ printf("B\n"); }) + }; + auto [C, fuC] = executor.dependent_async( + "C", + [](){ + printf("C runs after A and B\n"); + return 1; + }, + array.begin(), array.end() + ); + assert(fuC.get()==1); // C finishes, which in turns means both A and B finish + @endcode + + You can mixed the use of tf::AsyncTask handles + returned by Executor::dependent_async and Executor::silent_dependent_async + when specifying task dependencies. + + This member function is thread-safe. 
+ */ + template , AsyncTask>, void>* = nullptr + > + auto dependent_async(const std::string& name, F&& func, I first, I last); + + private: + + const size_t _MAX_STEALS; + + std::condition_variable _topology_cv; + std::mutex _taskflows_mutex; + std::mutex _topology_mutex; + std::mutex _wsq_mutex; + std::mutex _asyncs_mutex; + + size_t _num_topologies {0}; + + std::unordered_map _wids; + std::vector _threads; + std::vector _workers; + std::list _taskflows; + + std::unordered_set> _asyncs; + + Notifier _notifier; + + TaskQueue _wsq; + + std::atomic _done {0}; + + std::shared_ptr _worker_interface; + std::unordered_set> _observers; + + Worker* _this_worker(); + + bool _wait_for_task(Worker&, Node*&); + + void _observer_prologue(Worker&, Node*); + void _observer_epilogue(Worker&, Node*); + void _spawn(size_t); + void _exploit_task(Worker&, Node*&); + void _explore_task(Worker&, Node*&); + void _schedule(Worker&, Node*); + void _schedule(Node*); + void _schedule(Worker&, const SmallVector&); + void _schedule(const SmallVector&); + void _set_up_topology(Worker*, Topology*); + void _tear_down_topology(Worker&, Topology*); + void _tear_down_async(Node*); + void _tear_down_dependent_async(Worker&, Node*); + void _tear_down_invoke(Worker&, Node*); + void _increment_topology(); + void _decrement_topology(); + void _decrement_topology_and_notify(); + void _invoke(Worker&, Node*); + void _invoke_static_task(Worker&, Node*); + void _invoke_dynamic_task(Worker&, Node*); + void _consume_graph(Worker&, Node*, Graph&); + void _detach_dynamic_task(Worker&, Node*, Graph&); + void _invoke_condition_task(Worker&, Node*, SmallVector&); + void _invoke_multi_condition_task(Worker&, Node*, SmallVector&); + void _invoke_module_task(Worker&, Node*); + void _invoke_async_task(Worker&, Node*); + void _invoke_dependent_async_task(Worker&, Node*); + void _process_async_dependent(Node*, tf::AsyncTask&, size_t&); + void _schedule_async_task(Node*); + + template + void _corun_until(Worker&, P&&); + + template + auto _make_promised_async(std::promise&&, F&&); +}; + +// Constructor +inline Executor::Executor(size_t N, std::shared_ptr wix) : + _MAX_STEALS {((N+1) << 1)}, + _threads {N}, + _workers {N}, + _notifier {N}, + _worker_interface {std::move(wix)} { + + if(N == 0) { + TF_THROW("no cpu workers to execute taskflows"); + } + + _spawn(N); + + // instantite the default observer if requested + if(has_env(TF_ENABLE_PROFILER)) { + TFProfManager::get()._manage(make_observer()); + } +} + +// Destructor +inline Executor::~Executor() { + + // wait for all topologies to complete + wait_for_all(); + + // shut down the scheduler + _done = true; + + _notifier.notify(true); + + for(auto& t : _threads){ + t.join(); + } +} + +// Function: num_workers +inline size_t Executor::num_workers() const noexcept { + return _workers.size(); +} + +// Function: num_topologies +inline size_t Executor::num_topologies() const { + return _num_topologies; +} + +// Function: num_taskflows +inline size_t Executor::num_taskflows() const { + return _taskflows.size(); +} + +// Function: _this_worker +inline Worker* Executor::_this_worker() { + auto itr = _wids.find(std::this_thread::get_id()); + return itr == _wids.end() ? nullptr : &_workers[itr->second]; +} + +// Function: this_worker_id +inline int Executor::this_worker_id() const { + auto i = _wids.find(std::this_thread::get_id()); + return i == _wids.end() ? 
-1 : static_cast(_workers[i->second]._id); +} + +// Procedure: _spawn +inline void Executor::_spawn(size_t N) { + + std::mutex mutex; + std::condition_variable cond; + size_t n=0; + + for(size_t id=0; id void { + + // assign the thread + w._thread = &_threads[w._id]; + + // enables the mapping + { + std::scoped_lock lock(mutex); + _wids[std::this_thread::get_id()] = w._id; + if(n++; n == num_workers()) { + cond.notify_one(); + } + } + + Node* t = nullptr; + + // before entering the scheduler (work-stealing loop), + // call the user-specified prologue function + if(_worker_interface) { + _worker_interface->scheduler_prologue(w); + } + + // must use 1 as condition instead of !done because + // the previous worker may stop while the following workers + // are still preparing for entering the scheduling loop + std::exception_ptr ptr{nullptr}; + try { + while(1) { + + // execute the tasks. + _exploit_task(w, t); + + // wait for tasks + if(_wait_for_task(w, t) == false) { + break; + } + } + } + catch(...) { + ptr = std::current_exception(); + } + + // call the user-specified epilogue function + if(_worker_interface) { + _worker_interface->scheduler_epilogue(w, ptr); + } + + }, std::ref(_workers[id]), std::ref(mutex), std::ref(cond), std::ref(n)); + + // POSIX-like system can use the following to affine threads to cores + //cpu_set_t cpuset; + //CPU_ZERO(&cpuset); + //CPU_SET(id, &cpuset); + //pthread_setaffinity_np( + // _threads[id].native_handle(), sizeof(cpu_set_t), &cpuset + //); + } + + std::unique_lock lock(mutex); + cond.wait(lock, [&](){ return n==N; }); +} + +// Function: _corun_until +template +void Executor::_corun_until(Worker& w, P&& stop_predicate) { + + std::uniform_int_distribution rdvtm(0, _workers.size()-1); + + exploit: + + while(!stop_predicate()) { + + //exploit: + + if(auto t = w._wsq.pop(); t) { + _invoke(w, t); + } + else { + size_t num_steals = 0; + + explore: + + t = (w._id == w._vtm) ? _wsq.steal() : _workers[w._vtm]._wsq.steal(); + + if(t) { + _invoke(w, t); + goto exploit; + } + else if(!stop_predicate()) { + if(num_steals++ > _MAX_STEALS) { + std::this_thread::yield(); + } + w._vtm = rdvtm(w._rdgen); + goto explore; + } + else { + break; + } + } + } +} + +// Function: _explore_task +inline void Executor::_explore_task(Worker& w, Node*& t) { + + //assert(_workers[w].wsq.empty()); + //assert(!t); + + size_t num_steals = 0; + size_t num_yields = 0; + + std::uniform_int_distribution rdvtm(0, _workers.size()-1); + + // Here, we write do-while to make the worker steal at once + // from the assigned victim. + do { + t = (w._id == w._vtm) ? _wsq.steal() : _workers[w._vtm]._wsq.steal(); + + if(t) { + break; + } + + if(num_steals++ > _MAX_STEALS) { + std::this_thread::yield(); + if(num_yields++ > 100) { + break; + } + } + + w._vtm = rdvtm(w._rdgen); + } while(!_done); + +} + +// Procedure: _exploit_task +inline void Executor::_exploit_task(Worker& w, Node*& t) { + while(t) { + _invoke(w, t); + t = w._wsq.pop(); + } +} + +// Function: _wait_for_task +inline bool Executor::_wait_for_task(Worker& worker, Node*& t) { + + explore_task: + + _explore_task(worker, t); + + // The last thief who successfully stole a task will wake up + // another thief worker to avoid starvation. 
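+  // If the steal came back empty, the code below performs a two-phase
+  // "prepare / re-check / commit" sleep: prepare_wait() announces the intent
+  // to block, then the shared queue, every per-worker queue, and the done
+  // flag are re-checked; only if all of them are still empty does
+  // commit_wait() put the worker to sleep, otherwise cancel_wait() rolls the
+  // announcement back and stealing resumes.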
+ if(t) { + _notifier.notify(false); + return true; + } + + // ---- 2PC guard ---- + _notifier.prepare_wait(worker._waiter); + + if(!_wsq.empty()) { + _notifier.cancel_wait(worker._waiter); + worker._vtm = worker._id; + goto explore_task; + } + + if(_done) { + _notifier.cancel_wait(worker._waiter); + _notifier.notify(true); + return false; + } + + // We need to use index-based scanning to avoid data race + // with _spawn which may initialize a worker at the same time. + for(size_t vtm=0; vtm<_workers.size(); vtm++) { + if(!_workers[vtm]._wsq.empty()) { + _notifier.cancel_wait(worker._waiter); + worker._vtm = vtm; + goto explore_task; + } + } + + // Now I really need to relinguish my self to others + _notifier.commit_wait(worker._waiter); + + goto explore_task; +} + +// Function: make_observer +template +std::shared_ptr Executor::make_observer(ArgsT&&... args) { + + static_assert( + std::is_base_of_v, + "Observer must be derived from ObserverInterface" + ); + + // use a local variable to mimic the constructor + auto ptr = std::make_shared(std::forward(args)...); + + ptr->set_up(_workers.size()); + + _observers.emplace(std::static_pointer_cast(ptr)); + + return ptr; +} + +// Procedure: remove_observer +template +void Executor::remove_observer(std::shared_ptr ptr) { + + static_assert( + std::is_base_of_v, + "Observer must be derived from ObserverInterface" + ); + + _observers.erase(std::static_pointer_cast(ptr)); +} + +// Function: num_observers +inline size_t Executor::num_observers() const noexcept { + return _observers.size(); +} + +// Procedure: _schedule +inline void Executor::_schedule(Worker& worker, Node* node) { + + // We need to fetch p before the release such that the read + // operation is synchronized properly with other thread to + // void data race. + auto p = node->_priority; + + node->_state.fetch_or(Node::READY, std::memory_order_release); + + // caller is a worker to this pool - starting at v3.5 we do not use + // any complicated notification mechanism as the experimental result + // has shown no significant advantage. + if(worker._executor == this) { + worker._wsq.push(node, p); + _notifier.notify(false); + return; + } + + { + std::lock_guard lock(_wsq_mutex); + _wsq.push(node, p); + } + + _notifier.notify(false); +} + +// Procedure: _schedule +inline void Executor::_schedule(Node* node) { + + // We need to fetch p before the release such that the read + // operation is synchronized properly with other thread to + // void data race. + auto p = node->_priority; + + node->_state.fetch_or(Node::READY, std::memory_order_release); + + { + std::lock_guard lock(_wsq_mutex); + _wsq.push(node, p); + } + + _notifier.notify(false); +} + +// Procedure: _schedule +inline void Executor::_schedule(Worker& worker, const SmallVector& nodes) { + + // We need to cacth the node count to avoid accessing the nodes + // vector while the parent topology is removed! + const auto num_nodes = nodes.size(); + + if(num_nodes == 0) { + return; + } + + // caller is a worker to this pool - starting at v3.5 we do not use + // any complicated notification mechanism as the experimental result + // has shown no significant advantage. 
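+  // Local path (below): each node is pushed straight into the calling
+  // worker's own queue with one notify(false) per push. External path:
+  // all nodes go into the shared queue under _wsq_mutex, followed by a
+  // single notify_n(num_nodes).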
+ if(worker._executor == this) { + for(size_t i=0; i_priority; + nodes[i]->_state.fetch_or(Node::READY, std::memory_order_release); + worker._wsq.push(nodes[i], p); + _notifier.notify(false); + } + return; + } + + { + std::lock_guard lock(_wsq_mutex); + for(size_t k=0; k_priority; + nodes[k]->_state.fetch_or(Node::READY, std::memory_order_release); + _wsq.push(nodes[k], p); + } + } + + _notifier.notify_n(num_nodes); +} + +// Procedure: _schedule +inline void Executor::_schedule(const SmallVector& nodes) { + + // parent topology may be removed! + const auto num_nodes = nodes.size(); + + if(num_nodes == 0) { + return; + } + + // We need to fetch p before the release such that the read + // operation is synchronized properly with other thread to + // void data race. + { + std::lock_guard lock(_wsq_mutex); + for(size_t k=0; k_priority; + nodes[k]->_state.fetch_or(Node::READY, std::memory_order_release); + _wsq.push(nodes[k], p); + } + } + + _notifier.notify_n(num_nodes); +} + +// Procedure: _invoke +inline void Executor::_invoke(Worker& worker, Node* node) { + + // synchronize all outstanding memory operations caused by reordering + while(!(node->_state.load(std::memory_order_acquire) & Node::READY)); + + begin_invoke: + + // no need to do other things if the topology is cancelled + if(node->_is_cancelled()) { + _tear_down_invoke(worker, node); + return; + } + + // if acquiring semaphore(s) exists, acquire them first + if(node->_semaphores && !node->_semaphores->to_acquire.empty()) { + SmallVector nodes; + if(!node->_acquire_all(nodes)) { + _schedule(worker, nodes); + return; + } + node->_state.fetch_or(Node::ACQUIRED, std::memory_order_release); + } + + // condition task + //int cond = -1; + SmallVector conds; + + // switch is faster than nested if-else due to jump table + switch(node->_handle.index()) { + // static task + case Node::STATIC:{ + _invoke_static_task(worker, node); + } + break; + + // dynamic task + case Node::DYNAMIC: { + _invoke_dynamic_task(worker, node); + } + break; + + // condition task + case Node::CONDITION: { + _invoke_condition_task(worker, node, conds); + } + break; + + // multi-condition task + case Node::MULTI_CONDITION: { + _invoke_multi_condition_task(worker, node, conds); + } + break; + + // module task + case Node::MODULE: { + _invoke_module_task(worker, node); + } + break; + + // async task + case Node::ASYNC: { + _invoke_async_task(worker, node); + _tear_down_async(node); + return ; + } + break; + + // dependent async task + case Node::DEPENDENT_ASYNC: { + _invoke_dependent_async_task(worker, node); + _tear_down_dependent_async(worker, node); + if(worker._cache) { + node = worker._cache; + goto begin_invoke; + } + return; + } + break; + + // monostate (placeholder) + default: + break; + } + + // if releasing semaphores exist, release them + if(node->_semaphores && !node->_semaphores->to_release.empty()) { + _schedule(worker, node->_release_all()); + } + + // Reset the join counter to support the cyclic control flow. + // + We must do this before scheduling the successors to avoid race + // condition on _dependents. + // + We must use fetch_add instead of direct assigning + // because the user-space call on "invoke" may explicitly schedule + // this task again (e.g., pipeline) which can access the join_counter. 
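+  // Note (added for clarity): the branch below counts only the strong
+  // dependents of a node reached through condition (weak) edges. A condition
+  // task re-schedules its chosen successor directly with a zeroed join
+  // counter (see the CONDITION case below), so weak edges bypass the counter.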
+ if((node->_state.load(std::memory_order_relaxed) & Node::CONDITIONED)) { + node->_join_counter.fetch_add(node->num_strong_dependents(), std::memory_order_relaxed); + } + else { + node->_join_counter.fetch_add(node->num_dependents(), std::memory_order_relaxed); + } + + // acquire the parent flow counter + auto& j = (node->_parent) ? node->_parent->_join_counter : + node->_topology->_join_counter; + + // Here, we want to cache the latest successor with the highest priority + worker._cache = nullptr; + auto max_p = static_cast(TaskPriority::MAX); + + // Invoke the task based on the corresponding type + switch(node->_handle.index()) { + + // condition and multi-condition tasks + case Node::CONDITION: + case Node::MULTI_CONDITION: { + for(auto cond : conds) { + if(cond >= 0 && static_cast(cond) < node->_successors.size()) { + auto s = node->_successors[cond]; + // zeroing the join counter for invariant + s->_join_counter.store(0, std::memory_order_relaxed); + j.fetch_add(1, std::memory_order_relaxed); + if(s->_priority <= max_p) { + if(worker._cache) { + _schedule(worker, worker._cache); + } + worker._cache = s; + max_p = s->_priority; + } + else { + _schedule(worker, s); + } + } + } + } + break; + + // non-condition task + default: { + for(size_t i=0; i_successors.size(); ++i) { + //if(auto s = node->_successors[i]; --(s->_join_counter) == 0) { + if(auto s = node->_successors[i]; + s->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1) { + j.fetch_add(1, std::memory_order_relaxed); + if(s->_priority <= max_p) { + if(worker._cache) { + _schedule(worker, worker._cache); + } + worker._cache = s; + max_p = s->_priority; + } + else { + _schedule(worker, s); + } + } + } + } + break; + } + + // tear_down the invoke + _tear_down_invoke(worker, node); + + // perform tail recursion elimination for the right-most child to reduce + // the number of expensive pop/push operations through the task queue + if(worker._cache) { + node = worker._cache; + //node->_state.fetch_or(Node::READY, std::memory_order_release); + goto begin_invoke; + } +} + +// Proecdure: _tear_down_invoke +inline void Executor::_tear_down_invoke(Worker& worker, Node* node) { + // we must check parent first before substracting the join counter, + // or it can introduce data race + if(node->_parent == nullptr) { + if(node->_topology->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1) { + _tear_down_topology(worker, node->_topology); + } + } + // joined subflow + else { + node->_parent->_join_counter.fetch_sub(1, std::memory_order_release); + } +} + +// Procedure: _observer_prologue +inline void Executor::_observer_prologue(Worker& worker, Node* node) { + for(auto& observer : _observers) { + observer->on_entry(WorkerView(worker), TaskView(*node)); + } +} + +// Procedure: _observer_epilogue +inline void Executor::_observer_epilogue(Worker& worker, Node* node) { + for(auto& observer : _observers) { + observer->on_exit(WorkerView(worker), TaskView(*node)); + } +} + +// Procedure: _invoke_static_task +inline void Executor::_invoke_static_task(Worker& worker, Node* node) { + _observer_prologue(worker, node); + auto& work = std::get_if(&node->_handle)->work; + switch(work.index()) { + case 0: + std::get_if<0>(&work)->operator()(); + break; + + case 1: + Runtime rt(*this, worker, node); + std::get_if<1>(&work)->operator()(rt); + break; + } + _observer_epilogue(worker, node); +} + +// Procedure: _invoke_dynamic_task +inline void Executor::_invoke_dynamic_task(Worker& w, Node* node) { + + _observer_prologue(w, node); + + auto 
handle = std::get_if(&node->_handle); + + handle->subgraph._clear(); + + Subflow sf(*this, w, node, handle->subgraph); + + handle->work(sf); + + if(sf._joinable) { + _consume_graph(w, node, handle->subgraph); + } + + _observer_epilogue(w, node); +} + +// Procedure: _detach_dynamic_task +inline void Executor::_detach_dynamic_task( + Worker& w, Node* p, Graph& g +) { + + // graph is empty and has no async tasks + if(g.empty() && p->_join_counter.load(std::memory_order_acquire) == 0) { + return; + } + + SmallVector src; + + for(auto n : g._nodes) { + + n->_state.store(Node::DETACHED, std::memory_order_relaxed); + n->_set_up_join_counter(); + n->_topology = p->_topology; + n->_parent = nullptr; + + if(n->num_dependents() == 0) { + src.push_back(n); + } + } + + { + std::lock_guard lock(p->_topology->_taskflow._mutex); + p->_topology->_taskflow._graph._merge(std::move(g)); + } + + p->_topology->_join_counter.fetch_add(src.size(), std::memory_order_relaxed); + _schedule(w, src); +} + +// Procedure: _consume_graph +inline void Executor::_consume_graph(Worker& w, Node* p, Graph& g) { + + // graph is empty and has no async tasks + if(g.empty() && p->_join_counter.load(std::memory_order_acquire) == 0) { + return; + } + + SmallVector src; + + for(auto n : g._nodes) { + n->_state.store(0, std::memory_order_relaxed); + n->_set_up_join_counter(); + n->_topology = p->_topology; + n->_parent = p; + if(n->num_dependents() == 0) { + src.push_back(n); + } + } + p->_join_counter.fetch_add(src.size(), std::memory_order_relaxed); + + _schedule(w, src); + _corun_until(w, [p] () -> bool { return p->_join_counter.load(std::memory_order_acquire) == 0; }); +} + +// Procedure: _invoke_condition_task +inline void Executor::_invoke_condition_task( + Worker& worker, Node* node, SmallVector& conds +) { + _observer_prologue(worker, node); + auto& work = std::get_if(&node->_handle)->work; + switch(work.index()) { + case 0: + conds = { std::get_if<0>(&work)->operator()() }; + break; + + case 1: + Runtime rt(*this, worker, node); + conds = { std::get_if<1>(&work)->operator()(rt) }; + break; + } + _observer_epilogue(worker, node); +} + +// Procedure: _invoke_multi_condition_task +inline void Executor::_invoke_multi_condition_task( + Worker& worker, Node* node, SmallVector& conds +) { + _observer_prologue(worker, node); + auto& work = std::get_if(&node->_handle)->work; + switch(work.index()) { + case 0: + conds = std::get_if<0>(&work)->operator()(); + break; + + case 1: + Runtime rt(*this, worker, node); + conds = std::get_if<1>(&work)->operator()(rt); + break; + } + _observer_epilogue(worker, node); +} + +// Procedure: _invoke_module_task +inline void Executor::_invoke_module_task(Worker& w, Node* node) { + _observer_prologue(w, node); + _consume_graph( + w, node, std::get_if(&node->_handle)->graph + ); + _observer_epilogue(w, node); +} + +// Procedure: _invoke_async_task +inline void Executor::_invoke_async_task(Worker& w, Node* node) { + _observer_prologue(w, node); + std::get_if(&node->_handle)->work(); + _observer_epilogue(w, node); +} + +// Procedure: _invoke_dependent_async_task +inline void Executor::_invoke_dependent_async_task(Worker& w, Node* node) { + _observer_prologue(w, node); + std::get_if(&node->_handle)->work(); + _observer_epilogue(w, node); +} + +// Function: run +inline tf::Future Executor::run(Taskflow& f) { + return run_n(f, 1, [](){}); +} + +// Function: run +inline tf::Future Executor::run(Taskflow&& f) { + return run_n(std::move(f), 1, [](){}); +} + +// Function: run +template +tf::Future 
Executor::run(Taskflow& f, C&& c) { + return run_n(f, 1, std::forward(c)); +} + +// Function: run +template +tf::Future Executor::run(Taskflow&& f, C&& c) { + return run_n(std::move(f), 1, std::forward(c)); +} + +// Function: run_n +inline tf::Future Executor::run_n(Taskflow& f, size_t repeat) { + return run_n(f, repeat, [](){}); +} + +// Function: run_n +inline tf::Future Executor::run_n(Taskflow&& f, size_t repeat) { + return run_n(std::move(f), repeat, [](){}); +} + +// Function: run_n +template +tf::Future Executor::run_n(Taskflow& f, size_t repeat, C&& c) { + return run_until( + f, [repeat]() mutable { return repeat-- == 0; }, std::forward(c) + ); +} + +// Function: run_n +template +tf::Future Executor::run_n(Taskflow&& f, size_t repeat, C&& c) { + return run_until( + std::move(f), [repeat]() mutable { return repeat-- == 0; }, std::forward(c) + ); +} + +// Function: run_until +template +tf::Future Executor::run_until(Taskflow& f, P&& pred) { + return run_until(f, std::forward

<P>(pred), [](){});
+}
+
+// Function: run_until
+template <typename P>
+tf::Future<void> Executor::run_until(Taskflow&& f, P&& pred) {
+  return run_until(std::move(f), std::forward

<P>(pred), [](){});
+}
+
+// Function: run_until
+template <typename P, typename C>
+tf::Future<void> Executor::run_until(Taskflow& f, P&& p, C&& c) {
+
+  _increment_topology();
+
+  // Need to check emptiness under the lock since a dynamic task may
+  // define detached blocks that modify the taskflow at the same time
+  bool empty;
+  {
+    std::lock_guard<std::mutex> lock(f._mutex);
+    empty = f.empty();
+  }
+
+  // No need to create a real topology but returns a dummy future
+  if(empty || p()) {
+    c();
+    std::promise<void> promise;
+    promise.set_value();
+    _decrement_topology_and_notify();
+    return tf::Future<void>(promise.get_future(), std::monostate{});
+  }
+
+  // create a topology for this run
+  auto t = std::make_shared<Topology>(f, std::forward

<P>(p), std::forward<C>(c));
+
+  // need to create the future before the topology gets torn down quickly
+  tf::Future<void> future(t->_promise.get_future(), t);
+
+  // modifying topology needs to be protected under the lock
+  {
+    std::lock_guard<std::mutex> lock(f._mutex);
+    f._topologies.push(t);
+    if(f._topologies.size() == 1) {
+      _set_up_topology(_this_worker(), t.get());
+    }
+  }
+
+  return future;
+}
+
+// Function: run_until
+template <typename P, typename C>
+tf::Future<void> Executor::run_until(Taskflow&& f, P&& pred, C&& c) {
+
+  std::list<Taskflow>::iterator itr;
+
+  {
+    std::scoped_lock<std::mutex> lock(_taskflows_mutex);
+    itr = _taskflows.emplace(_taskflows.end(), std::move(f));
+    itr->_satellite = itr;
+  }
+
+  return run_until(*itr, std::forward

<P>(pred), std::forward<C>(c));
+}
+
+// Function: corun
+template <typename T>
+void Executor::corun(T& target) {
+
+  auto w = _this_worker();
+
+  if(w == nullptr) {
+    TF_THROW("corun must be called by a worker of the executor");
+  }
+
+  Node parent;  // dummy parent
+  _consume_graph(*w, &parent, target.graph());
+}
+
+// Function: corun_until
+template <typename P>
+void Executor::corun_until(P&& predicate) {
+
+  auto w = _this_worker();
+
+  if(w == nullptr) {
+    TF_THROW("corun_until must be called by a worker of the executor");
+  }
+
+  _corun_until(*w, std::forward<P>

(predicate)); +} + +// Procedure: _increment_topology +inline void Executor::_increment_topology() { + std::lock_guard lock(_topology_mutex); + ++_num_topologies; +} + +// Procedure: _decrement_topology_and_notify +inline void Executor::_decrement_topology_and_notify() { + std::lock_guard lock(_topology_mutex); + if(--_num_topologies == 0) { + _topology_cv.notify_all(); + } +} + +// Procedure: _decrement_topology +inline void Executor::_decrement_topology() { + std::lock_guard lock(_topology_mutex); + --_num_topologies; +} + +// Procedure: wait_for_all +inline void Executor::wait_for_all() { + std::unique_lock lock(_topology_mutex); + _topology_cv.wait(lock, [&](){ return _num_topologies == 0; }); +} + +// Function: _set_up_topology +inline void Executor::_set_up_topology(Worker* worker, Topology* tpg) { + + // ---- under taskflow lock ---- + + tpg->_sources.clear(); + tpg->_taskflow._graph._clear_detached(); + + // scan each node in the graph and build up the links + for(auto node : tpg->_taskflow._graph._nodes) { + + node->_topology = tpg; + node->_parent = nullptr; + node->_state.store(0, std::memory_order_relaxed); + + if(node->num_dependents() == 0) { + tpg->_sources.push_back(node); + } + + node->_set_up_join_counter(); + } + + tpg->_join_counter.store(tpg->_sources.size(), std::memory_order_relaxed); + + if(worker) { + _schedule(*worker, tpg->_sources); + } + else { + _schedule(tpg->_sources); + } +} + +// Function: _tear_down_topology +inline void Executor::_tear_down_topology(Worker& worker, Topology* tpg) { + + auto &f = tpg->_taskflow; + + //assert(&tpg == &(f._topologies.front())); + + // case 1: we still need to run the topology again + if(!tpg->_is_cancelled && !tpg->_pred()) { + //assert(tpg->_join_counter == 0); + std::lock_guard lock(f._mutex); + tpg->_join_counter.store(tpg->_sources.size(), std::memory_order_relaxed); + _schedule(worker, tpg->_sources); + } + // case 2: the final run of this topology + else { + + // TODO: if the topology is cancelled, need to release all semaphores + if(tpg->_call != nullptr) { + tpg->_call(); + } + + // If there is another run (interleave between lock) + if(std::unique_lock lock(f._mutex); f._topologies.size()>1) { + //assert(tpg->_join_counter == 0); + + // Set the promise + tpg->_promise.set_value(); + f._topologies.pop(); + tpg = f._topologies.front().get(); + + // decrement the topology but since this is not the last we don't notify + _decrement_topology(); + + // set up topology needs to be under the lock or it can + // introduce memory order error with pop + _set_up_topology(&worker, tpg); + } + else { + //assert(f._topologies.size() == 1); + + // Need to back up the promise first here becuz taskflow might be + // destroy soon after calling get + auto p {std::move(tpg->_promise)}; + + // Back up lambda capture in case it has the topology pointer, + // to avoid it releasing on pop_front ahead of _mutex.unlock & + // _promise.set_value. Released safely when leaving scope. + auto c {std::move(tpg->_call)}; + + // Get the satellite if any + auto s {f._satellite}; + + // Now we remove the topology from this taskflow + f._topologies.pop(); + + //f._mutex.unlock(); + lock.unlock(); + + // We set the promise in the end in case taskflow leaves the scope. 
+ // After set_value, the caller will return from wait + p.set_value(); + + _decrement_topology_and_notify(); + + // remove the taskflow if it is managed by the executor + // TODO: in the future, we may need to synchronize on wait + // (which means the following code should the moved before set_value) + if(s) { + std::scoped_lock lock(_taskflows_mutex); + _taskflows.erase(*s); + } + } + } +} + +// ############################################################################ +// Forward Declaration: Subflow +// ############################################################################ + +inline void Subflow::join() { + + // assert(this_worker().worker == &_worker); + + if(!_joinable) { + TF_THROW("subflow not joinable"); + } + + // only the parent worker can join the subflow + _executor._consume_graph(_worker, _parent, _graph); + _joinable = false; +} + +inline void Subflow::detach() { + + // assert(this_worker().worker == &_worker); + + if(!_joinable) { + TF_THROW("subflow already joined or detached"); + } + + // only the parent worker can detach the subflow + _executor._detach_dynamic_task(_worker, _parent, _graph); + _joinable = false; +} + +// ############################################################################ +// Forward Declaration: Runtime +// ############################################################################ + +// Procedure: schedule +inline void Runtime::schedule(Task task) { + + auto node = task._node; + // need to keep the invariant: when scheduling a task, the task must have + // zero dependency (join counter is 0) + // or we can encounter bug when inserting a nested flow (e.g., module task) + node->_join_counter.store(0, std::memory_order_relaxed); + + auto& j = node->_parent ? node->_parent->_join_counter : + node->_topology->_join_counter; + j.fetch_add(1, std::memory_order_relaxed); + _executor._schedule(_worker, node); +} + +// Procedure: corun +template +void Runtime::corun(T&& target) { + + // dynamic task (subflow) + if constexpr(is_dynamic_task_v) { + Graph graph; + Subflow sf(_executor, _worker, _parent, graph); + target(sf); + if(sf._joinable) { + _executor._consume_graph(_worker, _parent, graph); + } + } + // a composable graph object with `tf::Graph& T::graph()` defined + else { + _executor._consume_graph(_worker, _parent, target.graph()); + } +} + +// Procedure: corun_until +template +void Runtime::corun_until(P&& predicate) { + _executor._corun_until(_worker, std::forward
<P>
(predicate)); +} + +// Function: _silent_async +template +void Runtime::_silent_async(Worker& w, const std::string& name, F&& f) { + + _parent->_join_counter.fetch_add(1, std::memory_order_relaxed); + + auto node = node_pool.animate( + name, 0, _parent->_topology, _parent, 0, + std::in_place_type_t{}, std::forward(f) + ); + + _executor._schedule(w, node); +} + +// Function: silent_async +template +void Runtime::silent_async(F&& f) { + _silent_async(*_executor._this_worker(), "", std::forward(f)); +} + +// Function: silent_async +template +void Runtime::silent_async(const std::string& name, F&& f) { + _silent_async(*_executor._this_worker(), name, std::forward(f)); +} + +// Function: silent_async_unchecked +template +void Runtime::silent_async_unchecked(const std::string& name, F&& f) { + _silent_async(_worker, name, std::forward(f)); +} + +// Function: _async +template +auto Runtime::_async(Worker& w, const std::string& name, F&& f) { + + _parent->_join_counter.fetch_add(1, std::memory_order_relaxed); + + using R = std::invoke_result_t>; + + std::promise p; + auto fu{p.get_future()}; + + auto node = node_pool.animate( + name, 0, _parent->_topology, _parent, 0, + std::in_place_type_t{}, + [p=make_moc(std::move(p)), f=std::forward(f)] () mutable { + if constexpr(std::is_same_v) { + f(); + p.object.set_value(); + } + else { + p.object.set_value(f()); + } + } + ); + + _executor._schedule(w, node); + + return fu; +} + +// Function: async +template +auto Runtime::async(F&& f) { + return _async(*_executor._this_worker(), "", std::forward(f)); +} + +// Function: async +template +auto Runtime::async(const std::string& name, F&& f) { + return _async(*_executor._this_worker(), name, std::forward(f)); +} + +// Function: join +inline void Runtime::join() { + corun_until([this] () -> bool { + return _parent->_join_counter.load(std::memory_order_acquire) == 0; + }); +} + +} // end of namespace tf ----------------------------------------------------- + + + + + + diff --git a/bundled/taskflow-3.6.0/include/core/flow_builder.hpp b/bundled/taskflow-3.6.0/include/core/flow_builder.hpp new file mode 100644 index 0000000000..3e90d8e74d --- /dev/null +++ b/bundled/taskflow-3.6.0/include/core/flow_builder.hpp @@ -0,0 +1,1352 @@ +#pragma once + +#include "task.hpp" +#include "../algorithm/partitioner.hpp" + +/** +@file flow_builder.hpp +@brief flow builder include file +*/ + +namespace tf { + +/** +@class FlowBuilder + +@brief class to build a task dependency graph + +The class provides essential methods to construct a task dependency graph +from which tf::Taskflow and tf::Subflow are derived. + +*/ +class FlowBuilder { + + friend class Executor; + + public: + + /** + @brief constructs a flow builder with a graph + */ + FlowBuilder(Graph& graph); + + /** + @brief creates a static task + + @tparam C callable type constructible from std::function + + @param callable callable to construct a static task + + @return a tf::Task handle + + The following example creates a static task. + + @code{.cpp} + tf::Task static_task = taskflow.emplace([](){}); + @endcode + + Please refer to @ref StaticTasking for details. + */ + template , void>* = nullptr + > + Task emplace(C&& callable); + + /** + @brief creates a dynamic task + + @tparam C callable type constructible from std::function + + @param callable callable to construct a dynamic task + + @return a tf::Task handle + + The following example creates a dynamic task (tf::Subflow) + that spawns two static tasks. 
+ + @code{.cpp} + tf::Task dynamic_task = taskflow.emplace([](tf::Subflow& sf){ + tf::Task static_task1 = sf.emplace([](){}); + tf::Task static_task2 = sf.emplace([](){}); + }); + @endcode + + Please refer to @ref DynamicTasking for details. + */ + template , void>* = nullptr + > + Task emplace(C&& callable); + + /** + @brief creates a condition task + + @tparam C callable type constructible from std::function + + @param callable callable to construct a condition task + + @return a tf::Task handle + + The following example creates an if-else block using one condition task + and three static tasks. + + @code{.cpp} + tf::Taskflow taskflow; + + auto [init, cond, yes, no] = taskflow.emplace( + [] () { }, + [] () { return 0; }, + [] () { std::cout << "yes\n"; }, + [] () { std::cout << "no\n"; } + ); + + // executes yes if cond returns 0, or no if cond returns 1 + cond.precede(yes, no); + cond.succeed(init); + @endcode + + Please refer to @ref ConditionalTasking for details. + */ + template , void>* = nullptr + > + Task emplace(C&& callable); + + /** + @brief creates a multi-condition task + + @tparam C callable type constructible from + std::function()> + + @param callable callable to construct a multi-condition task + + @return a tf::Task handle + + The following example creates a multi-condition task that selectively + jumps to two successor tasks. + + @code{.cpp} + tf::Taskflow taskflow; + + auto [init, cond, branch1, branch2, branch3] = taskflow.emplace( + [] () { }, + [] () { return tf::SmallVector{0, 2}; }, + [] () { std::cout << "branch1\n"; }, + [] () { std::cout << "branch2\n"; }, + [] () { std::cout << "branch3\n"; } + ); + + // executes branch1 and branch3 when cond returns 0 and 2 + cond.precede(branch1, branch2, branch3); + cond.succeed(init); + @endcode + + Please refer to @ref ConditionalTasking for details. + */ + template , void>* = nullptr + > + Task emplace(C&& callable); + + /** + @brief creates multiple tasks from a list of callable objects + + @tparam C callable types + + @param callables one or multiple callable objects constructible from each task category + + @return a tf::Task handle + + The method returns a tuple of tasks each corresponding to the given + callable target. You can use structured binding to get the return tasks + one by one. + The following example creates four static tasks and assign them to + @c A, @c B, @c C, and @c D using structured binding. + + @code{.cpp} + auto [A, B, C, D] = taskflow.emplace( + [] () { std::cout << "A"; }, + [] () { std::cout << "B"; }, + [] () { std::cout << "C"; }, + [] () { std::cout << "D"; } + ); + @endcode + */ + template 1), void>* = nullptr> + auto emplace(C&&... callables); + + /** + @brief removes a task from a taskflow + + @param task task to remove + + Removes a task and its input and output dependencies from the graph + associated with the flow builder. + If the task does not belong to the graph, nothing will happen. 
+ + @code{.cpp} + tf::Task A = taskflow.emplace([](){ std::cout << "A"; }); + tf::Task B = taskflow.emplace([](){ std::cout << "B"; }); + tf::Task C = taskflow.emplace([](){ std::cout << "C"; }); + tf::Task D = taskflow.emplace([](){ std::cout << "D"; }); + A.precede(B, C, D); + + // erase A from the taskflow and its dependencies to B, C, and D + taskflow.erase(A); + @endcode + */ + void erase(Task task); + + /** + @brief creates a module task for the target object + + @tparam T target object type + @param object a custom object that defines the method @c T::graph() + + @return a tf::Task handle + + The example below demonstrates a taskflow composition using + the @c composed_of method. + + @code{.cpp} + tf::Taskflow t1, t2; + t1.emplace([](){ std::cout << "t1"; }); + + // t2 is partially composed of t1 + tf::Task comp = t2.composed_of(t1); + tf::Task init = t2.emplace([](){ std::cout << "t2"; }); + init.precede(comp); + @endcode + + The taskflow object @c t2 is composed of another taskflow object @c t1, + preceded by another static task @c init. + When taskflow @c t2 is submitted to an executor, + @c init will run first and then @c comp which spwans its definition + in taskflow @c t1. + + The target @c object being composed must define the method + T::graph() that returns a reference to a graph object of + type tf::Graph such that it can interact with the executor. + For example: + + @code{.cpp} + // custom struct + struct MyObj { + tf::Graph graph; + MyObj() { + tf::FlowBuilder builder(graph); + tf::Task task = builder.emplace([](){ + std::cout << "a task\n"; // static task + }); + } + Graph& graph() { return graph; } + }; + + MyObj obj; + tf::Task comp = taskflow.composed_of(obj); + @endcode + + Please refer to @ref ComposableTasking for details. + */ + template + Task composed_of(T& object); + + /** + @brief creates a placeholder task + + @return a tf::Task handle + + A placeholder task maps to a node in the taskflow graph, but + it does not have any callable work assigned yet. + A placeholder task is different from an empty task handle that + does not point to any node in a graph. + + @code{.cpp} + // create a placeholder task with no callable target assigned + tf::Task placeholder = taskflow.placeholder(); + assert(placeholder.empty() == false && placeholder.has_work() == false); + + // create an empty task handle + tf::Task task; + assert(task.empty() == true); + + // assign the task handle to the placeholder task + task = placeholder; + assert(task.empty() == false && task.has_work() == false); + @endcode + */ + Task placeholder(); + + /** + @brief adds adjacent dependency links to a linear list of tasks + + @param tasks a vector of tasks + + This member function creates linear dependencies over a vector of tasks. + + @code{.cpp} + tf::Task A = taskflow.emplace([](){ std::cout << "A"; }); + tf::Task B = taskflow.emplace([](){ std::cout << "B"; }); + tf::Task C = taskflow.emplace([](){ std::cout << "C"; }); + tf::Task D = taskflow.emplace([](){ std::cout << "D"; }); + std::vector tasks {A, B, C, D} + taskflow.linearize(tasks); // A->B->C->D + @endcode + + */ + void linearize(std::vector& tasks); + + /** + @brief adds adjacent dependency links to a linear list of tasks + + @param tasks an initializer list of tasks + + This member function creates linear dependencies over a list of tasks. 
+ + @code{.cpp} + tf::Task A = taskflow.emplace([](){ std::cout << "A"; }); + tf::Task B = taskflow.emplace([](){ std::cout << "B"; }); + tf::Task C = taskflow.emplace([](){ std::cout << "C"; }); + tf::Task D = taskflow.emplace([](){ std::cout << "D"; }); + taskflow.linearize({A, B, C, D}); // A->B->C->D + @endcode + */ + void linearize(std::initializer_list tasks); + + // ------------------------------------------------------------------------ + // parallel iterations + // ------------------------------------------------------------------------ + + /** + @brief constructs an STL-styled parallel-for task + + @tparam B beginning iterator type + @tparam E ending iterator type + @tparam C callable type + @tparam P partitioner type (default tf::GuidedPartitioner) + + @param first iterator to the beginning (inclusive) + @param last iterator to the end (exclusive) + @param callable callable object to apply to the dereferenced iterator + @param part partitioning algorithm to schedule parallel iterations + + @return a tf::Task handle + + The task spawns asynchronous tasks that applies the callable object to each object + obtained by dereferencing every iterator in the range [first, last). + This method is equivalent to the parallel execution of the following loop: + + @code{.cpp} + for(auto itr=first; itr!=last; itr++) { + callable(*itr); + } + @endcode + + Iterators are templated to enable stateful range using std::reference_wrapper. + The callable needs to take a single argument of + the dereferenced iterator type. + + Please refer to @ref ParallelIterations for details. + */ + template + Task for_each(B first, E last, C callable, P&& part = P()); + + /** + @brief constructs an STL-styled index-based parallel-for task + + @tparam B beginning index type (must be integral) + @tparam E ending index type (must be integral) + @tparam S step type (must be integral) + @tparam C callable type + @tparam P partitioner type (default tf::GuidedPartitioner) + + @param first index of the beginning (inclusive) + @param last index of the end (exclusive) + @param step step size + @param callable callable object to apply to each valid index + @param part partitioning algorithm to schedule parallel iterations + + @return a tf::Task handle + + The task spawns asynchronous tasks that applies the callable object to each index + in the range [first, last) with the step size. + This method is equivalent to the parallel execution of the following loop: + + @code{.cpp} + // case 1: step size is positive + for(auto i=first; ilast; i+=step) { + callable(i); + } + @endcode + + Iterators are templated to enable stateful range using std::reference_wrapper. + The callable needs to take a single argument of the integral index type. + + Please refer to @ref ParallelIterations for details. 
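+
+  For example, the sketch below (with illustrative data) writes the value 2
+  to every even index of a vector in parallel:
+
+  @code{.cpp}
+  std::vector<int> data(100, 1);
+  taskflow.for_each_index(0, 100, 2, [&data](int i){ data[i] = 2; });
+  executor.run(taskflow).wait();
+  // data[0], data[2], ... hold 2; odd indices keep 1
+  @endcode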
+ */ + template + Task for_each_index( + B first, E last, S step, C callable, P&& part = P() + ); + + // ------------------------------------------------------------------------ + // transform + // ------------------------------------------------------------------------ + + /** + @brief constructs a parallel-transform task + + @tparam B beginning input iterator type + @tparam E ending input iterator type + @tparam O output iterator type + @tparam C callable type + @tparam P partitioner type (default tf::GuidedPartitioner) + + @param first1 iterator to the beginning of the first range + @param last1 iterator to the end of the first range + @param d_first iterator to the beginning of the output range + @param c an unary callable to apply to dereferenced input elements + @param part partitioning algorithm to schedule parallel iterations + + @return a tf::Task handle + + The task spawns asynchronous tasks that applies the callable object to an + input range and stores the result in another output range. + This method is equivalent to the parallel execution of the following loop: + + @code{.cpp} + while (first1 != last1) { + *d_first++ = c(*first1++); + } + @endcode + + Iterators are templated to enable stateful range using std::reference_wrapper. + The callable needs to take a single argument of the dereferenced + iterator type. + + Please refer to @ref ParallelTransforms for details. + */ + template < + typename B, typename E, typename O, typename C, typename P = GuidedPartitioner + > + Task transform(B first1, E last1, O d_first, C c, P&& part = P()); + + /** + @brief constructs a parallel-transform task + + @tparam B1 beginning input iterator type for the first input range + @tparam E1 ending input iterator type for the first input range + @tparam B2 beginning input iterator type for the first second range + @tparam O output iterator type + @tparam C callable type + @tparam P partitioner type (default tf::GuidedPartitioner) + + @param first1 iterator to the beginning of the first input range + @param last1 iterator to the end of the first input range + @param first2 iterator to the beginning of the second input range + @param d_first iterator to the beginning of the output range + @param c a binary operator to apply to dereferenced input elements + @param part partitioning algorithm to schedule parallel iterations + + @return a tf::Task handle + + The task spawns asynchronous tasks that applies the callable object to two + input ranges and stores the result in another output range. + This method is equivalent to the parallel execution of the following loop: + + @code{.cpp} + while (first1 != last1) { + *d_first++ = c(*first1++, *first2++); + } + @endcode + + Iterators are templated to enable stateful range using std::reference_wrapper. + The callable needs to take two arguments of dereferenced elements + from the two input ranges. + + Please refer to @ref ParallelTransforms for details. 
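+
+  For example, the sketch below (with illustrative data) negates each input
+  element and stores the result in a separate output range:
+
+  @code{.cpp}
+  std::vector<int> src = {1, 2, 3, 4, 5};
+  std::vector<int> dst(src.size());
+  taskflow.transform(src.begin(), src.end(), dst.begin(),
+    [](int i){ return -i; }
+  );
+  executor.run(taskflow).wait();
+  // dst is {-1, -2, -3, -4, -5}
+  @endcode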
+ */ + template < + typename B1, typename E1, typename B2, typename O, typename C, typename P=GuidedPartitioner, + std::enable_if_t>, void>* = nullptr + > + Task transform(B1 first1, E1 last1, B2 first2, O d_first, C c, P&& part = P()); + + // ------------------------------------------------------------------------ + // reduction + // ------------------------------------------------------------------------ + + /** + @brief constructs an STL-styled parallel-reduce task + + @tparam B beginning iterator type + @tparam E ending iterator type + @tparam T result type + @tparam O binary reducer type + @tparam P partitioner type (default tf::GuidedPartitioner) + + @param first iterator to the beginning (inclusive) + @param last iterator to the end (exclusive) + @param init initial value of the reduction and the storage for the reduced result + @param bop binary operator that will be applied + @param part partitioning algorithm to schedule parallel iterations + + @return a tf::Task handle + + The task spawns asynchronous tasks to perform parallel reduction over @c init + and the elements in the range [first, last). + The reduced result is store in @c init. + This method is equivalent to the parallel execution of the following loop: + + @code{.cpp} + for(auto itr=first; itr!=last; itr++) { + init = bop(init, *itr); + } + @endcode + + Iterators are templated to enable stateful range using std::reference_wrapper. + + Please refer to @ref ParallelReduction for details. + */ + template + Task reduce(B first, E last, T& init, O bop, P&& part = P()); + + // ------------------------------------------------------------------------ + // transfrom and reduction + // ------------------------------------------------------------------------ + + /** + @brief constructs an STL-styled parallel transform-reduce task + + @tparam B beginning iterator type + @tparam E ending iterator type + @tparam T result type + @tparam BOP binary reducer type + @tparam UOP unary transformion type + @tparam P partitioner type (default tf::GuidedPartitioner) + + @param first iterator to the beginning (inclusive) + @param last iterator to the end (exclusive) + @param init initial value of the reduction and the storage for the reduced result + @param bop binary operator that will be applied in unspecified order to the results of @c uop + @param uop unary operator that will be applied to transform each element in the range to the result type + @param part partitioning algorithm to schedule parallel iterations + + @return a tf::Task handle + + The task spawns asynchronous tasks to perform parallel reduction over @c init and + the transformed elements in the range [first, last). + The reduced result is store in @c init. + This method is equivalent to the parallel execution of the following loop: + + @code{.cpp} + for(auto itr=first; itr!=last; itr++) { + init = bop(init, uop(*itr)); + } + @endcode + + Iterators are templated to enable stateful range using std::reference_wrapper. + + Please refer to @ref ParallelReduction for details. 
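+
+  For example, the sketch below (with illustrative data) adds two input
+  ranges element-wise into an output range:
+
+  @code{.cpp}
+  std::vector<int> a = {1, 2, 3};
+  std::vector<int> b = {10, 20, 30};
+  std::vector<int> out(a.size());
+  taskflow.transform(a.begin(), a.end(), b.begin(), out.begin(), std::plus<int>{});
+  executor.run(taskflow).wait();
+  // out is {11, 22, 33}
+  @endcode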
+ */ + template < + typename B, typename E, typename T, typename BOP, typename UOP, typename P = GuidedPartitioner + > + Task transform_reduce(B first, E last, T& init, BOP bop, UOP uop, P&& part = P()); + + // ------------------------------------------------------------------------ + // scan + // ------------------------------------------------------------------------ + + /** + @brief creates an STL-styled parallel inclusive-scan task + + @tparam B beginning iterator type + @tparam E ending iterator type + @tparam D destination iterator type + @tparam BOP summation operator type + + @param first start of input range + @param last end of input range + @param d_first start of output range (may be the same as input range) + @param bop function to perform summation + + Performs the cumulative sum (aka prefix sum, aka scan) of the input range + and writes the result to the output range. + Each element of the output range contains the + running total of all earlier elements using the given binary operator + for summation. + + This function generates an @em inclusive scan, meaning that the N-th element + of the output range is the sum of the first N input elements, + so the N-th input element is included. + + @code{.cpp} + std::vector input = {1, 2, 3, 4, 5}; + taskflow.inclusive_scan( + input.begin(), input.end(), input.begin(), std::plus{} + ); + executor.run(taskflow).wait(); + + // input is {1, 3, 6, 10, 15} + @endcode + + Iterators are templated to enable stateful range using std::reference_wrapper. + + Please refer to @ref ParallelScan for details. + */ + template + Task inclusive_scan(B first, E last, D d_first, BOP bop); + + /** + @brief creates an STL-styled parallel inclusive-scan task with an initial value + + @tparam B beginning iterator type + @tparam E ending iterator type + @tparam D destination iterator type + @tparam BOP summation operator type + @tparam T initial value type + + @param first start of input range + @param last end of input range + @param d_first start of output range (may be the same as input range) + @param bop function to perform summation + @param init initial value + + Performs the cumulative sum (aka prefix sum, aka scan) of the input range + and writes the result to the output range. + Each element of the output range contains the + running total of all earlier elements (and the initial value) + using the given binary operator for summation. + + This function generates an @em inclusive scan, meaning the N-th element + of the output range is the sum of the first N input elements, + so the N-th input element is included. + + @code{.cpp} + std::vector input = {1, 2, 3, 4, 5}; + taskflow.inclusive_scan( + input.begin(), input.end(), input.begin(), std::plus{}, -1 + ); + executor.run(taskflow).wait(); + + // input is {0, 2, 5, 9, 14} + @endcode + + Iterators are templated to enable stateful range using std::reference_wrapper. + + Please refer to @ref ParallelScan for details. 
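+
+  For example, the sketch below (with illustrative data) computes the sum of
+  squares of an input range:
+
+  @code{.cpp}
+  std::vector<int> data = {1, 2, 3, 4, 5};
+  int sum_of_squares = 0;
+  taskflow.transform_reduce(data.begin(), data.end(), sum_of_squares,
+    std::plus<int>{},            // bop: combines transformed values
+    [](int i){ return i * i; }   // uop: squares each element
+  );
+  executor.run(taskflow).wait();
+  // sum_of_squares is 55
+  @endcode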
+ + */ + template + Task inclusive_scan(B first, E last, D d_first, BOP bop, T init); + + /** + @brief creates an STL-styled parallel exclusive-scan task + + @tparam B beginning iterator type + @tparam E ending iterator type + @tparam D destination iterator type + @tparam T initial value type + @tparam BOP summation operator type + + @param first start of input range + @param last end of input range + @param d_first start of output range (may be the same as input range) + @param init initial value + @param bop function to perform summation + + Performs the cumulative sum (aka prefix sum, aka scan) of the input range + and writes the result to the output range. + Each element of the output range contains the + running total of all earlier elements (and the initial value) + using the given binary operator for summation. + + This function generates an @em exclusive scan, meaning the N-th element + of the output range is the sum of the first N-1 input elements, + so the N-th input element is not included. + + @code{.cpp} + std::vector input = {1, 2, 3, 4, 5}; + taskflow.exclusive_scan( + input.begin(), input.end(), input.begin(), -1, std::plus{} + ); + executor.run(taskflow).wait(); + + // input is {-1, 0, 2, 5, 9} + @endcode + + Iterators are templated to enable stateful range using std::reference_wrapper. + + Please refer to @ref ParallelScan for details. + */ + template + Task exclusive_scan(B first, E last, D d_first, T init, BOP bop); + + // ------------------------------------------------------------------------ + // transform scan + // ------------------------------------------------------------------------ + + /** + @brief creates an STL-styled parallel transform-inclusive scan task + + @tparam B beginning iterator type + @tparam E ending iterator type + @tparam D destination iterator type + @tparam BOP summation operator type + @tparam UOP transform operator type + + @param first start of input range + @param last end of input range + @param d_first start of output range (may be the same as input range) + @param bop function to perform summation + @param uop function to transform elements of the input range + + Write the cumulative sum (aka prefix sum, aka scan) of the input range + to the output range. Each element of the output range contains the + running total of all earlier elements + using @c uop to transform the input elements + and using @c bop for summation. + + This function generates an @em inclusive scan, meaning the Nth element + of the output range is the sum of the first N input elements, + so the Nth input element is included. + + @code{.cpp} + std::vector input = {1, 2, 3, 4, 5}; + taskflow.transform_inclusive_scan( + input.begin(), input.end(), input.begin(), std::plus{}, + [] (int item) { return -item; } + ); + executor.run(taskflow).wait(); + + // input is {-1, -3, -6, -10, -15} + @endcode + + Iterators are templated to enable stateful range using std::reference_wrapper. + + Please refer to @ref ParallelScan for details. 
+ */ + template + Task transform_inclusive_scan(B first, E last, D d_first, BOP bop, UOP uop); + + /** + @brief creates an STL-styled parallel transform-inclusive scan task + + @tparam B beginning iterator type + @tparam E ending iterator type + @tparam D destination iterator type + @tparam BOP summation operator type + @tparam UOP transform operator type + @tparam T initial value type + + @param first start of input range + @param last end of input range + @param d_first start of output range (may be the same as input range) + @param bop function to perform summation + @param uop function to transform elements of the input range + @param init initial value + + Write the cumulative sum (aka prefix sum, aka scan) of the input range + to the output range. Each element of the output range contains the + running total of all earlier elements (including an initial value) + using @c uop to transform the input elements + and using @c bop for summation. + + This function generates an @em inclusive scan, meaning the Nth element + of the output range is the sum of the first N input elements, + so the Nth input element is included. + + @code{.cpp} + std::vector input = {1, 2, 3, 4, 5}; + taskflow.transform_inclusive_scan( + input.begin(), input.end(), input.begin(), std::plus{}, + [] (int item) { return -item; }, + -1 + ); + executor.run(taskflow).wait(); + + // input is {-2, -4, -7, -11, -16} + @endcode + + Iterators are templated to enable stateful range using std::reference_wrapper. + + Please refer to @ref ParallelScan for details. + */ + template + Task transform_inclusive_scan(B first, E last, D d_first, BOP bop, UOP uop, T init); + + /** + @brief creates an STL-styled parallel transform-exclusive scan task + + @tparam B beginning iterator type + @tparam E ending iterator type + @tparam D destination iterator type + @tparam BOP summation operator type + @tparam UOP transform operator type + @tparam T initial value type + + @param first start of input range + @param last end of input range + @param d_first start of output range (may be the same as input range) + @param bop function to perform summation + @param uop function to transform elements of the input range + @param init initial value + + Write the cumulative sum (aka prefix sum, aka scan) of the input range + to the output range. Each element of the output range contains the + running total of all earlier elements (including an initial value) + using @c uop to transform the input elements + and using @c bop for summation. + + This function generates an @em exclusive scan, meaning the Nth element + of the output range is the sum of the first N-1 input elements, + so the Nth input element is not included. + + @code{.cpp} + std::vector input = {1, 2, 3, 4, 5}; + taskflow.transform_exclusive_scan( + input.begin(), input.end(), input.begin(), -1, std::plus{}, + [](int item) { return -item; } + ); + executor.run(taskflow).wait(); + + // input is {-1, -2, -4, -7, -11} + @endcode + + Iterators are templated to enable stateful range using std::reference_wrapper. + + Please refer to @ref ParallelScan for details. 
+ */ + template + Task transform_exclusive_scan(B first, E last, D d_first, T init, BOP bop, UOP uop); + + // ------------------------------------------------------------------------ + // find + // ------------------------------------------------------------------------ + + /** + @brief constructs a task to perform STL-styled find-if algorithm + + @tparam B beginning iterator type + @tparam E ending iterator type + @tparam T resulting iterator type + @tparam UOP unary predicate type + @tparam P partitioner type + + @param first start of the input range + @param last end of the input range + @param result resulting iterator to the found element in the input range + @param predicate unary predicate which returns @c true for the required element + @param part partitioning algorithm (default tf::GuidedPartitioner) + + Returns an iterator to the first element in the range [first, last) + that satisfies the given criteria (or last if there is no such iterator). + This method is equivalent to the parallel execution of the following loop: + + @code{.cpp} + auto find_if(InputIt first, InputIt last, UnaryPredicate p) { + for (; first != last; ++first) { + if (predicate(*first)){ + return first; + } + } + return last; + } + @endcode + + For example, the code below find the element that satisfies the given + criteria (value plus one is equal to 23) from an input range of 10 elements: + + @code{.cpp} + std::vector input = {1, 6, 9, 10, 22, 5, 7, 8, 9, 11}; + std::vector::iterator result; + taskflow.find_if( + input.begin(), input.end(), [](int i){ return i+1 = 23; }, result + ); + executor.run(taskflow).wait(); + assert(*result == 22); + @endcode + + Iterators are templated to enable stateful range using std::reference_wrapper. + */ + template + Task find_if(B first, E last, T& result, UOP predicate, P&& part = P()); + + /** + @brief constructs a task to perform STL-styled find-if-not algorithm + + @tparam B beginning iterator type + @tparam E ending iterator type + @tparam T resulting iterator type + @tparam UOP unary predicate type + @tparam P partitioner type + + @param first start of the input range + @param last end of the input range + @param result resulting iterator to the found element in the input range + @param predicate unary predicate which returns @c false for the required element + @param part partitioning algorithm (default tf::GuidedPartitioner) + + Returns an iterator to the first element in the range [first, last) + that satisfies the given criteria (or last if there is no such iterator). + This method is equivalent to the parallel execution of the following loop: + + @code{.cpp} + auto find_if(InputIt first, InputIt last, UnaryPredicate p) { + for (; first != last; ++first) { + if (!predicate(*first)){ + return first; + } + } + return last; + } + @endcode + + For example, the code below find the element that satisfies the given + criteria (value is not equal to 1) from an input range of 10 elements: + + @code{.cpp} + std::vector input = {1, 1, 1, 1, 22, 1, 1, 1, 1, 1}; + std::vector::iterator result; + taskflow.find_if_not( + input.begin(), input.end(), [](int i){ return i == 1; }, result + ); + executor.run(taskflow).wait(); + assert(*result == 22); + @endcode + + Iterators are templated to enable stateful range using std::reference_wrapper. 
+ */ + template + Task find_if_not(B first, E last, T& result, UOP predicate, P&& part = P()); + + /** + @brief constructs a task to perform STL-styled min-element algorithm + + @tparam B beginning iterator type + @tparam E ending iterator type + @tparam T resulting iterator type + @tparam C comparator type + @tparam P partitioner type + + @param first start of the input range + @param last end of the input range + @param result resulting iterator to the found element in the input range + @param comp comparison function object + @param part partitioning algorithm (default tf::GuidedPartitioner) + + Finds the smallest element in the [first, last) + using the given comparison function object. + The iterator to that smallest element is stored in @c result. + This method is equivalent to the parallel execution of the following loop: + + @code{.cpp} + if (first == last) { + return last; + } + auto smallest = first; + ++first; + for (; first != last; ++first) { + if (comp(*first, *smallest)) { + smallest = first; + } + } + return smallest; + @endcode + + For example, the code below find the smallest element from an input + range of 10 elements. + + @code{.cpp} + std::vector input = {1, 1, 1, 1, 1, -1, 1, 1, 1, 1}; + std::vector::iterator result; + taskflow.min_element( + input.begin(), input.end(), std::less(), result + ); + executor.run(taskflow).wait(); + assert(*result == -1); + @endcode + + Iterators are templated to enable stateful range using std::reference_wrapper. + */ + template + Task min_element(B first, E last, T& result, C comp, P&& part); + + /** + @brief constructs a task to perform STL-styled max-element algorithm + + @tparam B beginning iterator type + @tparam E ending iterator type + @tparam T resulting iterator type + @tparam C comparator type + @tparam P partitioner type + + @param first start of the input range + @param last end of the input range + @param result resulting iterator to the found element in the input range + @param comp comparison function object + @param part partitioning algorithm (default tf::GuidedPartitioner) + + Finds the largest element in the [first, last) + using the given comparison function object. + The iterator to that largest element is stored in @c result. + This method is equivalent to the parallel execution of the following loop: + + @code{.cpp} + if (first == last){ + return last; + } + auto largest = first; + ++first; + for (; first != last; ++first) { + if (comp(*largest, *first)) { + largest = first; + } + } + return largest; + @endcode + + For example, the code below find the largest element from an input + range of 10 elements. + + @code{.cpp} + std::vector input = {1, 1, 1, 1, 1, 2, 1, 1, 1, 1}; + std::vector::iterator result; + taskflow.max_element( + input.begin(), input.end(), std::less(), result + ); + executor.run(taskflow).wait(); + assert(*result == 2); + @endcode + + Iterators are templated to enable stateful range using std::reference_wrapper. 
+ */ + template + Task max_element(B first, E last, T& result, C comp, P&& part); + + // ------------------------------------------------------------------------ + // sort + // ------------------------------------------------------------------------ + + /** + @brief constructs a dynamic task to perform STL-styled parallel sort + + @tparam B beginning iterator type (random-accessible) + @tparam E ending iterator type (random-accessible) + @tparam C comparator type + + @param first iterator to the beginning (inclusive) + @param last iterator to the end (exclusive) + @param cmp comparison operator + + The task spawns asynchronous tasks to sort elements in the range + [first, last) in parallel. + + Iterators are templated to enable stateful range using std::reference_wrapper. + + Please refer to @ref ParallelSort for details. + */ + template + Task sort(B first, E last, C cmp); + + /** + @brief constructs a dynamic task to perform STL-styled parallel sort using + the @c std::less comparator, where @c T is the element type + + @tparam B beginning iterator type (random-accessible) + @tparam E ending iterator type (random-accessible) + + @param first iterator to the beginning (inclusive) + @param last iterator to the end (exclusive) + + The task spawns asynchronous tasks to parallelly sort elements in the range + [first, last) using the @c std::less comparator, + where @c T is the dereferenced iterator type. + + Iterators are templated to enable stateful range using std::reference_wrapper. + + Please refer to @ref ParallelSort for details. + */ + template + Task sort(B first, E last); + + protected: + + /** + @brief associated graph object + */ + Graph& _graph; + + private: + + template + void _linearize(L&); +}; + +// Constructor +inline FlowBuilder::FlowBuilder(Graph& graph) : + _graph {graph} { +} + +// Function: emplace +template , void>*> +Task FlowBuilder::emplace(C&& c) { + return Task(_graph._emplace_back("", 0, nullptr, nullptr, 0, + std::in_place_type_t{}, std::forward(c) + )); +} + +// Function: emplace +template , void>*> +Task FlowBuilder::emplace(C&& c) { + return Task(_graph._emplace_back("", 0, nullptr, nullptr, 0, + std::in_place_type_t{}, std::forward(c) + )); +} + +// Function: emplace +template , void>*> +Task FlowBuilder::emplace(C&& c) { + return Task(_graph._emplace_back("", 0, nullptr, nullptr, 0, + std::in_place_type_t{}, std::forward(c) + )); +} + +// Function: emplace +template , void>*> +Task FlowBuilder::emplace(C&& c) { + return Task(_graph._emplace_back("", 0, nullptr, nullptr, 0, + std::in_place_type_t{}, std::forward(c) + )); +} + +// Function: emplace +template 1), void>*> +auto FlowBuilder::emplace(C&&... 
cs) { + return std::make_tuple(emplace(std::forward(cs))...); +} + +// Function: erase +inline void FlowBuilder::erase(Task task) { + + if (!task._node) { + return; + } + + task.for_each_dependent([&] (Task dependent) { + auto& S = dependent._node->_successors; + if(auto I = std::find(S.begin(), S.end(), task._node); I != S.end()) { + S.erase(I); + } + }); + + task.for_each_successor([&] (Task dependent) { + auto& D = dependent._node->_dependents; + if(auto I = std::find(D.begin(), D.end(), task._node); I != D.end()) { + D.erase(I); + } + }); + + _graph._erase(task._node); +} + +// Function: composed_of +template +Task FlowBuilder::composed_of(T& object) { + auto node = _graph._emplace_back("", 0, nullptr, nullptr, 0, + std::in_place_type_t{}, object + ); + return Task(node); +} + +// Function: placeholder +inline Task FlowBuilder::placeholder() { + auto node = _graph._emplace_back("", 0, nullptr, nullptr, 0, + std::in_place_type_t{} + ); + return Task(node); +} + +// Procedure: _linearize +template +void FlowBuilder::_linearize(L& keys) { + + auto itr = keys.begin(); + auto end = keys.end(); + + if(itr == end) { + return; + } + + auto nxt = itr; + + for(++nxt; nxt != end; ++nxt, ++itr) { + itr->_node->_precede(nxt->_node); + } +} + +// Procedure: linearize +inline void FlowBuilder::linearize(std::vector& keys) { + _linearize(keys); +} + +// Procedure: linearize +inline void FlowBuilder::linearize(std::initializer_list keys) { + _linearize(keys); +} + +// ---------------------------------------------------------------------------- + +/** +@class Subflow + +@brief class to construct a subflow graph from the execution of a dynamic task + +tf::Subflow is a derived class from tf::Runtime with a specialized mechanism +to manage the execution of a child graph. +By default, a subflow automatically @em joins its parent node. +You may explicitly join or detach a subflow by calling tf::Subflow::join +or tf::Subflow::detach, respectively. +The following example creates a taskflow graph that spawns a subflow from +the execution of task @c B, and the subflow contains three tasks, @c B1, +@c B2, and @c B3, where @c B3 runs after @c B1 and @c B2. + +@code{.cpp} +// create three static tasks +tf::Task A = taskflow.emplace([](){}).name("A"); +tf::Task C = taskflow.emplace([](){}).name("C"); +tf::Task D = taskflow.emplace([](){}).name("D"); + +// create a subflow graph (dynamic tasking) +tf::Task B = taskflow.emplace([] (tf::Subflow& subflow) { + tf::Task B1 = subflow.emplace([](){}).name("B1"); + tf::Task B2 = subflow.emplace([](){}).name("B2"); + tf::Task B3 = subflow.emplace([](){}).name("B3"); + B1.precede(B3); + B2.precede(B3); +}).name("B"); + +A.precede(B); // B runs after A +A.precede(C); // C runs after A +B.precede(D); // D runs after B +C.precede(D); // D runs after C +@endcode + +*/ +class Subflow : public FlowBuilder, + public Runtime { + + friend class Executor; + friend class FlowBuilder; + friend class Runtime; + + public: + + /** + @brief enables the subflow to join its parent task + + Performs an immediate action to join the subflow. Once the subflow is joined, + it is considered finished and you may not modify the subflow anymore. + + @code{.cpp} + taskflow.emplace([](tf::Subflow& sf){ + sf.emplace([](){}); + sf.join(); // join the subflow of one task + }); + @endcode + + Only the worker that spawns this subflow can join it. + */ + void join(); + + /** + @brief enables the subflow to detach from its parent task + + Performs an immediate action to detach the subflow. 
Once the subflow is detached, + it is considered finished and you may not modify the subflow anymore. + + @code{.cpp} + taskflow.emplace([](tf::Subflow& sf){ + sf.emplace([](){}); + sf.detach(); + }); + @endcode + + Only the worker that spawns this subflow can detach it. + */ + void detach(); + + /** + @brief resets the subflow to a joinable state + + @param clear_graph specifies whether to clear the associated graph (default @c true) + + Clears the underlying task graph depending on the + given variable @c clear_graph (default @c true) and then + updates the subflow to a joinable state. + */ + void reset(bool clear_graph = true); + + /** + @brief queries if the subflow is joinable + + This member function queries if the subflow is joinable. + When a subflow is joined or detached, it becomes not joinable. + + @code{.cpp} + taskflow.emplace([](tf::Subflow& sf){ + sf.emplace([](){}); + std::cout << sf.joinable() << '\n'; // true + sf.join(); + std::cout << sf.joinable() << '\n'; // false + }); + @endcode + */ + bool joinable() const noexcept; + + private: + + bool _joinable {true}; + + Subflow(Executor&, Worker&, Node*, Graph&); +}; + +// Constructor +inline Subflow::Subflow( + Executor& executor, Worker& worker, Node* parent, Graph& graph +) : + FlowBuilder {graph}, + Runtime {executor, worker, parent} { + // assert(_parent != nullptr); +} + +// Function: joined +inline bool Subflow::joinable() const noexcept { + return _joinable; +} + +// Procedure: reset +inline void Subflow::reset(bool clear_graph) { + if(clear_graph) { + _graph._clear(); + } + _joinable = true; +} + +} // end of namespace tf. --------------------------------------------------- + + + + + + + + + + diff --git a/bundled/taskflow-3.6.0/include/core/graph.hpp b/bundled/taskflow-3.6.0/include/core/graph.hpp new file mode 100644 index 0000000000..475422d082 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/core/graph.hpp @@ -0,0 +1,998 @@ +#pragma once + +#include "../utility/traits.hpp" +#include "../utility/iterator.hpp" +#include "../utility/object_pool.hpp" +#include "../utility/os.hpp" +#include "../utility/math.hpp" +#include "../utility/small_vector.hpp" +#include "../utility/serializer.hpp" +#include "error.hpp" +#include "declarations.hpp" +#include "semaphore.hpp" +#include "environment.hpp" +#include "topology.hpp" +#include "tsq.hpp" + +/** +@file graph.hpp +@brief graph include file +*/ + +namespace tf { + +// ---------------------------------------------------------------------------- +// Class: Graph +// ---------------------------------------------------------------------------- + +/** +@class Graph + +@brief class to create a graph object + +A graph is the ultimate storage for a task dependency graph and is the main +gateway to interact with an executor. +A graph manages a set of nodes in a global object pool that animates and +recycles node objects efficiently without going through repetitive and +expensive memory allocations and deallocations. +This class is mainly used for creating an opaque graph object in a custom +class to interact with the executor through taskflow composition. + +A graph object is move-only. 
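+
+A minimal, illustrative sketch of such a composition (the type and task names
+below are hypothetical, not taken from the bundled sources): a user-defined
+class owns an opaque tf::Graph, builds tasks into it through tf::FlowBuilder,
+and exposes the graph through a graph() accessor so a taskflow can compose it
+as a module task.
+
+@code{.cpp}
+struct MyModule {                          // hypothetical user-defined type
+  MyModule() {
+    tf::FlowBuilder builder(_graph);       // build tasks into the opaque graph
+    builder.emplace([](){ /* work */ }).name("inner");
+  }
+  tf::Graph& graph() { return _graph; }    // accessor required for composition
+  tf::Graph _graph;
+};
+
+tf::Taskflow taskflow;
+MyModule mod;
+taskflow.composed_of(mod).name("module");  // module task built over mod.graph()
+@endcode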
+*/ +class Graph { + + friend class Node; + friend class FlowBuilder; + friend class Subflow; + friend class Taskflow; + friend class Executor; + + public: + + /** + @brief constructs a graph object + */ + Graph() = default; + + /** + @brief disabled copy constructor + */ + Graph(const Graph&) = delete; + + /** + @brief constructs a graph using move semantics + */ + Graph(Graph&&); + + /** + @brief destructs the graph object + */ + ~Graph(); + + /** + @brief disabled copy assignment operator + */ + Graph& operator = (const Graph&) = delete; + + /** + @brief assigns a graph using move semantics + */ + Graph& operator = (Graph&&); + + /** + @brief queries if the graph is empty + */ + bool empty() const; + + /** + @brief queries the number of nodes in the graph + */ + size_t size() const; + + /** + @brief clears the graph + */ + void clear(); + + private: + + std::vector _nodes; + + void _clear(); + void _clear_detached(); + void _merge(Graph&&); + void _erase(Node*); + + /** + @private + */ + template + Node* _emplace_back(ArgsT&&...); +}; + +// ---------------------------------------------------------------------------- + +/** +@class Runtime + +@brief class to include a runtime object in a task + +A runtime object allows users to interact with the +scheduling runtime inside a task, such as scheduling an active task, +spawning a subflow, and so on. + +@code{.cpp} +tf::Task A, B, C, D; +std::tie(A, B, C, D) = taskflow.emplace( + [] () { return 0; }, + [&C] (tf::Runtime& rt) { // C must be captured by reference + std::cout << "B\n"; + rt.schedule(C); + }, + [] () { std::cout << "C\n"; }, + [] () { std::cout << "D\n"; } +); +A.precede(B, C, D); +executor.run(taskflow).wait(); +@endcode + +A runtime object is associated with the worker and the executor +that runs the task. + +*/ +class Runtime { + + friend class Executor; + friend class FlowBuilder; + + public: + + /** + @brief obtains the running executor + + The running executor of a runtime task is the executor that runs + the parent taskflow of that runtime task. + + @code{.cpp} + tf::Executor executor; + tf::Taskflow taskflow; + taskflow.emplace([&](tf::Runtime& rt){ + assert(&(rt.executor()) == &executor); + }); + executor.run(taskflow).wait(); + @endcode + */ + Executor& executor(); + + /** + @brief schedules an active task immediately to the worker's queue + + @param task the given active task to schedule immediately + + This member function immediately schedules an active task to the + task queue of the associated worker in the runtime task. + An active task is a task in a running taskflow. + The task may or may not be running, and scheduling that task + will immediately put the task into the task queue of the worker + that is running the runtime task. + Consider the following example: + + @code{.cpp} + tf::Task A, B, C, D; + std::tie(A, B, C, D) = taskflow.emplace( + [] () { return 0; }, + [&C] (tf::Runtime& rt) { // C must be captured by reference + std::cout << "B\n"; + rt.schedule(C); + }, + [] () { std::cout << "C\n"; }, + [] () { std::cout << "D\n"; } + ); + A.precede(B, C, D); + executor.run(taskflow).wait(); + @endcode + + The executor will first run the condition task @c A which returns @c 0 + to inform the scheduler to go to the runtime task @c B. + During the execution of @c B, it directly schedules task @c C without + going through the normal taskflow graph scheduling process. + At this moment, task @c C is active because its parent taskflow is running. 
+ When the taskflow finishes, we will see both @c B and @c C in the output. + */ + void schedule(Task task); + + /** + @brief runs the given callable asynchronously + + @tparam F callable type + @param f callable object + + The method creates an asynchronous task to launch the given + function on the given arguments. + The difference to tf::Executor::async is that the created asynchronous task + pertains to the runtime. + When the runtime joins, all asynchronous tasks created from the runtime + are guaranteed to finish after the join returns. + For example: + + @code{.cpp} + std::atomic counter(0); + taskflow.emplace([&](tf::Runtime& rt){ + auto fu1 = rt.async([&](){ counter++; }); + auto fu2 = rt.async([&](){ counter++; }); + fu1.get(); + fu2.get(); + assert(counter == 2); + + // spawn 100 asynchronous tasks from the worker of the runtime + for(int i=0; i<100; i++) { + rt.async([&](){ counter++; }); + } + + // explicit join 100 asynchronous tasks + rt.join(); + assert(counter == 102); + }); + @endcode + + This method is thread-safe and can be called by multiple workers + that hold the reference to the runtime. + For example, the code below spawns 100 tasks from the worker of + a runtime, and each of the 100 tasks spawns another task + that will be run by another worker. + + @code{.cpp} + std::atomic counter(0); + taskflow.emplace([&](tf::Runtime& rt){ + // worker of the runtime spawns 100 tasks each spawning another task + // that will be run by another worker + for(int i=0; i<100; i++) { + rt.async([&](){ + counter++; + rt.async([](){ counter++; }); + }); + } + + // explicit join 100 asynchronous tasks + rt.join(); + assert(counter == 200); + }); + @endcode + */ + template + auto async(F&& f); + + /** + @brief similar to tf::Runtime::async but assigns the task a name + + @tparam F callable type + + @param name assigned name to the task + @param f callable + + @code{.cpp} + taskflow.emplace([&](tf::Runtime& rt){ + auto future = rt.async("my task", [](){}); + future.get(); + }); + @endcode + + */ + template + auto async(const std::string& name, F&& f); + + /** + @brief runs the given function asynchronously without returning any future object + + @tparam F callable type + @param f callable + + This member function is more efficient than tf::Runtime::async + and is encouraged to use when there is no data returned. + + @code{.cpp} + std::atomic counter(0); + taskflow.emplace([&](tf::Runtime& rt){ + for(int i=0; i<100; i++) { + rt.silent_async([&](){ counter++; }); + } + rt.join(); + assert(counter == 100); + }); + @endcode + + This member function is thread-safe. + */ + template + void silent_async(F&& f); + + /** + @brief similar to tf::Runtime::silent_async but assigns the task a name + + @tparam F callable type + @param name assigned name to the task + @param f callable + + @code{.cpp} + taskflow.emplace([&](tf::Runtime& rt){ + rt.silent_async("my task", [](){}); + rt.join(); + }); + @endcode + */ + template + void silent_async(const std::string& name, F&& f); + + /** + @brief similar to tf::Runtime::silent_async but the caller must be the worker of the runtime + + @tparam F callable type + + @param name assigned name to the task + @param f callable + + The method bypass the check of the caller worker from the executor + and thus can only called by the worker of this runtime. 
+ + @code{.cpp} + taskflow.emplace([&](tf::Runtime& rt){ + // running by the worker of this runtime + rt.silent_async_unchecked("my task", [](){}); + rt.join(); + }); + @endcode + */ + template + void silent_async_unchecked(const std::string& name, F&& f); + + /** + @brief co-runs the given target and waits until it completes + + A target can be one of the following forms: + + a dynamic task to spawn a subflow or + + a composable graph object with `tf::Graph& T::graph()` defined + + @code{.cpp} + // co-run a subflow and wait until all tasks complete + taskflow.emplace([](tf::Runtime& rt){ + rt.corun([](tf::Subflow& sf){ + tf::Task A = sf.emplace([](){}); + tf::Task B = sf.emplace([](){}); + }); + }); + + // co-run a taskflow and wait until all tasks complete + tf::Taskflow taskflow1, taskflow2; + taskflow1.emplace([](){ std::cout << "running taskflow1\n"; }); + taskflow2.emplace([&](tf::Runtime& rt){ + std::cout << "running taskflow2\n"; + rt.corun(taskflow1); + }); + executor.run(taskflow2).wait(); + @endcode + + Although tf::Runtime::corun blocks until the operation completes, + the caller thread (worker) is not blocked (e.g., sleeping or holding any lock). + Instead, the caller thread joins the work-stealing loop of the executor + and returns when all tasks in the target completes. + */ + template + void corun(T&& target); + + /** + @brief keeps running the work-stealing loop until the predicate becomes true + + @tparam P predicate type + @param predicate a boolean predicate to indicate when to stop the loop + + The method keeps the caller worker running in the work-stealing loop + until the stop predicate becomes true. + */ + template + void corun_until(P&& predicate); + + /** + @brief joins all asynchronous tasks spawned by this runtime + + Immediately joins all asynchronous tasks (tf::Runtime::async, + tf::Runtime::silent_async). + Unlike tf::Subflow::join, you can join multiples times from + a tf::Runtime object. + + @code{.cpp} + std::atomic counter{0}; + taskflow.emplace([&](tf::Runtime& rt){ + // spawn 100 async tasks and join + for(int i=0; i<100; i++) { + rt.silent_async([&](){ counter++; }); + } + rt.join(); + assert(counter == 100); + + // spawn another 100 async tasks and join + for(int i=0; i<100; i++) { + rt.silent_async([&](){ counter++; }); + } + rt.join(); + assert(counter == 200); + }); + @endcode + + @attention + Only the worker of this tf::Runtime can issue join. 
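+
+  As a complement to tf::Runtime::join, the sketch below waits on a custom
+  condition through tf::Runtime::corun_until instead of an explicit join
+  (the counter and the number of spawned tasks are illustrative only):
+
+  @code{.cpp}
+  std::atomic<int> done{0};
+  taskflow.emplace([&](tf::Runtime& rt){
+    for(int i=0; i<4; i++) {
+      rt.silent_async([&](){ ++done; });
+    }
+    // keep this worker inside the work-stealing loop until all four tasks finish
+    rt.corun_until([&](){ return done.load() == 4; });
+  });
+  executor.run(taskflow).wait();
+  @endcode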
+ */ + inline void join(); + + /** + @brief acquire a reference to the underlying worker + */ + inline Worker& worker(); + + protected: + + /** + @private + */ + explicit Runtime(Executor&, Worker&, Node*); + + /** + @private + */ + Executor& _executor; + + /** + @private + */ + Worker& _worker; + + /** + @private + */ + Node* _parent; + + /** + @private + */ + template + auto _async(Worker& w, const std::string& name, F&& f); + + /** + @private + */ + template + void _silent_async(Worker& w, const std::string& name, F&& f); +}; + +// constructor +inline Runtime::Runtime(Executor& e, Worker& w, Node* p) : + _executor{e}, + _worker {w}, + _parent {p}{ +} + +// Function: executor +inline Executor& Runtime::executor() { + return _executor; +} + +// Function: worker +inline Worker& Runtime::worker() { + return _worker; +} + +// ---------------------------------------------------------------------------- +// Node +// ---------------------------------------------------------------------------- + +/** +@private +*/ +class Node { + + friend class Graph; + friend class Task; + friend class TaskView; + friend class Taskflow; + friend class Executor; + friend class FlowBuilder; + friend class Subflow; + friend class Runtime; + + enum class AsyncState : int { + UNFINISHED = 0, + LOCKED = 1, + FINISHED = 2 + }; + + TF_ENABLE_POOLABLE_ON_THIS; + + // state bit flag + constexpr static int CONDITIONED = 1; + constexpr static int DETACHED = 2; + constexpr static int ACQUIRED = 4; + constexpr static int READY = 8; + + using Placeholder = std::monostate; + + // static work handle + struct Static { + + template + Static(C&&); + + std::variant< + std::function, std::function + > work; + }; + + // dynamic work handle + struct Dynamic { + + template + Dynamic(C&&); + + std::function work; + Graph subgraph; + }; + + // condition work handle + struct Condition { + + template + Condition(C&&); + + std::variant< + std::function, std::function + > work; + }; + + // multi-condition work handle + struct MultiCondition { + + template + MultiCondition(C&&); + + std::variant< + std::function()>, std::function(Runtime&)> + > work; + }; + + // module work handle + struct Module { + + template + Module(T&); + + Graph& graph; + }; + + // Async work + struct Async { + + template + Async(T&&); + + std::function work; + }; + + // silent dependent async + struct DependentAsync { + + template + DependentAsync(C&&); + + std::function work; + + std::atomic state {AsyncState::UNFINISHED}; + }; + + using handle_t = std::variant< + Placeholder, // placeholder + Static, // static tasking + Dynamic, // dynamic tasking + Condition, // conditional tasking + MultiCondition, // multi-conditional tasking + Module, // composable tasking + Async, // async tasking + DependentAsync // dependent async tasking (no future) + >; + + struct Semaphores { + SmallVector to_acquire; + SmallVector to_release; + }; + + public: + + // variant index + constexpr static auto PLACEHOLDER = get_index_v; + constexpr static auto STATIC = get_index_v; + constexpr static auto DYNAMIC = get_index_v; + constexpr static auto CONDITION = get_index_v; + constexpr static auto MULTI_CONDITION = get_index_v; + constexpr static auto MODULE = get_index_v; + constexpr static auto ASYNC = get_index_v; + constexpr static auto DEPENDENT_ASYNC = get_index_v; + + Node() = default; + + template + Node(const std::string&, unsigned, Topology*, Node*, size_t, Args&&... 
args); + + ~Node(); + + size_t num_successors() const; + size_t num_dependents() const; + size_t num_strong_dependents() const; + size_t num_weak_dependents() const; + + const std::string& name() const; + + private: + + std::string _name; + + unsigned _priority {0}; + + Topology* _topology {nullptr}; + Node* _parent {nullptr}; + + void* _data {nullptr}; + + SmallVector _successors; + SmallVector _dependents; + + std::atomic _state {0}; + std::atomic _join_counter {0}; + + std::unique_ptr _semaphores; + + handle_t _handle; + + void _precede(Node*); + void _set_up_join_counter(); + + bool _is_cancelled() const; + bool _is_conditioner() const; + bool _acquire_all(SmallVector&); + + SmallVector _release_all(); +}; + +// ---------------------------------------------------------------------------- +// Node Object Pool +// ---------------------------------------------------------------------------- + +/** +@private +*/ +inline ObjectPool node_pool; + +// ---------------------------------------------------------------------------- +// Definition for Node::Static +// ---------------------------------------------------------------------------- + +// Constructor +template +Node::Static::Static(C&& c) : work {std::forward(c)} { +} + +// ---------------------------------------------------------------------------- +// Definition for Node::Dynamic +// ---------------------------------------------------------------------------- + +// Constructor +template +Node::Dynamic::Dynamic(C&& c) : work {std::forward(c)} { +} + +// ---------------------------------------------------------------------------- +// Definition for Node::Condition +// ---------------------------------------------------------------------------- + +// Constructor +template +Node::Condition::Condition(C&& c) : work {std::forward(c)} { +} + +// ---------------------------------------------------------------------------- +// Definition for Node::MultiCondition +// ---------------------------------------------------------------------------- + +// Constructor +template +Node::MultiCondition::MultiCondition(C&& c) : work {std::forward(c)} { +} + +// ---------------------------------------------------------------------------- +// Definition for Node::Module +// ---------------------------------------------------------------------------- + +// Constructor +template +inline Node::Module::Module(T& obj) : graph{ obj.graph() } { +} + +// ---------------------------------------------------------------------------- +// Definition for Node::Async +// ---------------------------------------------------------------------------- + +// Constructor +template +Node::Async::Async(C&& c) : work {std::forward(c)} { +} + +// ---------------------------------------------------------------------------- +// Definition for Node::DependentAsync +// ---------------------------------------------------------------------------- + +// Constructor +template +Node::DependentAsync::DependentAsync(C&& c) : work {std::forward(c)} { +} + +// ---------------------------------------------------------------------------- +// Definition for Node +// ---------------------------------------------------------------------------- + +// Constructor +template +Node::Node( + const std::string& name, + unsigned priority, + Topology* topology, + Node* parent, + size_t join_counter, + Args&&... args +) : + _name {name}, + _priority {priority}, + _topology {topology}, + _parent {parent}, + _join_counter {join_counter}, + _handle {std::forward(args)...} { +} + +//Node::Node(Args&&... 
args): _handle{std::forward(args)...} { +//} + +// Destructor +inline Node::~Node() { + // this is to avoid stack overflow + + if(_handle.index() == DYNAMIC) { + // using std::get_if instead of std::get makes this compatible + // with older macOS versions + // the result of std::get_if is guaranteed to be non-null + // due to the index check above + auto& subgraph = std::get_if(&_handle)->subgraph; + std::vector nodes; + nodes.reserve(subgraph.size()); + + std::move( + subgraph._nodes.begin(), subgraph._nodes.end(), std::back_inserter(nodes) + ); + subgraph._nodes.clear(); + + size_t i = 0; + + while(i < nodes.size()) { + + if(nodes[i]->_handle.index() == DYNAMIC) { + auto& sbg = std::get_if(&(nodes[i]->_handle))->subgraph; + std::move( + sbg._nodes.begin(), sbg._nodes.end(), std::back_inserter(nodes) + ); + sbg._nodes.clear(); + } + + ++i; + } + + //auto& np = Graph::_node_pool(); + for(i=0; i_dependents.push_back(this); +} + +// Function: num_successors +inline size_t Node::num_successors() const { + return _successors.size(); +} + +// Function: dependents +inline size_t Node::num_dependents() const { + return _dependents.size(); +} + +// Function: num_weak_dependents +inline size_t Node::num_weak_dependents() const { + size_t n = 0; + for(size_t i=0; i<_dependents.size(); i++) { + //if(_dependents[i]->_handle.index() == Node::CONDITION) { + if(_dependents[i]->_is_conditioner()) { + n++; + } + } + return n; +} + +// Function: num_strong_dependents +inline size_t Node::num_strong_dependents() const { + size_t n = 0; + for(size_t i=0; i<_dependents.size(); i++) { + //if(_dependents[i]->_handle.index() != Node::CONDITION) { + if(!_dependents[i]->_is_conditioner()) { + n++; + } + } + return n; +} + +// Function: name +inline const std::string& Node::name() const { + return _name; +} + +// Function: _is_conditioner +inline bool Node::_is_conditioner() const { + return _handle.index() == Node::CONDITION || + _handle.index() == Node::MULTI_CONDITION; +} + +// Function: _is_cancelled +inline bool Node::_is_cancelled() const { + return _topology && _topology->_is_cancelled.load(std::memory_order_relaxed); +} + +// Procedure: _set_up_join_counter +inline void Node::_set_up_join_counter() { + size_t c = 0; + for(auto p : _dependents) { + //if(p->_handle.index() == Node::CONDITION) { + if(p->_is_conditioner()) { + _state.fetch_or(Node::CONDITIONED, std::memory_order_relaxed); + } + else { + c++; + } + } + _join_counter.store(c, std::memory_order_release); +} + + +// Function: _acquire_all +inline bool Node::_acquire_all(SmallVector& nodes) { + + auto& to_acquire = _semaphores->to_acquire; + + for(size_t i = 0; i < to_acquire.size(); ++i) { + if(!to_acquire[i]->_try_acquire_or_wait(this)) { + for(size_t j = 1; j <= i; ++j) { + auto r = to_acquire[i-j]->_release(); + nodes.insert(std::end(nodes), std::begin(r), std::end(r)); + } + return false; + } + } + return true; +} + +// Function: _release_all +inline SmallVector Node::_release_all() { + + auto& to_release = _semaphores->to_release; + + SmallVector nodes; + for(const auto& sem : to_release) { + auto r = sem->_release(); + nodes.insert(std::end(nodes), std::begin(r), std::end(r)); + } + + return nodes; +} + +// ---------------------------------------------------------------------------- +// Node Deleter +// ---------------------------------------------------------------------------- + +/** +@private +*/ +struct NodeDeleter { + void operator ()(Node* ptr) { + node_pool.recycle(ptr); + } +}; + +// 
---------------------------------------------------------------------------- +// Graph definition +// ---------------------------------------------------------------------------- + +// Destructor +inline Graph::~Graph() { + _clear(); +} + +// Move constructor +inline Graph::Graph(Graph&& other) : + _nodes {std::move(other._nodes)} { +} + +// Move assignment +inline Graph& Graph::operator = (Graph&& other) { + _clear(); + _nodes = std::move(other._nodes); + return *this; +} + +// Procedure: clear +inline void Graph::clear() { + _clear(); +} + +// Procedure: clear +inline void Graph::_clear() { + for(auto node : _nodes) { + node_pool.recycle(node); + } + _nodes.clear(); +} + +// Procedure: clear_detached +inline void Graph::_clear_detached() { + + auto mid = std::partition(_nodes.begin(), _nodes.end(), [] (Node* node) { + return !(node->_state.load(std::memory_order_relaxed) & Node::DETACHED); + }); + + for(auto itr = mid; itr != _nodes.end(); ++itr) { + node_pool.recycle(*itr); + } + _nodes.resize(std::distance(_nodes.begin(), mid)); +} + +// Procedure: merge +inline void Graph::_merge(Graph&& g) { + for(auto n : g._nodes) { + _nodes.push_back(n); + } + g._nodes.clear(); +} + +// Function: erase +inline void Graph::_erase(Node* node) { + if(auto I = std::find(_nodes.begin(), _nodes.end(), node); I != _nodes.end()) { + _nodes.erase(I); + node_pool.recycle(node); + } +} + +// Function: size +inline size_t Graph::size() const { + return _nodes.size(); +} + +// Function: empty +inline bool Graph::empty() const { + return _nodes.empty(); +} + +/** +@private +*/ +template +Node* Graph::_emplace_back(ArgsT&&... args) { + _nodes.push_back(node_pool.animate(std::forward(args)...)); + return _nodes.back(); +} + +} // end of namespace tf. --------------------------------------------------- diff --git a/bundled/taskflow-2.5.0/include/taskflow/core/notifier.hpp b/bundled/taskflow-3.6.0/include/core/notifier.hpp similarity index 96% rename from bundled/taskflow-2.5.0/include/taskflow/core/notifier.hpp rename to bundled/taskflow-3.6.0/include/core/notifier.hpp index b3bdb9ff69..39bcf6495f 100644 --- a/bundled/taskflow-2.5.0/include/taskflow/core/notifier.hpp +++ b/bundled/taskflow-3.6.0/include/core/notifier.hpp @@ -67,7 +67,7 @@ class Notifier { friend class Executor; public: - + struct Waiter { std::atomic next; std::mutex mu; @@ -121,7 +121,8 @@ class Notifier { // Remove this thread from prewait counter and add it to the waiter list. assert((state & kWaiterMask) != 0); uint64_t newstate = state - kWaiterInc + kEpochInc; - newstate = (newstate & ~kStackMask) | (w - &_waiters[0]); + //newstate = (newstate & ~kStackMask) | (w - &_waiters[0]); + newstate = static_cast((newstate & ~kStackMask) | static_cast(w - &_waiters[0])); if ((state & kStackMask) == kStackMask) w->next.store(nullptr, std::memory_order_relaxed); else @@ -179,7 +180,8 @@ class Notifier { Waiter* w = &_waiters[state & kStackMask]; Waiter* wnext = w->next.load(std::memory_order_relaxed); uint64_t next = kStackMask; - if (wnext != nullptr) next = wnext - &_waiters[0]; + //if (wnext != nullptr) next = wnext - &_waiters[0]; + if (wnext != nullptr) next = static_cast(wnext - &_waiters[0]); // Note: we don't add kEpochInc here. 
ABA problem on the lock-free stack // can't happen because a waiter is re-pushed onto the stack only after // it was in the pre-wait state which inevitably leads to epoch @@ -197,7 +199,7 @@ class Notifier { } } } - + // notify n workers void notify_n(size_t n) { if(n >= _waiters.size()) { @@ -257,15 +259,6 @@ class Notifier { } } - Notifier(const Notifier&) = delete; - Notifier& operator=(const Notifier&) = delete; - - Notifier(Notifier&& rhs) : - _state {rhs._state.load()}, - _waiters {std::move(rhs._waiters)} { - } - - }; diff --git a/bundled/taskflow-3.6.0/include/core/observer.hpp b/bundled/taskflow-3.6.0/include/core/observer.hpp new file mode 100644 index 0000000000..3c1873efab --- /dev/null +++ b/bundled/taskflow-3.6.0/include/core/observer.hpp @@ -0,0 +1,1046 @@ +#pragma once + +#include "task.hpp" +#include "worker.hpp" + +/** +@file observer.hpp +@brief observer include file +*/ + +namespace tf { + +// ---------------------------------------------------------------------------- +// timeline data structure +// ---------------------------------------------------------------------------- + +/** +@brief default time point type of observers +*/ +using observer_stamp_t = std::chrono::time_point; + +/** +@private +*/ +struct Segment { + + std::string name; + TaskType type; + + observer_stamp_t beg; + observer_stamp_t end; + + template + auto save(Archiver& ar) const { + return ar(name, type, beg, end); + } + + template + auto load(Archiver& ar) { + return ar(name, type, beg, end); + } + + Segment() = default; + + Segment( + const std::string& n, TaskType t, observer_stamp_t b, observer_stamp_t e + ) : name {n}, type {t}, beg {b}, end {e} { + } + + auto span() const { + return end-beg; + } +}; + +/** +@private +*/ +struct Timeline { + + size_t uid; + + observer_stamp_t origin; + std::vector>> segments; + + Timeline() = default; + + Timeline(const Timeline& rhs) = delete; + Timeline(Timeline&& rhs) = default; + + Timeline& operator = (const Timeline& rhs) = delete; + Timeline& operator = (Timeline&& rhs) = default; + + template + auto save(Archiver& ar) const { + return ar(uid, origin, segments); + } + + template + auto load(Archiver& ar) { + return ar(uid, origin, segments); + } +}; + +/** +@private + */ +struct ProfileData { + + std::vector timelines; + + ProfileData() = default; + + ProfileData(const ProfileData& rhs) = delete; + ProfileData(ProfileData&& rhs) = default; + + ProfileData& operator = (const ProfileData& rhs) = delete; + ProfileData& operator = (ProfileData&&) = default; + + template + auto save(Archiver& ar) const { + return ar(timelines); + } + + template + auto load(Archiver& ar) { + return ar(timelines); + } +}; + +// ---------------------------------------------------------------------------- +// observer interface +// ---------------------------------------------------------------------------- + +/** +@class: ObserverInterface + +@brief class to derive an executor observer + +The tf::ObserverInterface class allows users to define custom methods to monitor +the behaviors of an executor. This is particularly useful when you want to +inspect the performance of an executor and visualize when each thread +participates in the execution of a task. +To prevent users from direct access to the internal threads and tasks, +tf::ObserverInterface provides immutable wrappers, +tf::WorkerView and tf::TaskView, over workers and tasks. + +Please refer to tf::WorkerView and tf::TaskView for details. 
+ +Example usage: + +@code{.cpp} + +struct MyObserver : public tf::ObserverInterface { + + MyObserver(const std::string& name) { + std::cout << "constructing observer " << name << '\n'; + } + + void set_up(size_t num_workers) override final { + std::cout << "setting up observer with " << num_workers << " workers\n"; + } + + void on_entry(WorkerView w, tf::TaskView tv) override final { + std::ostringstream oss; + oss << "worker " << w.id() << " ready to run " << tv.name() << '\n'; + std::cout << oss.str(); + } + + void on_exit(WorkerView w, tf::TaskView tv) override final { + std::ostringstream oss; + oss << "worker " << w.id() << " finished running " << tv.name() << '\n'; + std::cout << oss.str(); + } +}; + +tf::Taskflow taskflow; +tf::Executor executor; + +// insert tasks into taskflow +// ... + +// create a custom observer +std::shared_ptr observer = executor.make_observer("MyObserver"); + +// run the taskflow +executor.run(taskflow).wait(); +@endcode +*/ +class ObserverInterface { + + public: + + /** + @brief virtual destructor + */ + virtual ~ObserverInterface() = default; + + /** + @brief constructor-like method to call when the executor observer is fully created + @param num_workers the number of the worker threads in the executor + */ + virtual void set_up(size_t num_workers) = 0; + + /** + @brief method to call before a worker thread executes a closure + @param wv an immutable view of this worker thread + @param task_view a constant wrapper object to the task + */ + virtual void on_entry(WorkerView wv, TaskView task_view) = 0; + + /** + @brief method to call after a worker thread executed a closure + @param wv an immutable view of this worker thread + @param task_view a constant wrapper object to the task + */ + virtual void on_exit(WorkerView wv, TaskView task_view) = 0; +}; + +// ---------------------------------------------------------------------------- +// ChromeObserver definition +// ---------------------------------------------------------------------------- + +/** +@class: ChromeObserver + +@brief class to create an observer based on Chrome tracing format + +A tf::ChromeObserver inherits tf::ObserverInterface and defines methods to dump +the observed thread activities into a format that can be visualized through +@ChromeTracing. + +@code{.cpp} +tf::Taskflow taskflow; +tf::Executor executor; + +// insert tasks into taskflow +// ... + +// create a custom observer +std::shared_ptr observer = executor.make_observer(); + +// run the taskflow +executor.run(taskflow).wait(); + +// dump the thread activities to a chrome-tracing format. 
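+// note: the emitted JSON can be loaded in chrome://tracing; to keep it, one
+// may dump to a file stream instead, e.g. std::ofstream ofs("trace.json");
+// observer->dump(ofs); (the file name here is illustrative only)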
+observer->dump(std::cout); +@endcode +*/ +class ChromeObserver : public ObserverInterface { + + friend class Executor; + + // data structure to record each task execution + struct Segment { + + std::string name; + + observer_stamp_t beg; + observer_stamp_t end; + + Segment( + const std::string& n, + observer_stamp_t b, + observer_stamp_t e + ); + }; + + // data structure to store the entire execution timeline + struct Timeline { + observer_stamp_t origin; + std::vector> segments; + std::vector> stacks; + }; + + public: + + /** + @brief dumps the timelines into a @ChromeTracing format through + an output stream + */ + void dump(std::ostream& ostream) const; + + /** + @brief dumps the timelines into a @ChromeTracing format + */ + inline std::string dump() const; + + /** + @brief clears the timeline data + */ + inline void clear(); + + /** + @brief queries the number of tasks observed + */ + inline size_t num_tasks() const; + + private: + + inline void set_up(size_t num_workers) override final; + inline void on_entry(WorkerView w, TaskView task_view) override final; + inline void on_exit(WorkerView w, TaskView task_view) override final; + + Timeline _timeline; +}; + +// constructor +inline ChromeObserver::Segment::Segment( + const std::string& n, observer_stamp_t b, observer_stamp_t e +) : + name {n}, beg {b}, end {e} { +} + +// Procedure: set_up +inline void ChromeObserver::set_up(size_t num_workers) { + _timeline.segments.resize(num_workers); + _timeline.stacks.resize(num_workers); + + for(size_t w=0; w 0) { + break; + } + } + + os << '['; + + for(size_t w=first; w<_timeline.segments.size(); w++) { + + if(w != first && _timeline.segments[w].size() > 0) { + os << ','; + } + + for(size_t i=0; i<_timeline.segments[w].size(); i++) { + + os << '{'<< "\"cat\":\"ChromeObserver\","; + + // name field + os << "\"name\":\""; + if(_timeline.segments[w][i].name.empty()) { + os << w << '_' << i; + } + else { + os << _timeline.segments[w][i].name; + } + os << "\","; + + // segment field + os << "\"ph\":\"X\"," + << "\"pid\":1," + << "\"tid\":" << w << ',' + << "\"ts\":" << duration_cast( + _timeline.segments[w][i].beg - _timeline.origin + ).count() << ',' + << "\"dur\":" << duration_cast( + _timeline.segments[w][i].end - _timeline.segments[w][i].beg + ).count(); + + if(i != _timeline.segments[w].size() - 1) { + os << "},"; + } + else { + os << '}'; + } + } + } + os << "]\n"; +} + +// Function: dump +inline std::string ChromeObserver::dump() const { + std::ostringstream oss; + dump(oss); + return oss.str(); +} + +// Function: num_tasks +inline size_t ChromeObserver::num_tasks() const { + return std::accumulate( + _timeline.segments.begin(), _timeline.segments.end(), size_t{0}, + [](size_t sum, const auto& exe){ + return sum + exe.size(); + } + ); +} + +// ---------------------------------------------------------------------------- +// TFProfObserver definition +// ---------------------------------------------------------------------------- + +/** +@class TFProfObserver + +@brief class to create an observer based on the built-in taskflow profiler format + +A tf::TFProfObserver inherits tf::ObserverInterface and defines methods to dump +the observed thread activities into a format that can be visualized through +@TFProf. + +@code{.cpp} +tf::Taskflow taskflow; +tf::Executor executor; + +// insert tasks into taskflow +// ... 
+ +// create a custom observer +std::shared_ptr observer = executor.make_observer(); + +// run the taskflow +executor.run(taskflow).wait(); + +// dump the thread activities to Taskflow Profiler format. +observer->dump(std::cout); +@endcode + +*/ +class TFProfObserver : public ObserverInterface { + + friend class Executor; + friend class TFProfManager; + + /** @private overall task summary */ + struct TaskSummary { + size_t count {0}; + size_t total_span {0}; + size_t min_span; + size_t max_span; + + float avg_span() const { return total_span * 1.0f / count; } + }; + + /** @private worker summary at a level */ + struct WorkerSummary { + + size_t id; + size_t level; + size_t count {0}; + size_t total_span {0}; + size_t min_span{0}; + size_t max_span{0}; + + std::array tsum; + + float avg_span() const { return total_span * 1.0f / count; } + //return count < 2 ? 0.0f : total_delay * 1.0f / (count-1); + }; + + /** @private */ + struct Summary { + std::array tsum; + std::vector wsum; + + void dump_tsum(std::ostream&) const; + void dump_wsum(std::ostream&) const; + void dump(std::ostream&) const; + }; + + public: + + /** + @brief dumps the timelines into a @TFProf format through + an output stream + */ + void dump(std::ostream& ostream) const; + + /** + @brief dumps the timelines into a JSON string + */ + std::string dump() const; + + /** + @brief shows the summary report through an output stream + */ + void summary(std::ostream& ostream) const; + + /** + @brief returns the summary report in a string + */ + std::string summary() const; + + /** + @brief clears the timeline data + */ + void clear(); + + /** + @brief queries the number of tasks observed + */ + size_t num_tasks() const; + + /** + @brief queries the number of observed workers + */ + size_t num_workers() const; + + private: + + Timeline _timeline; + + std::vector> _stacks; + + inline void set_up(size_t num_workers) override final; + inline void on_entry(WorkerView, TaskView) override final; + inline void on_exit(WorkerView, TaskView) override final; +}; + + +// dump the task summary +inline void TFProfObserver::Summary::dump_tsum(std::ostream& os) const { + + // task summary + size_t type_w{10}, count_w{5}, time_w{9}, avg_w{8}, min_w{8}, max_w{8}; + + std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){ + if(i.count == 0) return; + count_w = std::max(count_w, std::to_string(i.count).size()); + }); + + std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){ + if(i.count == 0) return; + time_w = std::max(time_w, std::to_string(i.total_span).size()); + }); + + std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){ + if(i.count == 0) return; + avg_w = std::max(time_w, std::to_string(i.avg_span()).size()); + }); + + std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){ + if(i.count == 0) return; + min_w = std::max(min_w, std::to_string(i.min_span).size()); + }); + + std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){ + if(i.count == 0) return; + max_w = std::max(max_w, std::to_string(i.max_span).size()); + }); + + os << std::setw(type_w) << "-Task-" + << std::setw(count_w+2) << "Count" + << std::setw(time_w+2) << "Time (us)" + << std::setw(avg_w+2) << "Avg (us)" + << std::setw(min_w+2) << "Min (us)" + << std::setw(max_w+2) << "Max (us)" + << '\n'; + + for(size_t i=0; i(); + _timeline.origin = observer_stamp_t::clock::now(); + _timeline.segments.resize(num_workers); + _stacks.resize(num_workers); +} + +// Procedure: on_entry +inline void TFProfObserver::on_entry(WorkerView wv, TaskView) { + 
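+  // record the entry time on this worker's stack; the matching on_exit pops it
+  // to form one [begin, end) segment for the task at the current nesting level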
_stacks[wv.id()].push(observer_stamp_t::clock::now()); +} + +// Procedure: on_exit +inline void TFProfObserver::on_exit(WorkerView wv, TaskView tv) { + + size_t w = wv.id(); + + assert(!_stacks[w].empty()); + + if(_stacks[w].size() > _timeline.segments[w].size()) { + _timeline.segments[w].resize(_stacks[w].size()); + } + + auto beg = _stacks[w].top(); + _stacks[w].pop(); + + _timeline.segments[w][_stacks[w].size()].emplace_back( + tv.name(), tv.type(), beg, observer_stamp_t::clock::now() + ); +} + +// Function: clear +inline void TFProfObserver::clear() { + for(size_t w=0; w<_timeline.segments.size(); ++w) { + for(size_t l=0; l<_timeline.segments[w].size(); ++l) { + _timeline.segments[w][l].clear(); + } + while(!_stacks[w].empty()) { + _stacks[w].pop(); + } + } +} + +// Procedure: dump +inline void TFProfObserver::dump(std::ostream& os) const { + + using namespace std::chrono; + + size_t first; + + for(first = 0; first<_timeline.segments.size(); ++first) { + if(_timeline.segments[first].size() > 0) { + break; + } + } + + // not timeline data to dump + if(first == _timeline.segments.size()) { + os << "{}\n"; + return; + } + + os << "{\"executor\":\"" << _timeline.uid << "\",\"data\":["; + + bool comma = false; + + for(size_t w=first; w<_timeline.segments.size(); w++) { + for(size_t l=0; l<_timeline.segments[w].size(); l++) { + + if(_timeline.segments[w][l].empty()) { + continue; + } + + if(comma) { + os << ','; + } + else { + comma = true; + } + + os << "{\"worker\":" << w << ",\"level\":" << l << ",\"data\":["; + for(size_t i=0; i<_timeline.segments[w][l].size(); ++i) { + + const auto& s = _timeline.segments[w][l][i]; + + if(i) os << ','; + + // span + os << "{\"span\":[" + << duration_cast(s.beg - _timeline.origin).count() + << "," + << duration_cast(s.end - _timeline.origin).count() + << "],"; + + // name + os << "\"name\":\""; + if(s.name.empty()) { + os << w << '_' << i; + } + else { + os << s.name; + } + os << "\","; + + // e.g., category "type": "Condition Task" + os << "\"type\":\"" << to_string(s.type) << "\""; + + os << "}"; + } + os << "]}"; + } + } + + os << "]}\n"; +} + +// Function: dump +inline std::string TFProfObserver::dump() const { + std::ostringstream oss; + dump(oss); + return oss.str(); +} + +// Procedure: summary +inline void TFProfObserver::summary(std::ostream& os) const { + + using namespace std::chrono; + + Summary summary; + std::optional view_beg, view_end; + + // find the first non-empty worker + size_t first; + for(first = 0; first<_timeline.segments.size(); ++first) { + if(_timeline.segments[first].size() > 0) { + break; + } + } + + // not timeline data to dump + if(first == _timeline.segments.size()) { + goto end_of_summary; + } + + for(size_t w=first; w<_timeline.segments.size(); w++) { + for(size_t l=0; l<_timeline.segments[w].size(); l++) { + + if(_timeline.segments[w][l].empty()) { + continue; + } + + // worker w at level l + WorkerSummary ws; + ws.id = w; + ws.level = l; + ws.count = _timeline.segments[w][l].size(); + + // scan all tasks at level l + for(size_t i=0; i<_timeline.segments[w][l].size(); ++i) { + + // update the entire span + auto& s = _timeline.segments[w][l][i]; + view_beg = view_beg ? std::min(*view_beg, s.beg) : s.beg; + view_end = view_end ? std::max(*view_end, s.end) : s.end; + + // update the task summary + size_t t = duration_cast(s.end - s.beg).count(); + + auto& x = summary.tsum[static_cast(s.type)]; + x.count += 1; + x.total_span += t; + x.min_span = (x.count == 1) ? t : std::min(t, x.min_span); + x.max_span = (x.count == 1) ? 
t : std::max(t, x.max_span); + + // update the worker summary + ws.total_span += t; + ws.min_span = (i == 0) ? t : std::min(t, ws.min_span); + ws.max_span = (i == 0) ? t : std::max(t, ws.max_span); + + auto&y = ws.tsum[static_cast(s.type)]; + y.count += 1; + y.total_span += t; + y.min_span = (y.count == 1) ? t : std::min(t, y.min_span); + y.max_span = (y.count == 1) ? t : std::max(t, y.max_span); + + // update the delay + //if(i) { + // size_t d = duration_cast( + // s.beg - _timeline.segments[w][l][i-1].end + // ).count(); + // ws.total_delay += d; + // ws.min_delay = (i == 1) ? d : std::min(ws.min_delay, d); + // ws.max_delay = (i == 1) ? d : std::max(ws.max_delay, d); + //} + } + summary.wsum.push_back(ws); + } + } + + end_of_summary: + + size_t view = 0; + if(view_beg && view_end) { + view = duration_cast(*view_end - *view_beg).count(); + } + + os << "==Observer " << _timeline.uid << ": " + << num_workers() << " workers completed " + << num_tasks() << " tasks in " + << view << " us\n"; + + summary.dump(os); +} + +// Procedure: summary +inline std::string TFProfObserver::summary() const { + std::ostringstream oss; + summary(oss); + return oss.str(); +} + +// Function: num_tasks +inline size_t TFProfObserver::num_tasks() const { + size_t s = 0; + for(size_t w=0; w<_timeline.segments.size(); ++w) { + for(size_t l=0; l<_timeline.segments[w].size(); ++l) { + s += _timeline.segments[w][l].size(); + } + } + return s; +} + +// Function: num_workers +inline size_t TFProfObserver::num_workers() const { + size_t w = 0; + for(size_t i=0; i<_timeline.segments.size(); ++i) { + w += (!_timeline.segments[i].empty()); + } + return w; +} + + +// ---------------------------------------------------------------------------- +// TFProfManager +// ---------------------------------------------------------------------------- + +/** +@private +*/ +class TFProfManager { + + friend class Executor; + + public: + + ~TFProfManager(); + + TFProfManager(const TFProfManager&) = delete; + TFProfManager& operator=(const TFProfManager&) = delete; + + static TFProfManager& get(); + + void dump(std::ostream& ostream) const; + + private: + + const std::string _fpath; + + std::mutex _mutex; + std::vector> _observers; + + TFProfManager(); + + void _manage(std::shared_ptr observer); +}; + +// constructor +inline TFProfManager::TFProfManager() : + _fpath {get_env(TF_ENABLE_PROFILER)} { + +} + +// Procedure: manage +inline void TFProfManager::_manage(std::shared_ptr observer) { + std::lock_guard lock(_mutex); + _observers.push_back(std::move(observer)); +} + +// Procedure: dump +inline void TFProfManager::dump(std::ostream& os) const { + for(size_t i=0; i<_observers.size(); ++i) { + if(i) os << ','; + _observers[i]->dump(os); + } +} + +// Destructor +inline TFProfManager::~TFProfManager() { + std::ofstream ofs(_fpath); + if(ofs) { + // .tfp + if(_fpath.rfind(".tfp") != std::string::npos) { + ProfileData data; + data.timelines.reserve(_observers.size()); + for(size_t i=0; i<_observers.size(); ++i) { + data.timelines.push_back(std::move(_observers[i]->_timeline)); + } + Serializer serializer(ofs); + serializer(data); + } + // .json + else { // if(_fpath.rfind(".json") != std::string::npos) { + ofs << "[\n"; + for(size_t i=0; i<_observers.size(); ++i) { + if(i) ofs << ','; + _observers[i]->dump(ofs); + } + ofs << "]\n"; + } + } + // do a summary report in stderr for each observer + else { + std::ostringstream oss; + for(size_t i=0; i<_observers.size(); ++i) { + _observers[i]->summary(oss); + } + fprintf(stderr, "%s", 
oss.str().c_str()); + } +} + +// Function: get +inline TFProfManager& TFProfManager::get() { + static TFProfManager mgr; + return mgr; +} + +// ---------------------------------------------------------------------------- +// Identifier for Each Built-in Observer +// ---------------------------------------------------------------------------- + +/** @enum ObserverType + +@brief enumeration of all observer types + +*/ +enum class ObserverType : int { + TFPROF = 0, + CHROME, + UNDEFINED +}; + +/** +@brief convert an observer type to a human-readable string +*/ +inline const char* to_string(ObserverType type) { + switch(type) { + case ObserverType::TFPROF: return "tfprof"; + case ObserverType::CHROME: return "chrome"; + default: return "undefined"; + } +} + + +} // end of namespace tf ----------------------------------------------------- + + diff --git a/bundled/taskflow-3.6.0/include/core/semaphore.hpp b/bundled/taskflow-3.6.0/include/core/semaphore.hpp new file mode 100644 index 0000000000..12d6069b1d --- /dev/null +++ b/bundled/taskflow-3.6.0/include/core/semaphore.hpp @@ -0,0 +1,132 @@ +#pragma once + +#include +#include + +#include "declarations.hpp" + +/** +@file semaphore.hpp +@brief semaphore include file +*/ + +namespace tf { + +// ---------------------------------------------------------------------------- +// Semaphore +// ---------------------------------------------------------------------------- + +/** +@class Semaphore + +@brief class to create a semophore object for building a concurrency constraint + +A semaphore creates a constraint that limits the maximum concurrency, +i.e., the number of workers, in a set of tasks. +You can let a task acquire/release one or multiple semaphores before/after +executing its work. +A task can acquire and release a semaphore, +or just acquire or just release it. +A tf::Semaphore object starts with an initial count. +As long as that count is above 0, tasks can acquire the semaphore and do +their work. +If the count is 0 or less, a task trying to acquire the semaphore will not run +but goes to a waiting list of that semaphore. +When the semaphore is released by another task, +it reschedules all tasks on that waiting list. + +@code{.cpp} +tf::Executor executor(8); // create an executor of 8 workers +tf::Taskflow taskflow; + +tf::Semaphore semaphore(1); // create a semaphore with initial count 1 + +std::vector tasks { + taskflow.emplace([](){ std::cout << "A" << std::endl; }), + taskflow.emplace([](){ std::cout << "B" << std::endl; }), + taskflow.emplace([](){ std::cout << "C" << std::endl; }), + taskflow.emplace([](){ std::cout << "D" << std::endl; }), + taskflow.emplace([](){ std::cout << "E" << std::endl; }) +}; + +for(auto & task : tasks) { // each task acquires and release the semaphore + task.acquire(semaphore); + task.release(semaphore); +} + +executor.run(taskflow).wait(); +@endcode + +The above example creates five tasks with no dependencies between them. +Under normal circumstances, the five tasks would be executed concurrently. +However, this example has a semaphore with initial count 1, +and all tasks need to acquire that semaphore before running and release that +semaphore after they are done. +This arrangement limits the number of concurrently running tasks to only one. + +*/ +class Semaphore { + + friend class Node; + + public: + + /** + @brief constructs a semaphore with the given counter + + A semaphore creates a constraint that limits the maximum concurrency, + i.e., the number of workers, in a set of tasks. 
+ + @code{.cpp} + tf::Semaphore semaphore(4); // concurrency constraint of 4 workers + @endcode + */ + explicit Semaphore(size_t max_workers); + + /** + @brief queries the counter value (not thread-safe during the run) + */ + size_t count() const; + + private: + + std::mutex _mtx; + + size_t _counter; + + std::vector _waiters; + + bool _try_acquire_or_wait(Node*); + + std::vector _release(); +}; + +inline Semaphore::Semaphore(size_t max_workers) : + _counter(max_workers) { +} + +inline bool Semaphore::_try_acquire_or_wait(Node* me) { + std::lock_guard lock(_mtx); + if(_counter > 0) { + --_counter; + return true; + } + else { + _waiters.push_back(me); + return false; + } +} + +inline std::vector Semaphore::_release() { + std::lock_guard lock(_mtx); + ++_counter; + std::vector r{std::move(_waiters)}; + return r; +} + +inline size_t Semaphore::count() const { + return _counter; +} + +} // end of namespace tf. --------------------------------------------------- + diff --git a/bundled/taskflow-2.5.0/include/taskflow/core/task.hpp b/bundled/taskflow-3.6.0/include/core/task.hpp similarity index 50% rename from bundled/taskflow-2.5.0/include/taskflow/core/task.hpp rename to bundled/taskflow-3.6.0/include/core/task.hpp index 589050bf29..cd10b73117 100644 --- a/bundled/taskflow-2.5.0/include/taskflow/core/task.hpp +++ b/bundled/taskflow-3.6.0/include/core/task.hpp @@ -2,6 +2,11 @@ #include "graph.hpp" +/** +@file task.hpp +@brief task include file +*/ + namespace tf { // ---------------------------------------------------------------------------- @@ -13,35 +18,62 @@ namespace tf { @brief enumeration of all task types */ -enum TaskType { - PLACEHOLDER_TASK = Node::PLACEHOLDER_WORK, -#ifdef TF_ENABLE_CUDA - CUDAFLOW_TASK = Node::CUDAFLOW_WORK, -#endif - STATIC_TASK = Node::STATIC_WORK, - DYNAMIC_TASK = Node::DYNAMIC_WORK, - CONDITION_TASK = Node::CONDITION_WORK, - MODULE_TASK = Node::MODULE_WORK, - NUM_TASK_TYPES +enum class TaskType : int { + /** @brief placeholder task type */ + PLACEHOLDER = 0, + /** @brief static task type */ + STATIC, + /** @brief dynamic (subflow) task type */ + DYNAMIC, + /** @brief condition task type */ + CONDITION, + /** @brief module task type */ + MODULE, + /** @brief asynchronous task type */ + ASYNC, + /** @brief undefined task type (for internal use only) */ + UNDEFINED +}; + +/** +@private +@brief array of all task types (used for iterating task types) +*/ +inline constexpr std::array TASK_TYPES = { + TaskType::PLACEHOLDER, + TaskType::STATIC, + TaskType::DYNAMIC, + TaskType::CONDITION, + TaskType::MODULE, + TaskType::ASYNC, }; /** @brief convert a task type to a human-readable string + +The name of each task type is the litte-case string of its characters. 
+ +@code{.cpp} +TaskType::PLACEHOLDER -> "placeholder" +TaskType::STATIC -> "static" +TaskType::DYNAMIC -> "subflow" +TaskType::CONDITION -> "condition" +TaskType::MODULE -> "module" +TaskType::ASYNC -> "async" +@endcode */ -inline const char* task_type_to_string(TaskType type) { +inline const char* to_string(TaskType type) { const char* val; switch(type) { - case PLACEHOLDER_TASK: val = "placeholder"; break; -#ifdef TF_ENABLE_CUDA - case CUDAFLOW_TASK: val = "cudaflow"; break; -#endif - case STATIC_TASK: val = "static"; break; - case DYNAMIC_TASK: val = "subflow"; break; - case CONDITION_TASK: val = "condition"; break; - case MODULE_TASK: val = "module"; break; - default: val = "undefined"; break; + case TaskType::PLACEHOLDER: val = "placeholder"; break; + case TaskType::STATIC: val = "static"; break; + case TaskType::DYNAMIC: val = "subflow"; break; + case TaskType::CONDITION: val = "condition"; break; + case TaskType::MODULE: val = "module"; break; + case TaskType::ASYNC: val = "async"; break; + default: val = "undefined"; break; } return val; @@ -52,49 +84,51 @@ inline const char* task_type_to_string(TaskType type) { // ---------------------------------------------------------------------------- /** -@struct is_static_task - -@brief determines if a callable is a static task - -A static task is a callable object constructible from std::function. -*/ -template -constexpr bool is_static_task_v = is_invocable_r_v && - !is_invocable_r_v; - -/** -@struct is_dynamic_task - @brief determines if a callable is a dynamic task A dynamic task is a callable object constructible from std::function. */ template -constexpr bool is_dynamic_task_v = is_invocable_r_v; +constexpr bool is_dynamic_task_v = + std::is_invocable_r_v && + !std::is_invocable_r_v; /** -@struct is_condition_task - @brief determines if a callable is a condition task -A condition task is a callable object constructible from std::function. +A condition task is a callable object constructible from std::function +or std::function. */ template -constexpr bool is_condition_task_v = is_invocable_r_v; +constexpr bool is_condition_task_v = + (std::is_invocable_r_v || std::is_invocable_r_v) && + !is_dynamic_task_v; -#ifdef TF_ENABLE_CUDA /** -@struct is_cudaflow_task +@brief determines if a callable is a multi-condition task -@brief determines if a callable is a cudaflow task - -A cudaFlow task is a callable object constructible from std::function. +A multi-condition task is a callable object constructible from +std::function()> or +std::function(tf::Runtime&)>. */ template -constexpr bool is_cudaflow_task_v = is_invocable_r_v; -#endif +constexpr bool is_multi_condition_task_v = + (std::is_invocable_r_v, C> || + std::is_invocable_r_v, C, Runtime&>) && + !is_dynamic_task_v; +/** +@brief determines if a callable is a static task +A static task is a callable object constructible from std::function +or std::function. +*/ +template +constexpr bool is_static_task_v = + (std::is_invocable_r_v || std::is_invocable_r_v) && + !is_condition_task_v && + !is_multi_condition_task_v && + !is_dynamic_task_v; // ---------------------------------------------------------------------------- // Task @@ -103,19 +137,23 @@ constexpr bool is_cudaflow_task_v = is_invocable_r_v; /** @class Task -@brief handle to a node in a task dependency graph - -A Task is handle object of a node in a dependency graph. -It provides a set of methods for users to access and modify the attributes of -the associated graph node. 
+@brief class to create a task handle over a node in a taskflow graph +A task is a wrapper over a node in a taskflow graph. +It provides a set of methods for users to access and modify the attributes of +the associated node in the taskflow graph. +A task is very lightweight object (i.e., only storing a node pointer) that +can be trivially copied around, +and it does not own the lifetime of the associated node. */ class Task { friend class FlowBuilder; + friend class Runtime; friend class Taskflow; friend class TaskView; - + friend class Executor; + public: /** @@ -127,12 +165,12 @@ class Task { @brief constructs the task with the copy of the other task */ Task(const Task& other); - + /** @brief replaces the contents with a copy of the other task */ Task& operator = (const Task&); - + /** @brief replaces the contents with a null pointer */ @@ -147,12 +185,12 @@ class Task { @brief compares if two tasks are not associated with the same graph node */ bool operator != (const Task& rhs) const; - + /** @brief queries the name of the task */ const std::string& name() const; - + /** @brief queries the number of successors of the task */ @@ -162,7 +200,7 @@ class Task { @brief queries the number of predecessors of the task */ size_t num_dependents() const; - + /** @brief queries the number of strong dependents of the task */ @@ -172,7 +210,7 @@ class Task { @brief queries the number of weak dependents of the task */ size_t num_weak_dependents() const; - + /** @brief assigns a name to the task @@ -183,88 +221,109 @@ class Task { Task& name(const std::string& name); /** - @brief assigns a static task + @brief assigns a callable - @tparam C callable object type + @tparam C callable type - @param callable a callable object constructible from std::function + @param callable callable to construct a task @return @c *this */ template - std::enable_if_t, Task>& work(C&& callable); - - /** - @brief assigns a dynamic task + Task& work(C&& callable); - @tparam C callable object type + /** + @brief creates a module task from a taskflow - @param callable a callable object constructible from std::function + @tparam T object type + @param object a custom object that defines @c T::graph() method @return @c *this */ - template - std::enable_if_t, Task>& work(C&& callable); - + template + Task& composed_of(T& object); + /** - @brief assigns a condition task + @brief adds precedence links from this to other tasks - @tparam C callable object type + @tparam Ts parameter pack - @param callable a callable object constructible from std::function + @param tasks one or multiple tasks @return @c *this */ - template - std::enable_if_t, Task>& work(C&& callable); + template + Task& precede(Ts&&... tasks); -#ifdef TF_ENABLE_CUDA /** - @brief assigns a cudaFlow task + @brief adds precedence links from other tasks to this - @tparam C callable object type + @tparam Ts parameter pack - @param callable a callable object constructible from std::function + @param tasks one or multiple tasks @return @c *this */ - template - std::enable_if_t, Task>& work(C&& callable); -#endif + template + Task& succeed(Ts&&... 
tasks); /** - @brief creates a module task from a taskflow - - @param taskflow a taskflow object for the module + @brief makes the task release this semaphore + */ + Task& release(Semaphore& semaphore); - @return @c *this + /** + @brief makes the task acquire this semaphore */ - Task& composed_of(Taskflow& taskflow); - + Task& acquire(Semaphore& semaphore); + /** - @brief adds precedence links from this to other tasks + @brief assigns pointer to user data - @tparam Ts... parameter pack + @param data pointer to user data - @param tasks one or multiple tasks + The following example shows how to attach user data to a task and + run the task iteratively while changing the data value: - @return @c *this - */ - template - Task& precede(Ts&&... tasks); - - /** - @brief adds precedence links from other tasks to this + @code{.cpp} + tf::Executor executor; + tf::Taskflow taskflow("attach data to a task"); - @tparam Ts parameter pack + int data; - @param tasks one or multiple tasks + // create a task and attach it the data + auto A = taskflow.placeholder(); + A.data(&data).work([A](){ + auto d = *static_cast(A.data()); + std::cout << "data is " << d << std::endl; + }); + + // run the taskflow iteratively with changing data + for(data = 0; data<10; data++){ + executor.run(taskflow).wait(); + } + @endcode @return @c *this */ - template - Task& succeed(Ts&&... tasks); + Task& data(void* data); + + /** + @brief assigns a priority value to the task + + A priority value can be one of the following three levels, + tf::TaskPriority::HIGH (numerically equivalent to 0), + tf::TaskPriority::NORMAL (numerically equivalent to 1), and + tf::TaskPriority::LOW (numerically equivalent to 2). + The smaller the priority value, the higher the priority. + */ + Task& priority(TaskPriority p); + /** + @brief queries the priority value of the task + */ + TaskPriority priority() const; + /** @brief resets the task handle to null */ @@ -284,13 +343,13 @@ class Task { @brief queries if the task has a work assigned */ bool has_work() const; - + /** @brief applies an visitor callable to each successor of the task */ template void for_each_successor(V&& visitor) const; - + /** @brief applies an visitor callable to each dependents of the task */ @@ -301,29 +360,28 @@ class Task { @brief obtains a hash value of the underlying node */ size_t hash_value() const; - + /** @brief returns the task type */ TaskType type() const; + /** + @brief dumps the task through an output stream + */ + void dump(std::ostream& ostream) const; + + /** + @brief queries pointer to user data + */ + void* data() const; + + private: - + Task(Node*); Node* _node {nullptr}; - - template - void _precede(T&&); - - template - void _precede(T&&, Rest&&...); - - template - void _succeed(T&&); - - template - void _succeed(T&&, Rest&&...); }; // Constructor @@ -337,52 +395,23 @@ inline Task::Task(const Task& rhs) : _node {rhs._node} { // Function: precede template Task& Task::precede(Ts&&... tasks) { - //(_node->_precede(tgts._node), ...); - _precede(std::forward(tasks)...); + (_node->_precede(tasks._node), ...); + //_precede(std::forward(tasks)...); return *this; } -/// @private -// Procedure: _precede -template -void Task::_precede(T&& other) { - _node->_precede(other._node); -} - -/// @private -// Procedure: _precede -template -void Task::_precede(T&& task, Ts&&... others) { - _precede(std::forward(task)); - _precede(std::forward(others)...); -} - // Function: succeed template Task& Task::succeed(Ts&&... 
tasks) { - //(tasks._node->_precede(_node), ...); - _succeed(std::forward(tasks)...); + (tasks._node->_precede(_node), ...); + //_succeed(std::forward(tasks)...); return *this; } -/// @private -// Procedure: succeed -template -void Task::_succeed(T&& other) { - other._node->_precede(_node); -} - -/// @private -// Procedure: _succeed -template -void Task::_succeed(T&& task, Ts&&... others) { - _succeed(std::forward(task)); - _succeed(std::forward(others)...); -} - // Function: composed_of -inline Task& Task::composed_of(Taskflow& tf) { - _node->_handle.emplace(&tf); +template +Task& Task::composed_of(T& object) { + _node->_handle.emplace(object); return *this; } @@ -414,6 +443,25 @@ inline Task& Task::name(const std::string& name) { return *this; } +// Function: acquire +inline Task& Task::acquire(Semaphore& s) { + if(!_node->_semaphores) { + _node->_semaphores = std::make_unique(); + } + _node->_semaphores->to_acquire.push_back(&s); + return *this; +} + +// Function: release +inline Task& Task::release(Semaphore& s) { + if(!_node->_semaphores) { + //_node->_semaphores.emplace(); + _node->_semaphores = std::make_unique(); + } + _node->_semaphores->to_release.push_back(&s); + return *this; +} + // Procedure: reset inline void Task::reset() { _node = nullptr; @@ -421,7 +469,7 @@ inline void Task::reset() { // Procedure: reset_work inline void Task::reset_work() { - _node->_handle = nstd::monostate{}; + _node->_handle.emplace(); } // Function: name @@ -461,7 +509,17 @@ inline bool Task::has_work() const { // Function: task_type inline TaskType Task::type() const { - return static_cast(_node->_handle.index()); + switch(_node->_handle.index()) { + case Node::PLACEHOLDER: return TaskType::PLACEHOLDER; + case Node::STATIC: return TaskType::STATIC; + case Node::DYNAMIC: return TaskType::DYNAMIC; + case Node::CONDITION: return TaskType::CONDITION; + case Node::MULTI_CONDITION: return TaskType::CONDITION; + case Node::MODULE: return TaskType::MODULE; + case Node::ASYNC: return TaskType::ASYNC; + case Node::DEPENDENT_ASYNC: return TaskType::ASYNC; + default: return TaskType::UNDEFINED; + } } // Function: for_each_successor @@ -485,40 +543,72 @@ inline size_t Task::hash_value() const { return std::hash{}(_node); } -// Function: work -// assign a static work -template -std::enable_if_t, Task>& Task::work(C&& c) { - _node->_handle.emplace(std::forward(c)); - return *this; +// Procedure: dump +inline void Task::dump(std::ostream& os) const { + os << "task "; + if(name().empty()) os << _node; + else os << name(); + os << " [type=" << to_string(type()) << ']'; } // Function: work -// assigns a dynamic work template -std::enable_if_t, Task>& Task::work(C&& c) { - _node->_handle.emplace(std::forward(c)); +Task& Task::work(C&& c) { + + if constexpr(is_static_task_v) { + _node->_handle.emplace(std::forward(c)); + } + else if constexpr(is_dynamic_task_v) { + _node->_handle.emplace(std::forward(c)); + } + else if constexpr(is_condition_task_v) { + _node->_handle.emplace(std::forward(c)); + } + else if constexpr(is_multi_condition_task_v) { + _node->_handle.emplace(std::forward(c)); + } + else { + static_assert(dependent_false_v, "invalid task callable"); + } return *this; } -// Function: work -// assigns a condition work -template -std::enable_if_t, Task>& Task::work(C&& c) { - _node->_handle.emplace(std::forward(c)); +// Function: data +inline void* Task::data() const { + return _node->_data; +} + +// Function: data +inline Task& Task::data(void* data) { + _node->_data = data; return *this; } -#ifdef 
TF_ENABLE_CUDA -// Function: work -// assigns a cudaFlow work -template -std::enable_if_t, Task>& Task::work(C&& c) { - _node->_handle.emplace(std::forward(c)); +// Function: priority +inline Task& Task::priority(TaskPriority p) { + _node->_priority = static_cast(p); return *this; } -#endif +// Function: priority +inline TaskPriority Task::priority() const { + return static_cast(_node->_priority); +} + +// ---------------------------------------------------------------------------- +// global ostream +// ---------------------------------------------------------------------------- + +/** +@brief overload of ostream inserter operator for Task +*/ +inline std::ostream& operator << (std::ostream& os, const Task& task) { + task.dump(os); + return os; +} + +// ---------------------------------------------------------------------------- +// Task View // ---------------------------------------------------------------------------- /** @@ -527,56 +617,16 @@ std::enable_if_t, Task>& Task::work(C&& c) { @brief class to access task information from the observer interface */ class TaskView { - + friend class Executor; public: - /** - @brief constructs an empty task view - */ - TaskView() = default; - - /** - @brief constructs a task view from a task - */ - TaskView(const Task& task); - - /** - @brief constructs the task with the copy of the other task - */ - TaskView(const TaskView& other); - - /** - @brief replaces the contents with a copy of the other task - */ - TaskView& operator = (const TaskView& other); - - /** - @brief replaces the contents with another task - */ - TaskView& operator = (const Task& other); - - /** - @brief replaces the contents with a null pointer - */ - TaskView& operator = (std::nullptr_t); - - /** - @brief compares if two taskviews are associated with the same task - */ - bool operator == (const TaskView&) const; - - /** - @brief compares if two taskviews are associated with different tasks - */ - bool operator != (const TaskView&) const; - /** @brief queries the name of the task */ const std::string& name() const; - + /** @brief queries the number of successors of the task */ @@ -586,7 +636,7 @@ class TaskView { @brief queries the number of predecessors of the task */ size_t num_dependents() const; - + /** @brief queries the number of strong dependents of the task */ @@ -597,22 +647,12 @@ class TaskView { */ size_t num_weak_dependents() const; - /** - @brief resets to an empty view - */ - void reset(); - - /** - @brief queries if the task view is empty - */ - bool empty() const; - /** @brief applies an visitor callable to each successor of the task */ template void for_each_successor(V&& visitor) const; - + /** @brief applies an visitor callable to each dependents of the task */ @@ -623,107 +663,82 @@ class TaskView { @brief queries the task type */ TaskType type() const; - - private: - - TaskView(Node*); - - Node* _node {nullptr}; -}; - -// Constructor -inline TaskView::TaskView(Node* node) : _node {node} { -} -// Constructor -inline TaskView::TaskView(const TaskView& rhs) : _node {rhs._node} { -} + /** + @brief obtains a hash value of the underlying node + */ + size_t hash_value() const; -// Constructor -inline TaskView::TaskView(const Task& task) : _node {task._node} { -} + private: -// Operator = -inline TaskView& TaskView::operator = (const TaskView& rhs) { - _node = rhs._node; - return *this; -} + TaskView(const Node&); + TaskView(const TaskView&) = default; -// Operator = -inline TaskView& TaskView::operator = (const Task& rhs) { - _node = rhs._node; - return *this; 
-} + const Node& _node; +}; -// Operator = -inline TaskView& TaskView::operator = (std::nullptr_t ptr) { - _node = ptr; - return *this; +// Constructor +inline TaskView::TaskView(const Node& node) : _node {node} { } // Function: name inline const std::string& TaskView::name() const { - return _node->_name; + return _node._name; } // Function: num_dependents inline size_t TaskView::num_dependents() const { - return _node->num_dependents(); + return _node.num_dependents(); } // Function: num_strong_dependents inline size_t TaskView::num_strong_dependents() const { - return _node->num_strong_dependents(); + return _node.num_strong_dependents(); } // Function: num_weak_dependents inline size_t TaskView::num_weak_dependents() const { - return _node->num_weak_dependents(); + return _node.num_weak_dependents(); } // Function: num_successors inline size_t TaskView::num_successors() const { - return _node->num_successors(); -} - -// Function: reset -inline void TaskView::reset() { - _node = nullptr; -} - -// Function: empty -inline bool TaskView::empty() const { - return _node == nullptr; + return _node.num_successors(); } // Function: type inline TaskType TaskView::type() const { - return static_cast(_node->_handle.index()); -} - -// Operator == -inline bool TaskView::operator == (const TaskView& rhs) const { - return _node == rhs._node; + switch(_node._handle.index()) { + case Node::PLACEHOLDER: return TaskType::PLACEHOLDER; + case Node::STATIC: return TaskType::STATIC; + case Node::DYNAMIC: return TaskType::DYNAMIC; + case Node::CONDITION: return TaskType::CONDITION; + case Node::MULTI_CONDITION: return TaskType::CONDITION; + case Node::MODULE: return TaskType::MODULE; + case Node::ASYNC: return TaskType::ASYNC; + case Node::DEPENDENT_ASYNC: return TaskType::ASYNC; + default: return TaskType::UNDEFINED; + } } -// Operator != -inline bool TaskView::operator != (const TaskView& rhs) const { - return _node != rhs._node; +// Function: hash_value +inline size_t TaskView::hash_value() const { + return std::hash{}(&_node); } // Function: for_each_successor template void TaskView::for_each_successor(V&& visitor) const { - for(size_t i=0; i<_node->_successors.size(); ++i) { - visitor(TaskView(_node->_successors[i])); + for(size_t i=0; i<_node._successors.size(); ++i) { + visitor(TaskView(*_node._successors[i])); } } // Function: for_each_dependent template void TaskView::for_each_dependent(V&& visitor) const { - for(size_t i=0; i<_node->_dependents.size(); ++i) { - visitor(TaskView(_node->_dependents[i])); + for(size_t i=0; i<_node._dependents.size(); ++i) { + visitor(TaskView(*_node._dependents[i])); } } @@ -732,10 +747,9 @@ void TaskView::for_each_dependent(V&& visitor) const { namespace std { /** -@class hash +@struct hash @brief hash specialization for std::hash - */ template <> struct hash { @@ -744,6 +758,18 @@ struct hash { } }; +/** +@struct hash + +@brief hash specialization for std::hash +*/ +template <> +struct hash { + auto operator() (const tf::TaskView& task_view) const noexcept { + return task_view.hash_value(); + } +}; + } // end of namespace std ---------------------------------------------------- diff --git a/bundled/taskflow-3.6.0/include/core/taskflow.hpp b/bundled/taskflow-3.6.0/include/core/taskflow.hpp new file mode 100644 index 0000000000..ff836f5fbd --- /dev/null +++ b/bundled/taskflow-3.6.0/include/core/taskflow.hpp @@ -0,0 +1,619 @@ +#pragma once + +#include "flow_builder.hpp" + +/** +@file taskflow/core/taskflow.hpp +@brief taskflow include file +*/ + +namespace tf { + +// 
---------------------------------------------------------------------------- + +/** +@class Taskflow + +@brief class to create a taskflow object + +A %taskflow manages a task dependency graph where each task represents a +callable object (e.g., @std_lambda, @std_function) and an edge represents a +dependency between two tasks. A task is one of the following types: + + 1. static task : the callable constructible from + @c std::function + 2. dynamic task : the callable constructible from + @c std::function + 3. condition task : the callable constructible from + @c std::function + 4. multi-condition task: the callable constructible from + @c %std::function()> + 5. module task : the task constructed from tf::Taskflow::composed_of + @c std::function + +Each task is a basic computation unit and is run by one worker thread +from an executor. +The following example creates a simple taskflow graph of four static tasks, +@c A, @c B, @c C, and @c D, where +@c A runs before @c B and @c C and +@c D runs after @c B and @c C. + +@code{.cpp} +tf::Executor executor; +tf::Taskflow taskflow("simple"); + +tf::Task A = taskflow.emplace([](){ std::cout << "TaskA\n"; }); +tf::Task B = taskflow.emplace([](){ std::cout << "TaskB\n"; }); +tf::Task C = taskflow.emplace([](){ std::cout << "TaskC\n"; }); +tf::Task D = taskflow.emplace([](){ std::cout << "TaskD\n"; }); + +A.precede(B, C); // A runs before B and C +D.succeed(B, C); // D runs after B and C + +executor.run(taskflow).wait(); +@endcode + +The taskflow object itself is NOT thread-safe. You should not +modifying the graph while it is running, +such as adding new tasks, adding new dependencies, and moving +the taskflow to another. +To minimize the overhead of task creation, +our runtime leverages a global object pool to recycle +tasks in a thread-safe manner. + +Please refer to @ref Cookbook to learn more about each task type +and how to submit a taskflow to an executor. +*/ +class Taskflow : public FlowBuilder { + + friend class Topology; + friend class Executor; + friend class FlowBuilder; + + struct Dumper { + size_t id; + std::stack> stack; + std::unordered_map visited; + }; + + public: + + /** + @brief constructs a taskflow with the given name + + @code{.cpp} + tf::Taskflow taskflow("My Taskflow"); + std::cout << taskflow.name(); // "My Taskflow" + @endcode + */ + Taskflow(const std::string& name); + + /** + @brief constructs a taskflow + */ + Taskflow(); + + /** + @brief constructs a taskflow from a moved taskflow + + Constructing a taskflow @c taskflow1 from a moved taskflow @c taskflow2 will + migrate the graph of @c taskflow2 to @c taskflow1. + After the move, @c taskflow2 will become empty. + + @code{.cpp} + tf::Taskflow taskflow1(std::move(taskflow2)); + assert(taskflow2.empty()); + @endcode + + Notice that @c taskflow2 should not be running in an executor + during the move operation, or the behavior is undefined. + */ + Taskflow(Taskflow&& rhs); + + /** + @brief move assignment operator + + Moving a taskflow @c taskflow2 to another taskflow @c taskflow1 will destroy + the existing graph of @c taskflow1 and assign it the graph of @c taskflow2. + After the move, @c taskflow2 will become empty. + + @code{.cpp} + taskflow1 = std::move(taskflow2); + assert(taskflow2.empty()); + @endcode + + Notice that both @c taskflow1 and @c taskflow2 should not be running + in an executor during the move operation, or the behavior is undefined. 
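A minimal sketch of the safe ordering implied here: wait for every submitted run to finish before moving the taskflow (names are illustrative):

@code{.cpp}
tf::Executor executor;
tf::Taskflow taskflow2("source");
taskflow2.emplace([](){ /* work */ });

// ensure taskflow2 is no longer running before it is moved
executor.run(taskflow2).wait();

tf::Taskflow taskflow1(std::move(taskflow2));
assert(taskflow2.empty());  // the moved-from taskflow is now empty
@endcode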
+ */ + Taskflow& operator = (Taskflow&& rhs); + + /** + @brief default destructor + + When the destructor is called, all tasks and their associated data + (e.g., captured data) will be destroyed. + It is your responsibility to ensure all submitted execution of this + taskflow have completed before destroying it. + For instance, the following code results in undefined behavior + since the executor may still be running the taskflow while + it is destroyed after the block. + + @code{.cpp} + { + tf::Taskflow taskflow; + executor.run(taskflow); + } + @endcode + + To fix the problem, we must wait for the execution to complete + before destroying the taskflow. + + @code{.cpp} + { + tf::Taskflow taskflow; + executor.run(taskflow).wait(); + } + @endcode + */ + ~Taskflow() = default; + + /** + @brief dumps the taskflow to a DOT format through a std::ostream target + + @code{.cpp} + taskflow.dump(std::cout); // dump the graph to the standard output + + std::ofstream ofs("output.dot"); + taskflow.dump(ofs); // dump the graph to the file output.dot + @endcode + + For dynamically spawned tasks, such as module tasks, subflow tasks, + and GPU tasks, you need to run the taskflow first before you can + dump the entire graph. + + @code{.cpp} + tf::Task parent = taskflow.emplace([](tf::Subflow sf){ + sf.emplace([](){ std::cout << "child\n"; }); + }); + taskflow.dump(std::cout); // this dumps only the parent tasks + executor.run(taskflow).wait(); + taskflow.dump(std::cout); // this dumps both parent and child tasks + @endcode + */ + void dump(std::ostream& ostream) const; + + /** + @brief dumps the taskflow to a std::string of DOT format + + This method is similar to tf::Taskflow::dump(std::ostream& ostream), + but returning a string of the graph in DOT format. + */ + std::string dump() const; + + /** + @brief queries the number of tasks + */ + size_t num_tasks() const; + + /** + @brief queries the emptiness of the taskflow + + An empty taskflow has no tasks. That is the return of + tf::Taskflow::num_tasks is zero. + */ + bool empty() const; + + /** + @brief assigns a name to the taskflow + + @code{.cpp} + taskflow.name("assign another name"); + @endcode + */ + void name(const std::string&); + + /** + @brief queries the name of the taskflow + + @code{.cpp} + std::cout << "my name is: " << taskflow.name(); + @endcode + */ + const std::string& name() const; + + /** + @brief clears the associated task dependency graph + + When you clear a taskflow, all tasks and their associated data + (e.g., captured data in task callables) will be destroyed. + The behavior of clearing a running taskflow is undefined. + */ + void clear(); + + /** + @brief applies a visitor to each task in the taskflow + + A visitor is a callable that takes an argument of type tf::Task + and returns nothing. The following example iterates each task in a + taskflow and prints its name: + + @code{.cpp} + taskflow.for_each_task([](tf::Task task){ + std::cout << task.name() << '\n'; + }); + @endcode + */ + template + void for_each_task(V&& visitor) const; + + /** + @brief returns a reference to the underlying graph object + + A graph object (of type tf::Graph) is the ultimate storage for the + task dependency graph and should only be used as an opaque + data structure to interact with the executor (e.g., composition). 
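The composition use case mentioned above can be sketched with tf::Taskflow::composed_of, which the class documentation lists as the way to build a module task; task and taskflow names are illustrative:

@code{.cpp}
tf::Executor executor;
tf::Taskflow core, top;

tf::Task init = core.emplace([](){}).name("init");
tf::Task step = core.emplace([](){}).name("step");
init.precede(step);

// reuse 'core' as a single module task inside 'top'
tf::Task pre    = top.emplace([](){}).name("pre");
tf::Task module = top.composed_of(core).name("core-module");
tf::Task post   = top.emplace([](){}).name("post");
pre.precede(module);
module.precede(post);

executor.run(top).wait();
@endcode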
+ */ + Graph& graph(); + + private: + + mutable std::mutex _mutex; + + std::string _name; + + Graph _graph; + + std::queue> _topologies; + + std::optional::iterator> _satellite; + + void _dump(std::ostream&, const Graph*) const; + void _dump(std::ostream&, const Node*, Dumper&) const; + void _dump(std::ostream&, const Graph*, Dumper&) const; +}; + +// Constructor +inline Taskflow::Taskflow(const std::string& name) : + FlowBuilder {_graph}, + _name {name} { +} + +// Constructor +inline Taskflow::Taskflow() : FlowBuilder{_graph} { +} + +// Move constructor +inline Taskflow::Taskflow(Taskflow&& rhs) : FlowBuilder{_graph} { + + std::scoped_lock lock(rhs._mutex); + + _name = std::move(rhs._name); + _graph = std::move(rhs._graph); + _topologies = std::move(rhs._topologies); + _satellite = rhs._satellite; + + rhs._satellite.reset(); +} + +// Move assignment +inline Taskflow& Taskflow::operator = (Taskflow&& rhs) { + if(this != &rhs) { + std::scoped_lock lock(_mutex, rhs._mutex); + _name = std::move(rhs._name); + _graph = std::move(rhs._graph); + _topologies = std::move(rhs._topologies); + _satellite = rhs._satellite; + rhs._satellite.reset(); + } + return *this; +} + +// Procedure: +inline void Taskflow::clear() { + _graph._clear(); +} + +// Function: num_tasks +inline size_t Taskflow::num_tasks() const { + return _graph.size(); +} + +// Function: empty +inline bool Taskflow::empty() const { + return _graph.empty(); +} + +// Function: name +inline void Taskflow::name(const std::string &name) { + _name = name; +} + +// Function: name +inline const std::string& Taskflow::name() const { + return _name; +} + +// Function: graph +inline Graph& Taskflow::graph() { + return _graph; +} + +// Function: for_each_task +template +void Taskflow::for_each_task(V&& visitor) const { + for(size_t i=0; i<_graph._nodes.size(); ++i) { + visitor(Task(_graph._nodes[i])); + } +} + +// Procedure: dump +inline std::string Taskflow::dump() const { + std::ostringstream oss; + dump(oss); + return oss.str(); +} + +// Function: dump +inline void Taskflow::dump(std::ostream& os) const { + os << "digraph Taskflow {\n"; + _dump(os, &_graph); + os << "}\n"; +} + +// Procedure: _dump +inline void Taskflow::_dump(std::ostream& os, const Graph* top) const { + + Dumper dumper; + + dumper.id = 0; + dumper.stack.push({nullptr, top}); + dumper.visited[top] = dumper.id++; + + while(!dumper.stack.empty()) { + + auto [p, f] = dumper.stack.top(); + dumper.stack.pop(); + + os << "subgraph cluster_p" << f << " {\nlabel=\""; + + // n-level module + if(p) { + os << 'm' << dumper.visited[f]; + } + // top-level taskflow graph + else { + os << "Taskflow: "; + if(_name.empty()) os << 'p' << this; + else os << _name; + } + + os << "\";\n"; + + _dump(os, f, dumper); + os << "}\n"; + } +} + +// Procedure: _dump +inline void Taskflow::_dump( + std::ostream& os, const Node* node, Dumper& dumper +) const { + + os << 'p' << node << "[label=\""; + if(node->_name.empty()) os << 'p' << node; + else os << node->_name; + os << "\" "; + + // shape for node + switch(node->_handle.index()) { + + case Node::CONDITION: + case Node::MULTI_CONDITION: + os << "shape=diamond color=black fillcolor=aquamarine style=filled"; + break; + + default: + break; + } + + os << "];\n"; + + for(size_t s=0; s_successors.size(); ++s) { + if(node->_is_conditioner()) { + // case edge is dashed + os << 'p' << node << " -> p" << node->_successors[s] + << " [style=dashed label=\"" << s << "\"];\n"; + } else { + os << 'p' << node << " -> p" << node->_successors[s] << ";\n"; + } + } + + // 
subflow join node + if(node->_parent && node->_parent->_handle.index() == Node::DYNAMIC && + node->_successors.size() == 0 + ) { + os << 'p' << node << " -> p" << node->_parent << ";\n"; + } + + // node info + switch(node->_handle.index()) { + + case Node::DYNAMIC: { + auto& sbg = std::get_if(&node->_handle)->subgraph; + if(!sbg.empty()) { + os << "subgraph cluster_p" << node << " {\nlabel=\"Subflow: "; + if(node->_name.empty()) os << 'p' << node; + else os << node->_name; + + os << "\";\n" << "color=blue\n"; + _dump(os, &sbg, dumper); + os << "}\n"; + } + } + break; + + default: + break; + } +} + +// Procedure: _dump +inline void Taskflow::_dump( + std::ostream& os, const Graph* graph, Dumper& dumper +) const { + + for(const auto& n : graph->_nodes) { + + // regular task + if(n->_handle.index() != Node::MODULE) { + _dump(os, n, dumper); + } + // module task + else { + //auto module = &(std::get_if(&n->_handle)->module); + auto module = &(std::get_if(&n->_handle)->graph); + + os << 'p' << n << "[shape=box3d, color=blue, label=\""; + if(n->_name.empty()) os << 'p' << n; + else os << n->_name; + + if(dumper.visited.find(module) == dumper.visited.end()) { + dumper.visited[module] = dumper.id++; + dumper.stack.push({n, module}); + } + + os << " [m" << dumper.visited[module] << "]\"];\n"; + + for(const auto s : n->_successors) { + os << 'p' << n << "->" << 'p' << s << ";\n"; + } + } + } +} + +// ---------------------------------------------------------------------------- +// class definition: Future +// ---------------------------------------------------------------------------- + +/** +@class Future + +@brief class to access the result of an execution + +tf::Future is a derived class from std::future that will eventually hold the +execution result of a submitted taskflow (tf::Executor::run) +or an asynchronous task (tf::Executor::async, tf::Executor::silent_async). +In addition to the base methods inherited from std::future, +you can call tf::Future::cancel to cancel the execution of the running taskflow +associated with this future object. +The following example cancels a submission of a taskflow that contains +1000 tasks each running one second. + +@code{.cpp} +tf::Executor executor; +tf::Taskflow taskflow; + +for(int i=0; i<1000; i++) { + taskflow.emplace([](){ + std::this_thread::sleep_for(std::chrono::seconds(1)); + }); +} + +// submit the taskflow +tf::Future fu = executor.run(taskflow); + +// request to cancel the submitted execution above +fu.cancel(); + +// wait until the cancellation finishes +fu.get(); +@endcode +*/ +template +class Future : public std::future { + + friend class Executor; + friend class Subflow; + friend class Runtime; + + using handle_t = std::variant< + std::monostate, std::weak_ptr + >; + + public: + + /** + @brief default constructor + */ + Future() = default; + + /** + @brief disabled copy constructor + */ + Future(const Future&) = delete; + + /** + @brief default move constructor + */ + Future(Future&&) = default; + + /** + @brief disabled copy assignment + */ + Future& operator = (const Future&) = delete; + + /** + @brief default move assignment + */ + Future& operator = (Future&&) = default; + + /** + @brief cancels the execution of the running taskflow associated with + this future object + + @return @c true if the execution can be cancelled or + @c false if the execution has already completed + + When you request a cancellation, the executor will stop scheduling + any tasks onwards. Tasks that are already running will continue to finish + (non-preemptive). 
+ You can call tf::Future::wait to wait for the cancellation to complete. + */ + bool cancel(); + + private: + + handle_t _handle; + + template + Future(std::future&&, P&&); +}; + +template +template +Future::Future(std::future&& fu, P&& p) : + std::future {std::move(fu)}, + _handle {std::forward
<P>
(p)} { +} + +// Function: cancel +template +bool Future::cancel() { + return std::visit([](auto&& arg){ + using P = std::decay_t; + if constexpr(std::is_same_v) { + return false; + } + else { + auto ptr = arg.lock(); + if(ptr) { + ptr->_is_cancelled.store(true, std::memory_order_relaxed); + return true; + } + return false; + } + }, _handle); +} + + +} // end of namespace tf. --------------------------------------------------- diff --git a/bundled/taskflow-3.6.0/include/core/topology.hpp b/bundled/taskflow-3.6.0/include/core/topology.hpp new file mode 100644 index 0000000000..b4d9eab2e0 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/core/topology.hpp @@ -0,0 +1,56 @@ +#pragma once + +namespace tf { + +// ---------------------------------------------------------------------------- + +// class: TopologyBase +class TopologyBase { + + friend class Executor; + friend class Node; + + template + friend class Future; + + protected: + + std::atomic _is_cancelled { false }; +}; + +// ---------------------------------------------------------------------------- + +// class: Topology +class Topology : public TopologyBase { + + friend class Executor; + friend class Runtime; + + public: + + template + Topology(Taskflow&, P&&, C&&); + + private: + + Taskflow& _taskflow; + + std::promise _promise; + + SmallVector _sources; + + std::function _pred; + std::function _call; + + std::atomic _join_counter {0}; +}; + +// Constructor +template +Topology::Topology(Taskflow& tf, P&& p, C&& c): + _taskflow(tf), + _pred {std::forward
<P>
(p)}, + _call {std::forward(c)} { +} + +} // end of namespace tf. ---------------------------------------------------- diff --git a/bundled/taskflow-3.6.0/include/core/tsq.hpp b/bundled/taskflow-3.6.0/include/core/tsq.hpp new file mode 100644 index 0000000000..e4ea76c289 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/core/tsq.hpp @@ -0,0 +1,441 @@ +#pragma once + +#include "../utility/macros.hpp" +#include "../utility/traits.hpp" + +/** +@file tsq.hpp +@brief task queue include file +*/ + +namespace tf { + + +// ---------------------------------------------------------------------------- +// Task Types +// ---------------------------------------------------------------------------- + +/** +@enum TaskPriority + +@brief enumeration of all task priority values + +A priority is an enumerated value of type @c unsigned. +Currently, %Taskflow defines three priority levels, +@c HIGH, @c NORMAL, and @c LOW, starting from 0, 1, to 2. +That is, the lower the value, the higher the priority. + +*/ +enum class TaskPriority : unsigned { + /** @brief value of the highest priority (i.e., 0) */ + HIGH = 0, + /** @brief value of the normal priority (i.e., 1) */ + NORMAL = 1, + /** @brief value of the lowest priority (i.e., 2) */ + LOW = 2, + /** @brief conventional value for iterating priority values */ + MAX = 3 +}; + + + +// ---------------------------------------------------------------------------- +// Task Queue +// ---------------------------------------------------------------------------- + + +/** +@class: TaskQueue + +@tparam T data type (must be a pointer type) +@tparam TF_MAX_PRIORITY maximum level of the priority + +@brief class to create a lock-free unbounded single-producer multiple-consumer queue + +This class implements the work-stealing queue described in the paper, +Correct and Efficient Work-Stealing for Weak Memory Models, +and extends it to include priority. + +Only the queue owner can perform pop and push operations, +while others can steal data from the queue simultaneously. +Priority starts from zero (highest priority) to the template value +`TF_MAX_PRIORITY-1` (lowest priority). +All operations are associated with priority values to indicate +the corresponding queues to which an operation is applied. + +The default template value, `TF_MAX_PRIORITY`, is `TaskPriority::MAX` +which applies only three priority levels to the task queue. + +@code{.cpp} +auto [A, B, C, D, E] = taskflow.emplace( + [] () { }, + [&] () { + std::cout << "Task B: " << counter++ << '\n'; // 0 + }, + [&] () { + std::cout << "Task C: " << counter++ << '\n'; // 2 + }, + [&] () { + std::cout << "Task D: " << counter++ << '\n'; // 1 + }, + [] () { } +); + +A.precede(B, C, D); +E.succeed(B, C, D); + +B.priority(tf::TaskPriority::HIGH); +C.priority(tf::TaskPriority::LOW); +D.priority(tf::TaskPriority::NORMAL); + +executor.run(taskflow).wait(); +@endcode + +In the above example, we have a task graph of five tasks, +@c A, @c B, @c C, @c D, and @c E, in which @c B, @c C, and @c D +can run in simultaneously when @c A finishes. +Since we only uses one worker thread in the executor, +we can deterministically run @c B first, then @c D, and @c C +in order of their priority values. 
+The output is as follows: + +@code{.shell-session} +Task B: 0 +Task D: 1 +Task C: 2 +@endcode + +*/ +template (TaskPriority::MAX)> +class TaskQueue { + + static_assert(TF_MAX_PRIORITY > 0, "TF_MAX_PRIORITY must be at least one"); + static_assert(std::is_pointer_v, "T must be a pointer type"); + + struct Array { + + int64_t C; + int64_t M; + std::atomic* S; + + explicit Array(int64_t c) : + C {c}, + M {c-1}, + S {new std::atomic[static_cast(C)]} { + } + + ~Array() { + delete [] S; + } + + int64_t capacity() const noexcept { + return C; + } + + void push(int64_t i, T o) noexcept { + S[i & M].store(o, std::memory_order_relaxed); + } + + T pop(int64_t i) noexcept { + return S[i & M].load(std::memory_order_relaxed); + } + + Array* resize(int64_t b, int64_t t) { + Array* ptr = new Array {2*C}; + for(int64_t i=t; i!=b; ++i) { + ptr->push(i, pop(i)); + } + return ptr; + } + + }; + + // Doubling the alignment by 2 seems to generate the most + // decent performance. + CachelineAligned> _top[TF_MAX_PRIORITY]; + CachelineAligned> _bottom[TF_MAX_PRIORITY]; + std::atomic _array[TF_MAX_PRIORITY]; + std::vector _garbage[TF_MAX_PRIORITY]; + + //std::atomic _cache {nullptr}; + + public: + + /** + @brief constructs the queue with a given capacity + + @param capacity the capacity of the queue (must be power of 2) + */ + explicit TaskQueue(int64_t capacity = 512); + + /** + @brief destructs the queue + */ + ~TaskQueue(); + + /** + @brief queries if the queue is empty at the time of this call + */ + bool empty() const noexcept; + + /** + @brief queries if the queue is empty at a specific priority value + */ + bool empty(unsigned priority) const noexcept; + + /** + @brief queries the number of items at the time of this call + */ + size_t size() const noexcept; + + /** + @brief queries the number of items with the given priority + at the time of this call + */ + size_t size(unsigned priority) const noexcept; + + /** + @brief queries the capacity of the queue + */ + int64_t capacity() const noexcept; + + /** + @brief queries the capacity of the queue at a specific priority value + */ + int64_t capacity(unsigned priority) const noexcept; + + /** + @brief inserts an item to the queue + + @param item the item to push to the queue + @param priority priority value of the item to push (default = 0) + + Only the owner thread can insert an item to the queue. + The operation can trigger the queue to resize its capacity + if more space is required. + */ + TF_FORCE_INLINE void push(T item, unsigned priority); + + /** + @brief pops out an item from the queue + + Only the owner thread can pop out an item from the queue. + The return can be a @c nullptr if this operation failed (empty queue). + */ + T pop(); + + /** + @brief pops out an item with a specific priority value from the queue + + @param priority priority of the item to pop + + Only the owner thread can pop out an item from the queue. + The return can be a @c nullptr if this operation failed (empty queue). + */ + TF_FORCE_INLINE T pop(unsigned priority); + + /** + @brief steals an item from the queue + + Any threads can try to steal an item from the queue. + The return can be a @c nullptr if this operation failed (not necessary empty). + */ + T steal(); + + /** + @brief steals an item with a specific priority value from the queue + + @param priority priority of the item to steal + + Any threads can try to steal an item from the queue. + The return can be a @c nullptr if this operation failed (not necessary empty). 
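A minimal sketch of the owner/thief protocol described above; the element type must be a pointer, and the priority argument selects one of the per-priority queues (0 is the highest):

@code{.cpp}
tf::TaskQueue<int*> queue;      // three priority levels by default (TaskPriority::MAX)

int a = 1, b = 2;

// only the owner thread may push and pop
queue.push(&a, /*priority=*/0);
queue.push(&b, /*priority=*/2);
int* top = queue.pop(0);        // pops &a from the highest-priority queue

// any other thread may steal concurrently; steal() scans all priority levels
std::thread thief([&](){
  int* item = queue.steal();    // may be nullptr if the queues appear empty
  (void)item;
});
thief.join();
(void)top;
@endcode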
+ */ + T steal(unsigned priority); + + private: + TF_NO_INLINE Array* resize_array(Array* a, unsigned p, std::int64_t b, std::int64_t t); +}; + +// Constructor +template +TaskQueue::TaskQueue(int64_t c) { + assert(c && (!(c & (c-1)))); + unroll<0, TF_MAX_PRIORITY, 1>([&](auto p){ + _top[p].data.store(0, std::memory_order_relaxed); + _bottom[p].data.store(0, std::memory_order_relaxed); + _array[p].store(new Array{c}, std::memory_order_relaxed); + _garbage[p].reserve(32); + }); +} + +// Destructor +template +TaskQueue::~TaskQueue() { + unroll<0, TF_MAX_PRIORITY, 1>([&](auto p){ + for(auto a : _garbage[p]) { + delete a; + } + delete _array[p].load(); + }); +} + +// Function: empty +template +bool TaskQueue::empty() const noexcept { + for(unsigned i=0; i +bool TaskQueue::empty(unsigned p) const noexcept { + int64_t b = _bottom[p].data.load(std::memory_order_relaxed); + int64_t t = _top[p].data.load(std::memory_order_relaxed); + return (b <= t); +} + +// Function: size +template +size_t TaskQueue::size() const noexcept { + size_t s; + unroll<0, TF_MAX_PRIORITY, 1>([&](auto i) { s = i ? size(i) + s : size(i); }); + return s; +} + +// Function: size +template +size_t TaskQueue::size(unsigned p) const noexcept { + int64_t b = _bottom[p].data.load(std::memory_order_relaxed); + int64_t t = _top[p].data.load(std::memory_order_relaxed); + return static_cast(b >= t ? b - t : 0); +} + +// Function: push +template +TF_FORCE_INLINE void TaskQueue::push(T o, unsigned p) { + + int64_t b = _bottom[p].data.load(std::memory_order_relaxed); + int64_t t = _top[p].data.load(std::memory_order_acquire); + Array* a = _array[p].load(std::memory_order_relaxed); + + // queue is full + if(a->capacity() - 1 < (b - t)) { + a = resize_array(a, p, b, t); + } + + a->push(b, o); + std::atomic_thread_fence(std::memory_order_release); + _bottom[p].data.store(b + 1, std::memory_order_relaxed); +} + +// Function: pop +template +T TaskQueue::pop() { + for(unsigned i=0; i +TF_FORCE_INLINE T TaskQueue::pop(unsigned p) { + + int64_t b = _bottom[p].data.load(std::memory_order_relaxed) - 1; + Array* a = _array[p].load(std::memory_order_relaxed); + _bottom[p].data.store(b, std::memory_order_relaxed); + std::atomic_thread_fence(std::memory_order_seq_cst); + int64_t t = _top[p].data.load(std::memory_order_relaxed); + + T item {nullptr}; + + if(t <= b) { + item = a->pop(b); + if(t == b) { + // the last item just got stolen + if(!_top[p].data.compare_exchange_strong(t, t+1, + std::memory_order_seq_cst, + std::memory_order_relaxed)) { + item = nullptr; + } + _bottom[p].data.store(b + 1, std::memory_order_relaxed); + } + } + else { + _bottom[p].data.store(b + 1, std::memory_order_relaxed); + } + + return item; +} + +// Function: steal +template +T TaskQueue::steal() { + for(unsigned i=0; i +T TaskQueue::steal(unsigned p) { + + int64_t t = _top[p].data.load(std::memory_order_acquire); + std::atomic_thread_fence(std::memory_order_seq_cst); + int64_t b = _bottom[p].data.load(std::memory_order_acquire); + + T item {nullptr}; + + if(t < b) { + Array* a = _array[p].load(std::memory_order_consume); + item = a->pop(t); + if(!_top[p].data.compare_exchange_strong(t, t+1, + std::memory_order_seq_cst, + std::memory_order_relaxed)) { + return nullptr; + } + } + + return item; +} + +// Function: capacity +template +int64_t TaskQueue::capacity() const noexcept { + size_t s; + unroll<0, TF_MAX_PRIORITY, 1>([&](auto i) { + s = i ? 
capacity(i) + s : capacity(i); + }); + return s; +} + +// Function: capacity +template +int64_t TaskQueue::capacity(unsigned p) const noexcept { + return _array[p].load(std::memory_order_relaxed)->capacity(); +} + +template +TF_NO_INLINE typename TaskQueue::Array* + TaskQueue::resize_array(Array* a, unsigned p, std::int64_t b, std::int64_t t) { + + Array* tmp = a->resize(b, t); + _garbage[p].push_back(a); + std::swap(a, tmp); + _array[p].store(a, std::memory_order_release); + // Note: the original paper using relaxed causes t-san to complain + //_array.store(a, std::memory_order_relaxed); + return a; +} + + +} // end of namespace tf ----------------------------------------------------- diff --git a/bundled/taskflow-3.6.0/include/core/worker.hpp b/bundled/taskflow-3.6.0/include/core/worker.hpp new file mode 100644 index 0000000000..47fcf819d6 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/core/worker.hpp @@ -0,0 +1,260 @@ +#pragma once + +#include "declarations.hpp" +#include "tsq.hpp" +#include "notifier.hpp" + +/** +@file worker.hpp +@brief worker include file +*/ + +namespace tf { + +// ---------------------------------------------------------------------------- +// Class Definition: Worker +// ---------------------------------------------------------------------------- + +/** +@class Worker + +@brief class to create a worker in an executor + +The class is primarily used by the executor to perform work-stealing algorithm. +Users can access a worker object and alter its property +(e.g., changing the thread affinity in a POSIX-like system) +using tf::WorkerInterface. +*/ +class Worker { + + friend class Executor; + friend class WorkerView; + + public: + + /** + @brief queries the worker id associated with its parent executor + + A worker id is a unsigned integer in the range [0, N), + where @c N is the number of workers spawned at the construction + time of the executor. 
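These ids can also be observed from inside a task; the sketch below assumes tf::Executor::this_worker_id(), an executor member that is part of this bundle but not shown in this excerpt:

@code{.cpp}
tf::Executor executor(4);
tf::Taskflow taskflow;

taskflow.emplace([&executor](){
  // the calling thread is one of the executor's workers, so the id is in [0, 4)
  std::cout << "running on worker " << executor.this_worker_id() << '\n';
});

executor.run(taskflow).wait();
@endcode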
+ */ + inline size_t id() const { return _id; } + + /** + @brief acquires a pointer access to the underlying thread + */ + inline std::thread* thread() const { return _thread; } + + /** + @brief queries the size of the queue (i.e., number of enqueued tasks to + run) associated with the worker + */ + inline size_t queue_size() const { return _wsq.size(); } + + /** + @brief queries the current capacity of the queue + */ + inline size_t queue_capacity() const { return static_cast(_wsq.capacity()); } + + private: + + size_t _id; + size_t _vtm; + Executor* _executor; + std::thread* _thread; + Notifier::Waiter* _waiter; + std::default_random_engine _rdgen { std::random_device{}() }; + TaskQueue _wsq; + Node* _cache; +}; + +// ---------------------------------------------------------------------------- +// Class Definition: PerThreadWorker +// ---------------------------------------------------------------------------- + +/** +@private +*/ +//struct PerThreadWorker { +// +// Worker* worker; +// +// PerThreadWorker() : worker {nullptr} {} +// +// PerThreadWorker(const PerThreadWorker&) = delete; +// PerThreadWorker(PerThreadWorker&&) = delete; +// +// PerThreadWorker& operator = (const PerThreadWorker&) = delete; +// PerThreadWorker& operator = (PerThreadWorker&&) = delete; +//}; + +/** +@private +*/ +//inline PerThreadWorker& this_worker() { +// thread_local PerThreadWorker worker; +// return worker; +//} + + +// ---------------------------------------------------------------------------- +// Class Definition: WorkerView +// ---------------------------------------------------------------------------- + +/** +@class WorkerView + +@brief class to create an immutable view of a worker in an executor + +An executor keeps a set of internal worker threads to run tasks. +A worker view provides users an immutable interface to observe +when a worker runs a task, and the view object is only accessible +from an observer derived from tf::ObserverInterface. +*/ +class WorkerView { + + friend class Executor; + + public: + + /** + @brief queries the worker id associated with its parent executor + + A worker id is a unsigned integer in the range [0, N), + where @c N is the number of workers spawned at the construction + time of the executor. + */ + size_t id() const; + + /** + @brief queries the size of the queue (i.e., number of pending tasks to + run) associated with the worker + */ + size_t queue_size() const; + + /** + @brief queries the current capacity of the queue + */ + size_t queue_capacity() const; + + private: + + WorkerView(const Worker&); + WorkerView(const WorkerView&) = default; + + const Worker& _worker; + +}; + +// Constructor +inline WorkerView::WorkerView(const Worker& w) : _worker{w} { +} + +// function: id +inline size_t WorkerView::id() const { + return _worker._id; +} + +// Function: queue_size +inline size_t WorkerView::queue_size() const { + return _worker._wsq.size(); +} + +// Function: queue_capacity +inline size_t WorkerView::queue_capacity() const { + return static_cast(_worker._wsq.capacity()); +} + + +// ---------------------------------------------------------------------------- +// Class Definition: WorkerInterface +// ---------------------------------------------------------------------------- + +/** +@class WorkerInterface + +@brief class to configure worker behavior in an executor + +The tf::WorkerInterface class lets users interact with the executor +to customize the worker behavior, +such as calling custom methods before and after a worker enters and leaves +the loop. 
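A sketch of a custom worker interface. The hook names (scheduler_prologue / scheduler_epilogue) and the executor constructor overload that accepts the interface are assumptions about the WorkerInterface API bundled here and are not shown verbatim in this excerpt:

@code{.cpp}
struct MyWorkerBehavior : tf::WorkerInterface {

  // assumed hook: called by each worker right before it enters the scheduling loop
  void scheduler_prologue(tf::Worker& w) override {
    // platform-specific code (e.g., pinning w.thread() to core w.id()) would go here
    std::cout << "worker " << w.id() << " enters the scheduling loop\n";
  }

  // assumed hook: called right after the worker leaves the scheduling loop
  void scheduler_epilogue(tf::Worker& w, std::exception_ptr) override {
    std::cout << "worker " << w.id() << " leaves the scheduling loop\n";
  }
};

// assumed constructor overload taking the interface as the second argument
tf::Executor executor(4, tf::make_worker_interface<MyWorkerBehavior>());
@endcode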
+When you create an executor, it spawns a set of workers to run tasks. +The interaction between the executor and its spawned workers looks like +the following: + +for(size_t n=0; n +std::shared_ptr make_worker_interface(ArgsT&&... args) { + static_assert( + std::is_base_of_v, + "T must be derived from WorkerInterface" + ); + return std::make_shared(std::forward(args)...); +} + +} // end of namespact tf ----------------------------------------------------- + + diff --git a/bundled/taskflow-3.6.0/include/cuda/algorithm/find.hpp b/bundled/taskflow-3.6.0/include/cuda/algorithm/find.hpp new file mode 100644 index 0000000000..9d5b10fd55 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/cuda/algorithm/find.hpp @@ -0,0 +1,295 @@ +#pragma once + +#include "for_each.hpp" +#include "reduce.hpp" + +/** +@file taskflow/cuda/algorithm/find.hpp +@brief cuda find algorithms include file +*/ + +namespace tf::detail { + +/** @private */ +template +struct cudaFindPair { + + T key; + unsigned index; + + __device__ operator unsigned () const { return index; } +}; + +/** @private */ +template +void cuda_find_if_loop(P&& p, I input, unsigned count, unsigned* idx, U pred) { + + if(count == 0) { + cuda_single_task(p, [=] __device__ () { *idx = 0; }); + return; + } + + using E = std::decay_t
<P>
; + + auto B = (count + E::nv - 1) / E::nv; + + // set the index to the maximum + cuda_single_task(p, [=] __device__ () { *idx = count; }); +ls + + // launch the kernel to atomic-find the minimum + cuda_kernel<<>>([=] __device__ (auto tid, auto bid) { + + __shared__ unsigned shm_id; + + if(!tid) { + shm_id = count; + } + + __syncthreads(); + + auto tile = cuda_get_tile(bid, E::nv, count); + + auto x = cuda_mem_to_reg_strided( + input + tile.begin, tid, tile.count() + ); + + auto id = count; + + for(unsigned i=0; i::Storage shm; + + //id = cudaBlockReduce()( + // tid, + // id, + // shm, + // (tile.count() < E::nt ? tile.count() : E::nt), + // cuda_minimum{}, + // false + //); + + // only need the minimum id + atomicMin(&shm_id, id); + __syncthreads(); + + // reduce all to the global memory + if(!tid) { + atomicMin(idx, shm_id); + //atomicMin(idx, id); + } + }); +} + +/** @private */ +template +void cuda_min_element_loop( + P&& p, I input, unsigned count, unsigned* idx, O op, void* ptr +) { + + if(count == 0) { + cuda_single_task(p, [=] __device__ () { *idx = 0; }); + return; + } + + using T = cudaFindPair::value_type>; + + cuda_uninitialized_reduce_loop(p, + cuda_make_load_iterator([=]__device__(auto i){ + return T{*(input+i), i}; + }), + count, + idx, + [=] __device__ (const auto& a, const auto& b) { + return op(a.key, b.key) ? a : b; + }, + ptr + ); +} + +/** @private */ +template +void cuda_max_element_loop( + P&& p, I input, unsigned count, unsigned* idx, O op, void* ptr +) { + + if(count == 0) { + cuda_single_task(p, [=] __device__ () { *idx = 0; }); + return; + } + + using T = cudaFindPair::value_type>; + + cuda_uninitialized_reduce_loop(p, + cuda_make_load_iterator([=]__device__(auto i){ + return T{*(input+i), i}; + }), + count, + idx, + [=] __device__ (const auto& a, const auto& b) { + return op(a.key, b.key) ? b : a; + }, + ptr + ); +} + +} // end of namespace tf::detail --------------------------------------------- + +namespace tf { + + +// ---------------------------------------------------------------------------- +// cuda_find_if +// ---------------------------------------------------------------------------- + +/** +@brief finds the index of the first element that satisfies the given criteria + +@tparam P execution policy type +@tparam I input iterator type +@tparam U unary operator type + +@param p execution policy +@param first iterator to the beginning of the range +@param last iterator to the end of the range +@param idx pointer to the index of the found element +@param op unary operator which returns @c true for the required element + +The function launches kernels asynchronously to find the index @c idx of the +first element in the range [first, last) +such that op(*(first+idx)) is true. 
+This is equivalent to the parallel execution of the following loop: + +@code{.cpp} +unsigned idx = 0; +for(; first != last; ++first, ++idx) { + if (p(*first)) { + return idx; + } +} +return idx; +@endcode +*/ +template +void cuda_find_if( + P&& p, I first, I last, unsigned* idx, U op +) { + detail::cuda_find_if_loop(p, first, std::distance(first, last), idx, op); +} + +// ---------------------------------------------------------------------------- +// cuda_min_element +// ---------------------------------------------------------------------------- + +// Function: min-element_bufsz +template +template +unsigned cudaExecutionPolicy::min_element_bufsz(unsigned count) { + return reduce_bufsz>(count); +} + +/** +@brief finds the index of the minimum element in a range + +@tparam P execution policy type +@tparam I input iterator type +@tparam O comparator type + +@param p execution policy object +@param first iterator to the beginning of the range +@param last iterator to the end of the range +@param idx solution index of the minimum element +@param op comparison function object +@param buf pointer to the buffer + +The function launches kernels asynchronously to find +the smallest element in the range [first, last) +using the given comparator @c op. +You need to provide a buffer that holds at least +tf::cuda_min_element_bufsz bytes for internal use. +The function is equivalent to a parallel execution of the following loop: + +@code{.cpp} +if(first == last) { + return 0; +} +auto smallest = first; +for (++first; first != last; ++first) { + if (op(*first, *smallest)) { + smallest = first; + } +} +return std::distance(first, smallest); +@endcode +*/ +template +void cuda_min_element(P&& p, I first, I last, unsigned* idx, O op, void* buf) { + detail::cuda_min_element_loop( + p, first, std::distance(first, last), idx, op, buf + ); +} + +// ---------------------------------------------------------------------------- +// cuda_max_element +// ---------------------------------------------------------------------------- + +// Function: max_element_bufsz +template +template +unsigned cudaExecutionPolicy::max_element_bufsz(unsigned count) { + return reduce_bufsz>(count); +} + +/** +@brief finds the index of the maximum element in a range + +@tparam P execution policy type +@tparam I input iterator type +@tparam O comparator type + +@param p execution policy object +@param first iterator to the beginning of the range +@param last iterator to the end of the range +@param idx solution index of the maximum element +@param op comparison function object +@param buf pointer to the buffer + +The function launches kernels asynchronously to find +the largest element in the range [first, last) +using the given comparator @c op. +You need to provide a buffer that holds at least +tf::cuda_max_element_bufsz bytes for internal use. 
+The function is equivalent to a parallel execution of the following loop: + +@code{.cpp} +if(first == last) { + return 0; +} +auto largest = first; +for (++first; first != last; ++first) { + if (op(*largest, *first)) { + largest = first; + } +} +return std::distance(first, largest); +@endcode +*/ +template +void cuda_max_element(P&& p, I first, I last, unsigned* idx, O op, void* buf) { + detail::cuda_max_element_loop( + p, first, std::distance(first, last), idx, op, buf + ); +} + + +} // end of namespace tf ----------------------------------------------------- + + diff --git a/bundled/taskflow-3.6.0/include/cuda/algorithm/for_each.hpp b/bundled/taskflow-3.6.0/include/cuda/algorithm/for_each.hpp new file mode 100644 index 0000000000..38a6f85977 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/cuda/algorithm/for_each.hpp @@ -0,0 +1,315 @@ +#pragma once + +#include "../cudaflow.hpp" + +/** +@file taskflow/cuda/algorithm/for_each.hpp +@brief cuda parallel-iteration algorithms include file +*/ + +namespace tf { + +namespace detail { + +/** +@private +*/ +template +__global__ void cuda_for_each_kernel(I first, unsigned count, C c) { + auto tid = threadIdx.x; + auto bid = blockIdx.x; + auto tile = cuda_get_tile(bid, nt*vt, count); + cuda_strided_iterate( + [=](auto, auto j) { + c(*(first + tile.begin + j)); + }, + tid, tile.count() + ); +} + +/** @private */ +template +__global__ void cuda_for_each_index_kernel(I first, I inc, unsigned count, C c) { + auto tid = threadIdx.x; + auto bid = blockIdx.x; + auto tile = cuda_get_tile(bid, nt*vt, count); + cuda_strided_iterate( + [=]__device__(auto, auto j) { + c(first + inc*(tile.begin+j)); + }, + tid, tile.count() + ); +} + +} // end of namespace detail ------------------------------------------------- + +// ---------------------------------------------------------------------------- +// cuda standard algorithms: single_task/for_each/for_each_index +// ---------------------------------------------------------------------------- + +/** +@brief runs a callable asynchronously using one kernel thread + +@tparam P execution policy type +@tparam C closure type + +@param p execution policy +@param c closure to run by one kernel thread + +The function launches a single kernel thread to run the given callable +through the stream in the execution policy object. +*/ +template +void cuda_single_task(P&& p, C c) { + cuda_kernel<<<1, 1, 0, p.stream()>>>( + [=]__device__(auto, auto) mutable { c(); } + ); +} + +/** +@brief performs asynchronous parallel iterations over a range of items + +@tparam P execution policy type +@tparam I input iterator type +@tparam C unary operator type + +@param p execution policy object +@param first iterator to the beginning of the range +@param last iterator to the end of the range +@param c unary operator to apply to each dereferenced iterator + +This function is equivalent to a parallel execution of the following loop +on a GPU: + +@code{.cpp} +for(auto itr = first; itr != last; itr++) { + c(*itr); +} +@endcode +*/ +template +void cuda_for_each(P&& p, I first, I last, C c) { + + using E = std::decay_t
<P>
; + + unsigned count = std::distance(first, last); + + if(count == 0) { + return; + } + + detail::cuda_for_each_kernel<<>>( + first, count, c + ); +} + +/** +@brief performs asynchronous parallel iterations over + an index-based range of items + +@tparam P execution policy type +@tparam I input index type +@tparam C unary operator type + +@param p execution policy object +@param first index to the beginning of the range +@param last index to the end of the range +@param inc step size between successive iterations +@param c unary operator to apply to each index + +This function is equivalent to a parallel execution of +the following loop on a GPU: + +@code{.cpp} +// step is positive [first, last) +for(auto i=first; ilast; i+=step) { + c(i); +} +@endcode +*/ +template +void cuda_for_each_index(P&& p, I first, I last, I inc, C c) { + + using E = std::decay_t
<P>
; + + unsigned count = distance(first, last, inc); + + if(count == 0) { + return; + } + + detail::cuda_for_each_index_kernel<<>>( + first, inc, count, c + ); +} + +// ---------------------------------------------------------------------------- +// single_task +// ---------------------------------------------------------------------------- + +/** @private */ +template +__global__ void cuda_single_task(C callable) { + callable(); +} + +// Function: single_task +template +cudaTask cudaFlow::single_task(C c) { + return kernel(1, 1, 0, cuda_single_task, c); +} + +// Function: single_task +template +void cudaFlow::single_task(cudaTask task, C c) { + return kernel(task, 1, 1, 0, cuda_single_task, c); +} + +// Function: single_task +template +cudaTask cudaFlowCapturer::single_task(C callable) { + return on([=] (cudaStream_t stream) mutable { + cuda_single_task(cudaDefaultExecutionPolicy(stream), callable); + }); +} + +// Function: single_task +template +void cudaFlowCapturer::single_task(cudaTask task, C callable) { + on(task, [=] (cudaStream_t stream) mutable { + cuda_single_task(cudaDefaultExecutionPolicy(stream), callable); + }); +} + +// ---------------------------------------------------------------------------- +// cudaFlow: for_each, for_each_index +// ---------------------------------------------------------------------------- + +// Function: for_each +template +cudaTask cudaFlow::for_each(I first, I last, C c) { + + using E = cudaDefaultExecutionPolicy; + + unsigned count = std::distance(first, last); + + // TODO: + //if(count == 0) { + // return; + //} + + return kernel( + E::num_blocks(count), E::nt, 0, + detail::cuda_for_each_kernel, first, count, c + ); +} + +// Function: for_each +template +void cudaFlow::for_each(cudaTask task, I first, I last, C c) { + + using E = cudaDefaultExecutionPolicy; + + unsigned count = std::distance(first, last); + + // TODO: + //if(count == 0) { + // return; + //} + + kernel(task, + E::num_blocks(count), E::nt, 0, + detail::cuda_for_each_kernel, first, count, c + ); +} + +// Function: for_each_index +template +cudaTask cudaFlow::for_each_index(I first, I last, I inc, C c) { + + using E = cudaDefaultExecutionPolicy; + + unsigned count = distance(first, last, inc); + + // TODO: + //if(count == 0) { + // return; + //} + + return kernel( + E::num_blocks(count), E::nt, 0, + detail::cuda_for_each_index_kernel, first, inc, count, c + ); +} + +// Function: for_each_index +template +void cudaFlow::for_each_index(cudaTask task, I first, I last, I inc, C c) { + + using E = cudaDefaultExecutionPolicy; + + unsigned count = distance(first, last, inc); + + // TODO: + //if(count == 0) { + // return; + //} + + return kernel(task, + E::num_blocks(count), E::nt, 0, + detail::cuda_for_each_index_kernel, first, inc, count, c + ); +} + +// ---------------------------------------------------------------------------- +// cudaFlowCapturer: for_each, for_each_index +// ---------------------------------------------------------------------------- + +// Function: for_each +template +cudaTask cudaFlowCapturer::for_each(I first, I last, C c) { + return on([=](cudaStream_t stream) mutable { + cuda_for_each(cudaDefaultExecutionPolicy(stream), first, last, c); + }); +} + +// Function: for_each_index +template +cudaTask cudaFlowCapturer::for_each_index(I beg, I end, I inc, C c) { + return on([=] (cudaStream_t stream) mutable { + cuda_for_each_index(cudaDefaultExecutionPolicy(stream), beg, end, inc, c); + }); +} + +// Function: for_each +template +void cudaFlowCapturer::for_each(cudaTask 
task, I first, I last, C c) { + on(task, [=](cudaStream_t stream) mutable { + cuda_for_each(cudaDefaultExecutionPolicy(stream), first, last, c); + }); +} + +// Function: for_each_index +template +void cudaFlowCapturer::for_each_index( + cudaTask task, I beg, I end, I inc, C c +) { + on(task, [=] (cudaStream_t stream) mutable { + cuda_for_each_index(cudaDefaultExecutionPolicy(stream), beg, end, inc, c); + }); +} + + + +} // end of namespace tf ----------------------------------------------------- + + + + + + diff --git a/bundled/taskflow-3.6.0/include/cuda/algorithm/matmul.hpp b/bundled/taskflow-3.6.0/include/cuda/algorithm/matmul.hpp new file mode 100644 index 0000000000..d0f6620a19 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/cuda/algorithm/matmul.hpp @@ -0,0 +1,57 @@ +#pragma once + +#include "../cudaflow.hpp" + +namespace tf { + +// ---------------------------------------------------------------------------- +// row-major matrix multiplication +// ---------------------------------------------------------------------------- + +template +__global__ void cuda_matmul( + const T* A, + const T* B, + T* C, + size_t M, + size_t K, + size_t N +) { + __shared__ T A_tile[32][32]; + __shared__ T B_tile[32][32]; + + size_t x = blockIdx.x * blockDim.x + threadIdx.x; + size_t y = blockIdx.y * blockDim.y + threadIdx.y; + + T res = 0; + + for(size_t k = 0; k < K; k += 32) { + if((threadIdx.x + k) < K && y < M) { + A_tile[threadIdx.y][threadIdx.x] = A[y * K + threadIdx.x + k]; + } + else{ + A_tile[threadIdx.y][threadIdx.x] = 0; + } + + if((threadIdx.y + k) < K && x < N) { + B_tile[threadIdx.y][threadIdx.x] = B[(threadIdx.y + k) * N + x]; + } + else{ + B_tile[threadIdx.y][threadIdx.x] = 0; + } + + __syncthreads(); + + for(size_t i = 0; i < 32; ++i) { + res += A_tile[threadIdx.y][i] * B_tile[i][threadIdx.x]; + } + __syncthreads(); + } + + if(x < N && y < M) { + C[y * N + x] = res; + } + +} + +} // end of namespace tf --------------------------------------------------------- diff --git a/bundled/taskflow-3.6.0/include/cuda/algorithm/merge.hpp b/bundled/taskflow-3.6.0/include/cuda/algorithm/merge.hpp new file mode 100644 index 0000000000..d325491c8b --- /dev/null +++ b/bundled/taskflow-3.6.0/include/cuda/algorithm/merge.hpp @@ -0,0 +1,585 @@ +#pragma once + +#include "../cudaflow.hpp" + +/** +@file taskflow/cuda/algorithm/merge.hpp +@brief CUDA merge algorithm include file +*/ + +namespace tf::detail { + +/** +@private +@brief merge bound type +*/ +enum class cudaMergeBoundType { + LOWER, + UPPER +}; + +/** @private */ +template +struct cudaMergePair { + cudaArray keys; + cudaArray indices; +}; + +/** @private */ +struct cudaMergeRange { + unsigned a_begin, a_end, b_begin, b_end; + + __device__ unsigned a_count() const { return a_end - a_begin; } + __device__ unsigned b_count() const { return b_end - b_begin; } + __device__ unsigned total() const { return a_count() + b_count(); } + + __device__ cudaRange a_range() const { + return cudaRange { a_begin, a_end }; + } + __device__ cudaRange b_range() const { + return cudaRange { b_begin, b_end }; + } + + __device__ cudaMergeRange to_local() const { + return cudaMergeRange { 0, a_count(), a_count(), total() }; + } + + // Partition from mp to the end. + __device__ cudaMergeRange partition(unsigned mp0, unsigned diag) const { + return cudaMergeRange { a_begin + mp0, a_end, b_begin + diag - mp0, b_end }; + } + + // Partition from mp0 to mp1. 
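Stepping back briefly to the tiled cuda_matmul kernel in matmul.hpp above: it assumes a 32x32 thread block that matches its shared-memory tiles. A minimal host-side launch sketch, where d_A (M x K), d_B (K x N) and d_C (M x N) are device buffers assumed to be allocated and filled by the caller (names and setup are illustrative, not part of the bundled header):

@code{.cpp}
// grid.x covers the N columns and grid.y the M rows, matching the x/y
// indexing inside cuda_matmul; partial tiles at the edges are handled by
// the bounds checks in the kernel itself.
dim3 block(32, 32);
dim3 grid(unsigned((N + 31) / 32), unsigned((M + 31) / 32));
tf::cuda_matmul<<<grid, block>>>(d_A, d_B, d_C, M, K, N);
cudaDeviceSynchronize();
@endcode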
+ __device__ cudaMergeRange partition(unsigned mp0, unsigned diag0, + unsigned mp1, unsigned diag1) const { + return cudaMergeRange { + a_begin + mp0, + a_begin + mp1, + b_begin + diag0 - mp0, + b_begin + diag1 - mp1 + }; + } + + __device__ bool a_valid() const { + return a_begin < a_end; + } + + __device__ bool b_valid() const { + return b_begin < b_end; + } +}; + +/** @private */ +template< + cudaMergeBoundType bounds = cudaMergeBoundType::LOWER, + typename a_keys_it, typename b_keys_it, typename comp_t +> +__device__ auto cuda_merge_path( + a_keys_it a_keys, unsigned a_count, + b_keys_it b_keys, unsigned b_count, + unsigned diag, comp_t comp +) { + + unsigned beg = (diag > b_count) ? diag - b_count : 0; + unsigned end = diag < a_count ? diag : a_count; + + while(beg < end) { + auto mid = (beg + end) / 2; + auto a_key = a_keys[mid]; + auto b_key = b_keys[diag - 1 - mid]; + bool pred = (cudaMergeBoundType::UPPER == bounds) ? + comp(a_key, b_key) : + !comp(b_key, a_key); + + if(pred) beg = mid + 1; + else end = mid; + } + return beg; +} + +/** @private */ +template +__device__ auto cuda_merge_path( + keys_it keys, cudaMergeRange range, unsigned diag, comp_t comp +) { + + return cuda_merge_path( + keys + range.a_begin, range.a_count(), + keys + range.b_begin, range.b_count(), + diag, comp); +} + +/** @private */ +template +__device__ bool cuda_merge_predicate( + T a_key, T b_key, cudaMergeRange range, comp_t comp +) { + + bool p; + if(range_check && !range.a_valid()) { + p = false; + } + else if(range_check && !range.b_valid()) { + p = true; + } + else { + p = (cudaMergeBoundType::UPPER == bounds) ? comp(a_key, b_key) : + !comp(b_key, a_key); + } + return p; +} + +/** @private */ +inline __device__ auto cuda_compute_merge_range( + unsigned a_count, unsigned b_count, + unsigned partition, unsigned spacing, + unsigned mp0, unsigned mp1 +) { + + auto diag0 = spacing * partition; + auto diag1 = min(a_count + b_count, diag0 + spacing); + + return cudaMergeRange { mp0, mp1, diag0 - mp0, diag1 - mp1 }; +} + +/** +@private + +Specialization that emits just one LD instruction. Can only reliably used +with raw pointer types. Fixed not to use pointer arithmetic so that +we don't get undefined behaviors with unaligned types. +*/ +template +__device__ auto cuda_load_two_streams_reg( + const T* a, unsigned a_count, const T* b, unsigned b_count, unsigned tid +) { + + b -= a_count; + cudaArray x; + cuda_strided_iterate([&](auto i, auto index) { + const T* p = (index >= a_count) ? b : a; + x[i] = p[index]; + }, tid, a_count + b_count); + + return x; +} + +/** @private */ +template +__device__ +std::enable_if_t< + !(std::is_pointer::value && std::is_pointer::value), + cudaArray +> load_two_streams_reg(a_it a, unsigned a_count, b_it b, unsigned b_count, unsigned tid) { + b -= a_count; + cudaArray x; + cuda_strided_iterate([&](auto i, auto index) { + x[i] = (index < a_count) ? a[index] : b[index]; + }, tid, a_count + b_count); + return x; +} + +/** @private */ +template +__device__ void cuda_load_two_streams_shared(A a, unsigned a_count, + B b, unsigned b_count, unsigned tid, T (&shared)[S], bool sync = true +) { + // Load into register then make an unconditional strided store into memory. 
+ auto x = cuda_load_two_streams_reg(a, a_count, b, b_count, tid); + cuda_reg_to_shared_strided(x, tid, shared, sync); +} + +/** @private */ +template +__device__ auto cuda_gather_two_streams_strided(const T* a, + unsigned a_count, const T* b, unsigned b_count, cudaArray indices, + unsigned tid) { + + ptrdiff_t b_offset = b - a - a_count; + auto count = a_count + b_count; + + cudaArray x; + cuda_strided_iterate([&](auto i, auto j) { + ptrdiff_t gather = indices[i]; + if(gather >= a_count) gather += b_offset; + x[i] = a[gather]; + }, tid, count); + + return x; +} + +/** @private */ +template +__device__ +std::enable_if_t< + !(std::is_pointer::value && std::is_pointer::value), + cudaArray +> cuda_gather_two_streams_strided(a_it a, + unsigned a_count, b_it b, unsigned b_count, cudaArray indices, unsigned tid) { + + b -= a_count; + cudaArray x; + cuda_strided_iterate([&](auto i, auto j) { + x[i] = (indices[i] < a_count) ? a[indices[i]] : b[indices[i]]; + }, tid, a_count + b_count); + + return x; +} + +/** @private */ +template +__device__ void cuda_transfer_two_streams_strided( + a_it a, unsigned a_count, b_it b, unsigned b_count, + cudaArray indices, unsigned tid, c_it c +) { + + using T = typename std::iterator_traits::value_type; + auto x = cuda_gather_two_streams_strided( + a, a_count, b, b_count, indices, tid + ); + + cuda_reg_to_mem_strided(x, tid, a_count + b_count, c); +} + + +/** +@private + +This function must be able to dereference keys[a_begin] and keys[b_begin], +no matter the indices for each. The caller should allocate at least +nt * vt + 1 elements for +*/ +template +__device__ auto cuda_serial_merge( + const T* keys_shared, cudaMergeRange range, comp_t comp, bool sync = true +) { + + auto a_key = keys_shared[range.a_begin]; + auto b_key = keys_shared[range.b_begin]; + + cudaMergePair merge_pair; + cuda_iterate([&](auto i) { + bool p = cuda_merge_predicate(a_key, b_key, range, comp); + auto index = p ? range.a_begin : range.b_begin; + + merge_pair.keys[i] = p ? a_key : b_key; + merge_pair.indices[i] = index; + + T c_key = keys_shared[++index]; + if(p) a_key = c_key, range.a_begin = index; + else b_key = c_key, range.b_begin = index; + }); + + if(sync) __syncthreads(); + return merge_pair; +} + +/** +@private + +Load arrays a and b from global memory and merge unsignedo register. +*/ +template +__device__ auto block_merge_from_mem( + a_it a, b_it b, cudaMergeRange range_mem, unsigned tid, comp_t comp, T (&keys_shared)[S] +) { + + static_assert(S >= nt * vt + 1, + "block_merge_from_mem requires temporary storage of at " + "least nt * vt + 1 items"); + + // Load the data into shared memory. + cuda_load_two_streams_shared( + a + range_mem.a_begin, range_mem.a_count(), + b + range_mem.b_begin, range_mem.b_count(), + tid, keys_shared, true + ); + + // Run a merge path to find the start of the serial merge for each thread. + auto range_local = range_mem.to_local(); + auto diag = vt * tid; + auto mp = cuda_merge_path(keys_shared, range_local, diag, comp); + + // Compute the ranges of the sources in shared memory. The end iterators + // of the range are inaccurate, but still facilitate exact merging, because + // only vt elements will be merged. 
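  // Each thread owns output diagonal diag = vt * tid of this tile and emits
  // exactly vt merged keys: mp counts the A keys that precede that diagonal,
  // so the serial merge starts at A index mp and at B index diag - mp.
  // For intuition, with A = {1, 3, 5}, B = {2, 4, 6} and diag = 3 the merge
  // path is 2, since the first three merged keys {1, 2, 3} take two from A.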
+ auto merged = cuda_serial_merge( + keys_shared, range_local.partition(mp, diag), comp + ); + + return merged; +}; + +/** @private */ +template +void cuda_merge_path_partitions( + P&& p, + a_keys_it a, unsigned a_count, + b_keys_it b, unsigned b_count, + unsigned spacing, + comp_t comp, + unsigned* buf +) { + + //int num_partitions = (int)div_up(a_count + b_count, spacing) + 1; + + unsigned num_partitions = (a_count + b_count + spacing - 1) / spacing + 1; + + const unsigned nt = 128; + const unsigned vt = 1; + const unsigned nv = nt * vt; + + unsigned B = (num_partitions + nv - 1) / nv; // nt = 128, vt = 1 + + cuda_kernel<<>>([=]__device__(auto tid, auto bid) { + auto range = cuda_get_tile(bid, nt * vt, num_partitions); + cuda_strided_iterate([=](auto, auto j) { + auto index = range.begin + j; + auto diag = min(spacing * index, a_count + b_count); + buf[index] = cuda_merge_path(a, a_count, b, b_count, diag, comp); + }, tid, range.count()); + }); +} + +//template +//auto load_balance_partitions(int64_t dest_count, segments_it segments, +// int num_segments, int spacing, context_t& context) -> +// mem_t::value_type> { +// +// typedef typename std::iterator_traits::value_type int_t; +// return merge_path_partitions(counting_iterator_t(0), +// dest_count, segments, num_segments, spacing, less_t(), context); +//} + +//template +//mem_t binary_search_partitions(keys_it keys, int count, int num_items, +// int spacing, context_t& context) { +// +// int num_partitions = div_up(count, spacing) + 1; +// mem_t mem(num_partitions, context); +// int* p = mem.data(); +// transform([=]MGPU_DEVICE(int index) { +// int key = min(spacing * index, count); +// p[index] = binary_search(keys, num_items, key, less_t()); +// }, num_partitions, context); +// return mem; +//} + +/** @private */ +template< + typename P, + typename a_keys_it, typename a_vals_it, + typename b_keys_it, typename b_vals_it, + typename c_keys_it, typename c_vals_it, + typename comp_t +> +void cuda_merge_loop( + P&& p, + a_keys_it a_keys, a_vals_it a_vals, unsigned a_count, + b_keys_it b_keys, b_vals_it b_vals, unsigned b_count, + c_keys_it c_keys, c_vals_it c_vals, + comp_t comp, + void* ptr +) { + + using E = std::decay_t
<P>
; + using T = typename std::iterator_traits::value_type; + using V = typename std::iterator_traits::value_type; + + auto buf = static_cast(ptr); + + auto has_values = !std::is_same::value; + + cuda_merge_path_partitions( + p, a_keys, a_count, b_keys, b_count, E::nv, comp, buf + ); + + unsigned B = p.num_blocks(a_count + b_count); + + // we use small kernel + cuda_kernel<<>>([=] __device__ (auto tid, auto bid) { + + __shared__ union { + T keys[E::nv + 1]; + unsigned indices[E::nv]; + } shared; + + // Load the range for this CTA and merge the values into register. + auto mp0 = buf[bid + 0]; + auto mp1 = buf[bid + 1]; + auto range = cuda_compute_merge_range(a_count, b_count, bid, E::nv, mp0, mp1); + + auto merge = block_merge_from_mem( + a_keys, b_keys, range, tid, comp, shared.keys + ); + + auto dest_offset = E::nv * bid; + cuda_reg_to_mem_thread( + merge.keys, tid, range.total(), c_keys + dest_offset, shared.keys + ); + + if(has_values) { + // Transpose the indices from thread order to strided order. + auto indices = cuda_reg_thread_to_strided( + merge.indices, tid, shared.indices + ); + + // Gather the input values and merge into the output values. + cuda_transfer_two_streams_strided( + a_vals + range.a_begin, range.a_count(), + b_vals + range.b_begin, range.b_count(), indices, tid, + c_vals + dest_offset + ); + } + }); +} + +} // end of namespace tf::detail --------------------------------------------- + +namespace tf { + +// ---------------------------------------------------------------------------- +// standalone merge algorithms +// ---------------------------------------------------------------------------- + +// Function: merge_bufsz +template +unsigned cudaExecutionPolicy::merge_bufsz(unsigned a_count, unsigned b_count) { + return sizeof(unsigned) * (num_blocks(a_count + b_count + nv) + 1); +} + + +// ---------------------------------------------------------------------------- +// key-value merge +// ---------------------------------------------------------------------------- + +/** +@brief performs asynchronous key-value merge over a range of keys and values + +@tparam P execution policy type +@tparam a_keys_it first key iterator type +@tparam a_vals_it first value iterator type +@tparam b_keys_it second key iterator type +@tparam b_vals_it second value iterator type +@tparam c_keys_it output key iterator type +@tparam c_vals_it output value iterator type +@tparam C comparator type + +@param p execution policy +@param a_keys_first iterator to the beginning of the first key range +@param a_keys_last iterator to the end of the first key range +@param a_vals_first iterator to the beginning of the first value range +@param b_keys_first iterator to the beginning of the second key range +@param b_keys_last iterator to the end of the second key range +@param b_vals_first iterator to the beginning of the second value range +@param c_keys_first iterator to the beginning of the output key range +@param c_vals_first iterator to the beginning of the output value range +@param comp comparator +@param buf pointer to the temporary buffer + +Performs a key-value merge that copies elements from +[a_keys_first, a_keys_last) and [b_keys_first, b_keys_last) +into a single range, [c_keys_first, c_keys_last + (a_keys_last - a_keys_first) + (b_keys_last - b_keys_first)) +such that the resulting range is in ascending key order. 
+ +At the same time, the merge copies elements from the two associated ranges +[a_vals_first + (a_keys_last - a_keys_first)) and +[b_vals_first + (b_keys_last - b_keys_first)) into a single range, +[c_vals_first, c_vals_first + (a_keys_last - a_keys_first) + (b_keys_last - b_keys_first)) +such that the resulting range is in ascending order +implied by each input element's associated key. + +For example, assume: + + @c a_keys = {1, 8}; + + @c a_vals = {2, 1}; + + @c b_keys = {3, 7}; + + @c b_vals = {3, 4}; + +After the merge, we have: + + @c c_keys = {1, 3, 7, 8} + + @c c_vals = {2, 3, 4, 1} + +*/ +template< + typename P, + typename a_keys_it, typename a_vals_it, + typename b_keys_it, typename b_vals_it, + typename c_keys_it, typename c_vals_it, + typename C +> +void cuda_merge_by_key( + P&& p, + a_keys_it a_keys_first, a_keys_it a_keys_last, a_vals_it a_vals_first, + b_keys_it b_keys_first, b_keys_it b_keys_last, b_vals_it b_vals_first, + c_keys_it c_keys_first, c_vals_it c_vals_first, + C comp, + void* buf +) { + + unsigned a_count = std::distance(a_keys_first, a_keys_last); + unsigned b_count = std::distance(b_keys_first, b_keys_last); + + if(a_count + b_count == 0) { + return; + } + + detail::cuda_merge_loop(p, + a_keys_first, a_vals_first, a_count, + b_keys_first, b_vals_first, b_count, + c_keys_first, c_vals_first, comp, + buf + ); +} + +// ---------------------------------------------------------------------------- +// key-only merge +// ---------------------------------------------------------------------------- + +/** +@brief performs asynchronous key-only merge over a range of keys + +@tparam P execution policy type +@tparam a_keys_it first key iterator type +@tparam b_keys_it second key iterator type +@tparam c_keys_it output key iterator type +@tparam C comparator type + +@param p execution policy +@param a_keys_first iterator to the beginning of the first key range +@param a_keys_last iterator to the end of the first key range +@param b_keys_first iterator to the beginning of the second key range +@param b_keys_last iterator to the end of the second key range +@param c_keys_first iterator to the beginning of the output key range +@param comp comparator +@param buf pointer to the temporary buffer + +This function is equivalent to tf::cuda_merge_by_key without values. 
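As a usage sketch (array names, sizes, and the comparator are illustrative and assumed to be set up by the caller, not part of the bundled header): given two sorted device arrays a_keys of N_a ints and b_keys of N_b ints, and an output array c_keys of N_a + N_b ints, the merge can be driven as follows.

@code{.cpp}
struct Less {  // illustrative device-side comparator
  __device__ bool operator()(int a, int b) const { return a < b; }
};

tf::cudaDefaultExecutionPolicy policy(stream);   // stream created by the caller

void* buf = nullptr;                             // temporary buffer for the merge
cudaMalloc(&buf, policy.merge_bufsz(N_a, N_b));

tf::cuda_merge(policy,
  a_keys, a_keys + N_a,
  b_keys, b_keys + N_b,
  c_keys, Less{}, buf
);

cudaStreamSynchronize(stream);                   // the merge runs asynchronously
cudaFree(buf);
@endcode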
+ +*/ +template +void cuda_merge( + P&& p, + a_keys_it a_keys_first, a_keys_it a_keys_last, + b_keys_it b_keys_first, b_keys_it b_keys_last, + c_keys_it c_keys_first, + C comp, + void* buf +) { + cuda_merge_by_key( + p, + a_keys_first, a_keys_last, (const cudaEmpty*)nullptr, + b_keys_first, b_keys_last, (const cudaEmpty*)nullptr, + c_keys_first, (cudaEmpty*)nullptr, comp, + buf + ); +} + + +} // end of namespace tf ----------------------------------------------------- diff --git a/bundled/taskflow-3.6.0/include/cuda/algorithm/reduce.hpp b/bundled/taskflow-3.6.0/include/cuda/algorithm/reduce.hpp new file mode 100644 index 0000000000..d6ba33244d --- /dev/null +++ b/bundled/taskflow-3.6.0/include/cuda/algorithm/reduce.hpp @@ -0,0 +1,460 @@ +#pragma once + +#include "../cudaflow.hpp" + +/** +@file taskflow/cuda/algorithm/reduce.hpp +@brief cuda reduce algorithms include file +*/ + +namespace tf::detail { + +// ---------------------------------------------------------------------------- +// reduction helper functions +// ---------------------------------------------------------------------------- + +/** @private */ +template +struct cudaBlockReduce { + + static const unsigned group_size = std::min(nt, CUDA_WARP_SIZE); + static const unsigned num_passes = log2(group_size); + static const unsigned num_items = nt / group_size; + + static_assert( + nt && (0 == nt % CUDA_WARP_SIZE), + "cudaBlockReduce requires num threads to be a multiple of warp_size (32)" + ); + + /** @private */ + struct Storage { + T data[std::max(nt, 2 * group_size)]; + }; + + template + __device__ T operator()(unsigned, T, Storage&, unsigned, op_t, bool = true) const; +}; + +// function: reduce to be called from a block +template +template +__device__ T cudaBlockReduce::operator ()( + unsigned tid, T x, Storage& storage, unsigned count, op_t op, bool ret +) const { + + // Store your data into shared memory. + storage.data[tid] = x; + __syncthreads(); + + if(tid < group_size) { + // Each thread scans within its lane. + cuda_strided_iterate([&](auto i, auto j) { + if(i > 0) { + x = op(x, storage.data[j]); + } + }, tid, count); + storage.data[tid] = x; + } + __syncthreads(); + + auto count2 = count < group_size ? count : group_size; + auto first = (1 & num_passes) ? group_size : 0; + if(tid < group_size) { + storage.data[first + tid] = x; + } + __syncthreads(); + + cuda_iterate([&](auto pass) { + if(tid < group_size) { + if(auto offset = 1 << pass; tid + offset < count2) { + x = op(x, storage.data[first + offset + tid]); + } + first = group_size - first; + storage.data[first + tid] = x; + } + __syncthreads(); + }); + + if(ret) { + x = storage.data[0]; + __syncthreads(); + } + return x; +} + +// ---------------------------------------------------------------------------- +// cuda_reduce +// ---------------------------------------------------------------------------- + +/** +@private +*/ +template +__global__ void cuda_reduce_kernel( + I input, unsigned count, T* res, O op, void* ptr +) { + + using U = typename std::iterator_traits::value_type; + + __shared__ typename cudaBlockReduce::Storage shm; + + auto tid = threadIdx.x; + auto bid = blockIdx.x; + auto tile = cuda_get_tile(bid, nt*vt, count); + auto x = cuda_mem_to_reg_strided( + input + tile.begin, tid, tile.count() + ); + + // reduce multiple values per thread into a scalar. + U s; + cuda_strided_iterate( + [&] (auto i, auto) { s = i ? op(s, x[i]) : x[0]; }, tid, tile.count() + ); + // reduce to a scalar per block. + s = cudaBlockReduce()( + tid, s, shm, (tile.count() < nt ? 
tile.count() : nt), op, false + ); + + if(!tid) { + auto buf = static_cast(ptr); + (count <= nt*vt) ? *res = op(*res, s) : buf[bid] = s; + } +} + +/** @private */ +template +void cuda_reduce_loop( + P&& p, I input, unsigned count, T* res, O op, void* ptr +) { + + using U = typename std::iterator_traits::value_type; + using E = std::decay_t
<P>
; + + auto buf = static_cast(ptr); + auto B = E::num_blocks(count); + + cuda_reduce_kernel<<>>( + input, count, res, op, ptr + ); + + if(B > 1) { + cuda_reduce_loop(p, buf, B, res, op, buf+B); + } +} + +// ---------------------------------------------------------------------------- +// cuda_uninitialized_reduce +// ---------------------------------------------------------------------------- + +/** +@private +*/ +template +__global__ void cuda_uninitialized_reduce_kernel( + I input, unsigned count, T* res, O op, void* ptr +) { + + using U = typename std::iterator_traits::value_type; + + __shared__ typename cudaBlockReduce::Storage shm; + + auto tid = threadIdx.x; + auto bid = blockIdx.x; + auto tile = cuda_get_tile(bid, nt*vt, count); + auto x = cuda_mem_to_reg_strided( + input + tile.begin, tid, tile.count() + ); + + // reduce multiple values per thread into a scalar. + U s; + cuda_strided_iterate( + [&] (auto i, auto) { s = i ? op(s, x[i]) : x[0]; }, tid, tile.count() + ); + + // reduce to a scalar per block. + s = cudaBlockReduce()( + tid, s, shm, (tile.count() < nt ? tile.count() : nt), op, false + ); + + if(!tid) { + auto buf = static_cast(ptr); + (count <= nt*vt) ? *res = s : buf[bid] = s; + } +} + +/** +@private +*/ +template +void cuda_uninitialized_reduce_loop( + P&& p, I input, unsigned count, T* res, O op, void* ptr +) { + + using U = typename std::iterator_traits::value_type; + using E = std::decay_t
<P>
; + + auto buf = static_cast(ptr); + auto B = (count + E::nv - 1) / E::nv; + + cuda_uninitialized_reduce_kernel<<>>( + input, count, res, op, buf + ); + + if(B > 1) { + cuda_uninitialized_reduce_loop(p, buf, B, res, op, buf+B); + } +} + +} // namespace tf::detail ---------------------------------------------------- + +namespace tf { + +// Function: reduce_bufsz +template +template +unsigned cudaExecutionPolicy::reduce_bufsz(unsigned count) { + unsigned B = num_blocks(count); + unsigned n = 0; + while(B > 1) { + n += B; + B = num_blocks(B); + } + return n*sizeof(T); +} + +// ---------------------------------------------------------------------------- +// cuda_reduce +// ---------------------------------------------------------------------------- + +/** +@brief performs asynchronous parallel reduction over a range of items + +@tparam P execution policy type +@tparam I input iterator type +@tparam T value type +@tparam O binary operator type + +@param p execution policy +@param first iterator to the beginning of the range +@param last iterator to the end of the range +@param res pointer to the result +@param op binary operator to apply to reduce elements +@param buf pointer to the temporary buffer + +This method is equivalent to the parallel execution of the following loop on a GPU: + +@code{.cpp} +while (first != last) { + *result = op(*result, *first++); +} +@endcode + */ +template +void cuda_reduce( + P&& p, I first, I last, T* res, O op, void* buf +) { + unsigned count = std::distance(first, last); + if(count == 0) { + return; + } + detail::cuda_reduce_loop(p, first, count, res, op, buf); +} + +// ---------------------------------------------------------------------------- +// cuda_uninitialized_reduce +// ---------------------------------------------------------------------------- + +/** +@brief performs asynchronous parallel reduction over a range of items without + an initial value + +@tparam P execution policy type +@tparam I input iterator type +@tparam T value type +@tparam O binary operator type + +@param p execution policy +@param first iterator to the beginning of the range +@param last iterator to the end of the range +@param res pointer to the result +@param op binary operator to apply to reduce elements +@param buf pointer to the temporary buffer + +This method is equivalent to the parallel execution of the following loop +on a GPU: + +@code{.cpp} +*result = *first++; // no initial values partitipcate in the loop +while (first != last) { + *result = op(*result, *first++); +} +@endcode +*/ +template +void cuda_uninitialized_reduce( + P&& p, I first, I last, T* res, O op, void* buf +) { + unsigned count = std::distance(first, last); + if(count == 0) { + return; + } + detail::cuda_uninitialized_reduce_loop(p, first, count, res, op, buf); +} + +// ---------------------------------------------------------------------------- +// transform_reduce +// ---------------------------------------------------------------------------- + +/** +@brief performs asynchronous parallel reduction over a range of transformed items + without an initial value + +@tparam P execution policy type +@tparam I input iterator type +@tparam T value type +@tparam O binary operator type +@tparam U unary operator type + +@param p execution policy +@param first iterator to the beginning of the range +@param last iterator to the end of the range +@param res pointer to the result +@param bop binary operator to apply to reduce elements +@param uop unary operator to apply to transform elements +@param buf pointer to 
the temporary buffer + +This method is equivalent to the parallel execution of the following loop on a GPU: + +@code{.cpp} +while (first != last) { + *result = bop(*result, uop(*first++)); +} +@endcode +*/ +template +void cuda_transform_reduce( + P&& p, I first, I last, T* res, O bop, U uop, void* buf +) { + + unsigned count = std::distance(first, last); + + if(count == 0) { + return; + } + + // reduction loop + detail::cuda_reduce_loop(p, + cuda_make_load_iterator([=]__device__(auto i){ + return uop(*(first+i)); + }), + count, res, bop, buf + ); +} + +// ---------------------------------------------------------------------------- +// transform_uninitialized_reduce +// ---------------------------------------------------------------------------- + +/** +@brief performs asynchronous parallel reduction over a range of transformed items + with an initial value + +@tparam P execution policy type +@tparam I input iterator type +@tparam T value type +@tparam O binary operator type +@tparam U unary operator type + +@param p execution policy +@param first iterator to the beginning of the range +@param last iterator to the end of the range +@param res pointer to the result +@param bop binary operator to apply to reduce elements +@param uop unary operator to apply to transform elements +@param buf pointer to the temporary buffer + +This method is equivalent to the parallel execution of the following loop +on a GPU: + +@code{.cpp} +*result = uop(*first++); // no initial values partitipcate in the loop +while (first != last) { + *result = bop(*result, uop(*first++)); +} +@endcode +*/ +template +void cuda_uninitialized_transform_reduce( + P&& p, I first, I last, T* res, O bop, U uop, void* buf +) { + + unsigned count = std::distance(first, last); + + if(count == 0) { + return; + } + + detail::cuda_uninitialized_reduce_loop(p, + cuda_make_load_iterator([=]__device__(auto i){ return uop(*(first+i)); }), + count, res, bop, buf + ); +} + +// ---------------------------------------------------------------------------- + +//template +//__device__ void cuda_warp_reduce( +// volatile T* shm, size_t N, size_t tid, C op +//) { +// if(tid + 32 < N) shm[tid] = op(shm[tid], shm[tid+32]); +// if(tid + 16 < N) shm[tid] = op(shm[tid], shm[tid+16]); +// if(tid + 8 < N) shm[tid] = op(shm[tid], shm[tid+8]); +// if(tid + 4 < N) shm[tid] = op(shm[tid], shm[tid+4]); +// if(tid + 2 < N) shm[tid] = op(shm[tid], shm[tid+2]); +// if(tid + 1 < N) shm[tid] = op(shm[tid], shm[tid+1]); +//} +// +//template +//__global__ void cuda_reduce(I first, size_t N, T* res, C op) { +// +// size_t tid = threadIdx.x; +// +// if(tid >= N) { +// return; +// } +// +// cudaSharedMemory shared_memory; +// T* shm = shared_memory.get(); +// +// shm[tid] = *(first+tid); +// +// for(size_t i=tid+blockDim.x; i 32; s >>= 1) { +// if(tid < s && tid + s < N) { +// shm[tid] = op(shm[tid], shm[tid+s]); +// } +// __syncthreads(); +// } +// +// if(tid < 32) { +// cuda_warp_reduce(shm, N, tid, op); +// } +// +// if(tid == 0) { +// if constexpr (uninitialized) { +// *res = shm[0]; +// } +// else { +// *res = op(*res, shm[0]); +// } +// } +//} + + +} // end of namespace tf ----------------------------------------------------- + diff --git a/bundled/taskflow-3.6.0/include/cuda/algorithm/scan.hpp b/bundled/taskflow-3.6.0/include/cuda/algorithm/scan.hpp new file mode 100644 index 0000000000..bce0d63417 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/cuda/algorithm/scan.hpp @@ -0,0 +1,488 @@ +#pragma once + +#include "reduce.hpp" + +/** +@file 
taskflow/cuda/algorithm/scan.hpp +@brief CUDA scan algorithm include file +*/ + +namespace tf::detail { + +// ---------------------------------------------------------------------------- +// scan +// ---------------------------------------------------------------------------- + +/** @private */ +inline constexpr unsigned cudaScanRecursionThreshold = 8; + +/** @private */ +enum class cudaScanType : int { + EXCLUSIVE = 1, + INCLUSIVE +}; + +/** @private */ +template 0)> +struct cudaScanResult { + T scan; + T reduction; +}; + +/** @private */ +template +struct cudaScanResult { + cudaArray scan; + T reduction; +}; + +//----------------------------------------------------------------------------- + +/** @private */ +template +struct cudaBlockScan { + + const static unsigned num_warps = nt / CUDA_WARP_SIZE; + const static unsigned num_passes = log2(nt); + const static unsigned capacity = nt + num_warps; + + /** @private */ + union storage_t { + T data[2 * nt]; + struct { T threads[nt], warps[num_warps]; }; + }; + + // standard scan + template + __device__ cudaScanResult operator ()( + unsigned tid, + T x, + storage_t& storage, + unsigned count = nt, + op_t op = op_t(), + T init = T(), + cudaScanType type = cudaScanType::EXCLUSIVE + ) const; + + // vectorized scan. accepts multiple values per thread and adds in + // optional global carry-in + template + __device__ cudaScanResult operator()( + unsigned tid, + cudaArray x, + storage_t& storage, + T carry_in = T(), + bool use_carry_in = false, + unsigned count = nt, + op_t op = op_t(), + T init = T(), + cudaScanType type = cudaScanType::EXCLUSIVE + ) const; +}; + +// standard scan +template +template +__device__ cudaScanResult cudaBlockScan::operator () ( + unsigned tid, T x, storage_t& storage, unsigned count, op_t op, + T init, cudaScanType type +) const { + + unsigned first = 0; + storage.data[first + tid] = x; + __syncthreads(); + + cuda_iterate([&](auto pass) { + if(auto offset = 1<= offset) { + x = op(storage.data[first + tid - offset], x); + } + first = nt - first; + storage.data[first + tid] = x; + __syncthreads(); + }); + + cudaScanResult result; + result.reduction = storage.data[first + count - 1]; + result.scan = (tid < count) ? + (cudaScanType::INCLUSIVE == type ? x : + (tid ? storage.data[first + tid - 1] : init)) : + result.reduction; + __syncthreads(); + + return result; +} + +// vectorized scan block +template +template +__device__ cudaScanResult cudaBlockScan::operator()( + unsigned tid, + cudaArray x, + storage_t& storage, + T carry_in, + bool use_carry_in, + unsigned count, op_t op, + T init, + cudaScanType type +) const { + + // Start with an inclusive scan of the in-range elements. + if(count >= nt * vt) { + cuda_iterate([&](auto i) { + x[i] = i ? op(x[i], x[i - 1]) : x[i]; + }); + } else { + cuda_iterate([&](auto i) { + auto index = vt * tid + i; + x[i] = i ? + ((index < count) ? op(x[i], x[i - 1]) : x[i - 1]) : + (x[i] = (index < count) ? x[i] : init); + }); + } + + // Scan the thread-local reductions for a carry-in for each thread. + auto result = operator()( + tid, x[vt - 1], storage, + (count + vt - 1) / vt, op, init, cudaScanType::EXCLUSIVE + ); + + // Perform the scan downsweep and add both the global carry-in and the + // thread carry-in to the values. + if(use_carry_in) { + result.reduction = op(carry_in, result.reduction); + result.scan = tid ? op(carry_in, result.scan) : carry_in; + } else { + use_carry_in = tid > 0; + } + + cudaArray y; + cuda_iterate([&](auto i) { + if(cudaScanType::EXCLUSIVE == type) { + y[i] = i ? 
x[i - 1] : result.scan; + if(use_carry_in && i > 0) y[i] = op(result.scan, y[i]); + } else { + y[i] = use_carry_in ? op(x[i], result.scan) : x[i]; + } + }); + + return cudaScanResult { y, result.reduction }; +} + +/** +@private +@brief single-pass scan for small input + */ +template +void cuda_single_pass_scan( + P&& p, + cudaScanType scan_type, + I input, + unsigned count, + O output, + C op + //reduction_it reduction, +) { + + using T = typename std::iterator_traits::value_type; + using E = std::decay_t
<P>
; + + // Small input specialization. This is the non-recursive branch. + cuda_kernel<<<1, E::nt, 0, p.stream()>>>([=] __device__ (auto tid, auto bid) { + + using scan_t = cudaBlockScan; + + __shared__ union { + typename scan_t::storage_t scan; + T values[E::nv]; + } shared; + + auto carry_in = T(); + for(unsigned cur = 0; cur < count; cur += E::nv) { + // Cooperatively load values into register. + auto count2 = min(count - cur, E::nv); + + auto x = cuda_mem_to_reg_thread(input + cur, + tid, count2, shared.values); + + auto result = scan_t()(tid, x, shared.scan, + carry_in, cur > 0, count2, op, T(), scan_type); + + // Store the scanned values back to global memory. + cuda_reg_to_mem_thread(result.scan, tid, count2, + output + cur, shared.values); + + // Roll the reduction into carry_in. + carry_in = result.reduction; + } + + // Store the carry-out to the reduction pointer. This may be a + // discard_iterator_t if no reduction is wanted. + //if(!tid) *reduction = carry_in; + }); +} + +/** +@private + +@brief main scan loop +*/ +template +void cuda_scan_loop( + P&& p, + cudaScanType scan_type, + I input, + unsigned count, + O output, + C op, + //reduction_it reduction, + void* ptr +) { + + using E = std::decay_t
<P>
; + using T = typename std::iterator_traits::value_type; + + T* buffer = static_cast(ptr); + + //launch_t::cta_dim(context).B(count); + unsigned B = (count + E::nv - 1) / E::nv; + + if(B > cudaScanRecursionThreshold) { + + //cudaDeviceVector partials(B); + //auto buffer = partials.data(); + + // upsweep phase + cuda_kernel<<>>([=] __device__ (auto tid, auto bid) { + + __shared__ typename cudaBlockReduce::Storage shm; + + // Load the tile's data into register. + auto tile = cuda_get_tile(bid, E::nv, count); + auto x = cuda_mem_to_reg_strided( + input + tile.begin, tid, tile.count() + ); + + // Reduce the thread's values into a scalar. + T scalar; + cuda_strided_iterate( + [&] (auto i, auto j) { scalar = i ? op(scalar, x[i]) : x[0]; }, + tid, tile.count() + ); + + // Reduce across all threads. + auto all_reduce = cudaBlockReduce()( + tid, scalar, shm, tile.count(), op + ); + + // Store the final reduction to the partials. + if(!tid) { + buffer[bid] = all_reduce; + } + }); + + // recursively call scan + //cuda_scan_loop(p, cudaScanType::EXCLUSIVE, buffer, B, buffer, op, S); + cuda_scan_loop( + p, cudaScanType::EXCLUSIVE, buffer, B, buffer, op, buffer+B + ); + + // downsweep: perform an intra-tile scan and add the scan of the partials + // as carry-in + cuda_kernel<<>>([=] __device__ (auto tid, auto bid) { + + using scan_t = cudaBlockScan; + + __shared__ union { + typename scan_t::storage_t scan; + T values[E::nv]; + } shared; + + // Load a tile to register in thread order. + auto tile = cuda_get_tile(bid, E::nv, count); + auto x = cuda_mem_to_reg_thread( + input + tile.begin, tid, tile.count(), shared.values + ); + + // Scan the array with carry-in from the partials. + auto y = scan_t()(tid, x, shared.scan, + buffer[bid], bid > 0, tile.count(), op, T(), + scan_type).scan; + + // Store the scanned values to the output. + cuda_reg_to_mem_thread( + y, tid, tile.count(), output + tile.begin, shared.values + ); + }); + } + // Small input specialization. This is the non-recursive branch. 
+ else { + cuda_single_pass_scan(p, scan_type, input, count, output, op); + } +} + +} // namespace tf::detail ---------------------------------------------------- + +namespace tf { + +// Function: scan_bufsz +template +template +unsigned cudaExecutionPolicy::scan_bufsz(unsigned count) { + unsigned B = num_blocks(count); + unsigned n = 0; + for(auto b=B; b>detail::cudaScanRecursionThreshold; b=num_blocks(b)) { + n += b; + } + return n*sizeof(T); +} + + +/** +@brief performs asynchronous inclusive scan over a range of items + +@tparam P execution policy type +@tparam I input iterator +@tparam O output iterator +@tparam C binary operator type + +@param p execution policy +@param first iterator to the beginning of the input range +@param last iterator to the end of the input range +@param output iterator to the beginning of the output range +@param op binary operator to apply to scan +@param buf pointer to the temporary buffer + +*/ +template +void cuda_inclusive_scan( + P&& p, I first, I last, O output, C op, void* buf +) { + + unsigned count = std::distance(first, last); + + if(count == 0) { + return; + } + + // launch the scan loop + detail::cuda_scan_loop( + p, detail::cudaScanType::INCLUSIVE, first, count, output, op, buf + ); +} + +/** +@brief performs asynchronous inclusive scan over a range of transformed items + +@tparam P execution policy type +@tparam I input iterator +@tparam O output iterator +@tparam C binary operator type +@tparam U unary operator type + +@param p execution policy +@param first iterator to the beginning of the input range +@param last iterator to the end of the input range +@param output iterator to the beginning of the output range +@param bop binary operator to apply to scan +@param uop unary operator to apply to transform each item before scan +@param buf pointer to the temporary buffer + +*/ +template +void cuda_transform_inclusive_scan( + P&& p, I first, I last, O output, C bop, U uop, void* buf +) { + + using T = typename std::iterator_traits::value_type; + + unsigned count = std::distance(first, last); + + if(count == 0) { + return; + } + + // launch the scan loop + detail::cuda_scan_loop( + p, detail::cudaScanType::INCLUSIVE, + cuda_make_load_iterator([=]__device__(auto i){ return uop(*(first+i)); }), + count, output, bop, buf + ); +} + +/** +@brief performs asynchronous exclusive scan over a range of items + +@tparam P execution policy type +@tparam I input iterator +@tparam O output iterator +@tparam C binary operator type + +@param p execution policy +@param first iterator to the beginning of the input range +@param last iterator to the end of the input range +@param output iterator to the beginning of the output range +@param op binary operator to apply to scan +@param buf pointer to the temporary buffer + +*/ +template +void cuda_exclusive_scan( + P&& p, I first, I last, O output, C op, void* buf +) { + + unsigned count = std::distance(first, last); + + if(count == 0) { + return; + } + + // launch the scan loop + detail::cuda_scan_loop( + p, detail::cudaScanType::EXCLUSIVE, first, count, output, op, buf + ); +} + +/** +@brief performs asynchronous exclusive scan over a range of items + +@tparam P execution policy type +@tparam I input iterator +@tparam O output iterator +@tparam C binary operator type +@tparam U unary operator type + +@param p execution policy +@param first iterator to the beginning of the input range +@param last iterator to the end of the input range +@param output iterator to the beginning of the output range +@param bop binary 
operator to apply to scan +@param uop unary operator to apply to transform each item before scan +@param buf pointer to the temporary buffer + +*/ +template +void cuda_transform_exclusive_scan( + P&& p, I first, I last, O output, C bop, U uop, void* buf +) { + + using T = typename std::iterator_traits::value_type; + + unsigned count = std::distance(first, last); + + if(count == 0) { + return; + } + + // launch the scan loop + detail::cuda_scan_loop( + p, detail::cudaScanType::EXCLUSIVE, + cuda_make_load_iterator([=]__device__(auto i){ return uop(*(first+i)); }), + count, output, bop, buf + ); +} + + +} // end of namespace tf ----------------------------------------------------- + + + diff --git a/bundled/taskflow-3.6.0/include/cuda/algorithm/sort.hpp b/bundled/taskflow-3.6.0/include/cuda/algorithm/sort.hpp new file mode 100644 index 0000000000..3cc01d5ae3 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/cuda/algorithm/sort.hpp @@ -0,0 +1,506 @@ +#pragma once + +#include "merge.hpp" + +/** +@file taskflow/cuda/algorithm/sort.hpp +@brief CUDA sort algorithm include file +*/ + +namespace tf::detail { + +// ---------------------------------------------------------------------------- +// odd-even sort in register +// ---------------------------------------------------------------------------- + +/** +@private +@brief counts the number of leading zeros starting from the most significant bit +*/ +constexpr int cuda_clz(int x) { + for(int i = 31; i >= 0; --i) { + if((1<< i) & x) { + return 31 - i; + } + } + return 32; +} + +/** +@private +@brief finds log2(x) and optionally round up to the next integer logarithm. +*/ +constexpr int cuda_find_log2(int x, bool round_up = false) { + int a = 31 - cuda_clz(x); + if(round_up) { + a += !is_pow2(x); + } + return a; +} + +/** @private */ +template +__device__ auto cuda_odd_even_sort( + cudaArray x, C comp, int flags = 0 +) { + cuda_iterate([&](auto I) { + #pragma unroll + for(auto i = 1 & I; i < vt - 1; i += 2) { + if((0 == ((2<< i) & flags)) && comp(x[i + 1], x[i])) + cuda_swap(x[i], x[i + 1]); + } + }); + return x; +} + +/** @private */ +template +__device__ auto cuda_odd_even_sort( + cudaKVArray x, C comp, int flags = 0 +) { + cuda_iterate([&](auto I) { + #pragma unroll + for(auto i = 1 & I; i < vt - 1; i += 2) { + if((0 == ((2<< i) & flags)) && comp(x.keys[i + 1], x.keys[i])) { + cuda_swap(x.keys[i], x.keys[i + 1]); + cuda_swap(x.vals[i], x.vals[i + 1]); + } + } + }); + return x; +} + +// ---------------------------------------------------------------------------- +// range check +// ---------------------------------------------------------------------------- + +/** @private */ +__device__ inline int cuda_out_of_range_flags(int first, int vt, int count) { + int out_of_range = min(vt, first + vt - count); + int head_flags = 0; + if(out_of_range > 0) { + const int mask = (1<< vt) - 1; + head_flags = mask & (~mask>> out_of_range); + } + return head_flags; +} + +/** @private */ +__device__ inline auto cuda_compute_merge_sort_frame( + unsigned partition, unsigned coop, unsigned spacing +) { + + unsigned size = spacing * (coop / 2); + unsigned start = ~(coop - 1) & partition; + unsigned a_begin = spacing * start; + unsigned b_begin = spacing * start + size; + + return cudaMergeRange { + a_begin, + a_begin + size, + b_begin, + b_begin + size + }; +} + +/** @private */ +__device__ inline auto cuda_compute_merge_sort_range( + unsigned count, unsigned partition, unsigned coop, unsigned spacing +) { + + auto frame = cuda_compute_merge_sort_frame(partition, 
coop, spacing); + + return cudaMergeRange { + frame.a_begin, + min(count, frame.a_end), + min(count, frame.b_begin), + min(count, frame.b_end) + }; +} + +/** @private */ +__device__ inline auto cuda_compute_merge_sort_range( + unsigned count, unsigned partition, unsigned coop, unsigned spacing, + unsigned mp0, unsigned mp1 +) { + + auto range = cuda_compute_merge_sort_range(count, partition, coop, spacing); + + // Locate the diagonal from the start of the A sublist. + unsigned diag = spacing * partition - range.a_begin; + + // The end partition of the last cta for each merge operation is computed + // and stored as the begin partition for the subsequent merge. i.e. it is + // the same partition but in the wrong coordinate system, so its 0 when it + // should be listSize. Correct that by checking if this is the last cta + // in this merge operation. + if(coop - 1 != ((coop - 1) & partition)) { + range.a_end = range.a_begin + mp1; + range.b_end = min(count, range.b_begin + diag + spacing - mp1); + } + + range.a_begin = range.a_begin + mp0; + range.b_begin = min(count, range.b_begin + diag - mp0); + + return range; +} + +/** @private */ +template +struct cudaBlockSort { + + static constexpr bool has_values = !std::is_same::value; + static constexpr unsigned num_passes = log2(nt); + + /** @private */ + union Storage { + K keys[nt * vt + 1]; + V vals[nt * vt]; + }; + + static_assert(is_pow2(nt), "cudaBlockSort requires pow2 number of threads"); + + template + __device__ auto merge_pass( + cudaKVArray x, + unsigned tid, unsigned count, unsigned pass, + C comp, Storage& storage + ) const { + + // Divide the CTA's keys into lists. + unsigned coop = 2 << pass; + auto range = cuda_compute_merge_sort_range(count, tid, coop, vt); + unsigned diag = vt * tid - range.a_begin; + + // Store the keys into shared memory for searching. + cuda_reg_to_shared_thread(x.keys, tid, storage.keys); + + // Search for the merge path for this thread within its list. + auto mp = cuda_merge_path( + storage.keys, range, diag, comp + ); + + // Run a serial merge and return. + auto merge = cuda_serial_merge( + storage.keys, range.partition(mp, diag), comp + ); + x.keys = merge.keys; + + if(has_values) { + // Reorder values through shared memory. + cuda_reg_to_shared_thread(x.vals, tid, storage.vals); + x.vals = cuda_shared_gather(storage.vals, merge.indices); + } + + return x; + } + + template + __device__ auto block_sort(cudaKVArray x, + unsigned tid, unsigned count, C comp, Storage& storage + ) const { + + // Sort the inputs within each thread. If any threads have fewer than + // vt items, use the segmented sort network to prevent out-of-range + // elements from contaminating the sort. + if(count < nt * vt) { + auto head_flags = cuda_out_of_range_flags(vt * tid, vt, count); + x = cuda_odd_even_sort(x, comp, head_flags); + } else { + x = cuda_odd_even_sort(x, comp); + } + + // Merge threads starting with a pair until all values are merged. 
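  // With vt keys per thread, pass p (coop = 2 << p) merges sorted runs of
  // vt << p keys into runs of vt << (p + 1) keys, so after log2(nt) passes
  // the whole tile of nt * vt keys is sorted.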
+ for(unsigned pass = 0; pass < num_passes; ++pass) { + x = merge_pass(x, tid, count, pass, comp, storage); + } + + return x; + } +}; + +/** @private */ +template +void cuda_merge_sort_partitions( + P&& p, K keys, unsigned count, + unsigned coop, unsigned spacing, C comp, unsigned* buf +) { + + // bufer size is num_partitions + 1 + unsigned num_partitions = (count + spacing - 1) / spacing + 1; + + const unsigned nt = 128; + const unsigned vt = 1; + const unsigned nv = nt * vt; + + unsigned B = (num_partitions + nv - 1) / nv; // nt = 128, vt = 1 + + cuda_kernel<<>>([=] __device__ (auto tid, auto bid) { + auto range = cuda_get_tile(bid, nt * vt, num_partitions); + cuda_strided_iterate([=](auto, auto j) { + auto index = j + range.begin; + auto range = cuda_compute_merge_sort_range(count, index, coop, spacing); + auto diag = min(spacing * index, count) - range.a_begin; + buf[index] = cuda_merge_path( + keys + range.a_begin, range.a_count(), + keys + range.b_begin, range.b_count(), + diag, comp + ); + }, tid, range.count()); + }); +} + +/** @private */ +template +void merge_sort_loop( + P&& p, K_it keys_input, V_it vals_input, unsigned count, C comp, void* buf +) { + + using K = typename std::iterator_traits::value_type; + using V = typename std::iterator_traits::value_type; + using E = std::decay_t
<P>
; + + const bool has_values = !std::is_same::value; + + unsigned B = (count + E::nv - 1) / E::nv; + unsigned R = cuda_find_log2(B, true); + + K* keys_output {nullptr}; + V* vals_output {nullptr}; + unsigned *mp_data {nullptr}; + + if(R) { + keys_output = (K*)(buf); + if(has_values) { + vals_output = (V*)(keys_output + count); + mp_data = (unsigned*)(vals_output + count); + } + else { + mp_data = (unsigned*)(keys_output + count); + } + } + + //cudaDeviceVector keys_temp(R ? count : 0); + //auto keys_output = keys_temp.data(); + ////std::cout << "keys_output = " << keys_temp.size()*sizeof(K) << std::endl; + + //cudaDeviceVector vals_temp((has_values && R) ? count : 0); + //auto vals_output = vals_temp.data(); + //std::cout << "vals_output = " << vals_temp.size()*sizeof(V) << std::endl; + + auto keys_blocksort = (1 & R) ? keys_output : keys_input; + auto vals_blocksort = (1 & R) ? vals_output : vals_input; + + //printf("B=%u, R=%u\n", B, R); + + cuda_kernel<<>>([=] __device__ (auto tid, auto bid) { + + using sort_t = cudaBlockSort; + + __shared__ union { + typename sort_t::Storage sort; + K keys[E::nv]; + V vals[E::nv]; + } shared; + + auto tile = cuda_get_tile(bid, E::nv, count); + + // Load the keys and values. + cudaKVArray unsorted; + unsorted.keys = cuda_mem_to_reg_thread( + keys_input + tile.begin, tid, tile.count(), shared.keys + ); + + if(has_values) { + unsorted.vals = cuda_mem_to_reg_thread( + vals_input + tile.begin, tid, tile.count(), shared.vals + ); + } + + // Blocksort. + auto sorted = sort_t().block_sort(unsorted, tid, tile.count(), comp, shared.sort); + + // Store the keys and values. + cuda_reg_to_mem_thread( + sorted.keys, tid, tile.count(), keys_blocksort + tile.begin, shared.keys + ); + + if(has_values) { + cuda_reg_to_mem_thread( + sorted.vals, tid, tile.count(), vals_blocksort + tile.begin, shared.vals + ); + } + }); + + // merge passes + + if(1 & R) { + std::swap(keys_input, keys_output); + std::swap(vals_input, vals_output); + } + + // number of partitions + //unsigned num_partitions = B + 1; + //cudaDeviceVector mem(num_partitions); + //auto mp_data = mem.data(); + //std::cout << "num_partitions = " << (B+1)*sizeof(unsigned) << std::endl; + + for(unsigned pass = 0; pass < R; ++pass) { + + unsigned coop = 2 << pass; + + cuda_merge_sort_partitions( + p, keys_input, count, coop, E::nv, comp, mp_data + ); + + cuda_kernel<<>>([=]__device__(auto tid, auto bid) { + + __shared__ union { + K keys[E::nv + 1]; + unsigned indices[E::nv]; + } shared; + + auto tile = cuda_get_tile(bid, E::nv, count); + + // Load the range for this CTA and merge the values into register. + auto range = cuda_compute_merge_sort_range( + count, bid, coop, E::nv, mp_data[bid + 0], mp_data[bid + 1] + ); + + auto merge = block_merge_from_mem( + keys_input, keys_input, range, tid, comp, shared.keys + ); + + // Store merged values back out. + cuda_reg_to_mem_thread( + merge.keys, tid, tile.count(), keys_output + tile.begin, shared.keys + ); + + if(has_values) { + // Transpose the indices from thread order to strided order. + auto indices = cuda_reg_thread_to_strided( + merge.indices, tid, shared.indices + ); + + // Gather the input values and merge into the output values. 
+ cuda_transfer_two_streams_strided( + vals_input + range.a_begin, range.a_count(), + vals_input + range.b_begin, range.b_count(), + indices, tid, vals_output + tile.begin + ); + } + }); + + std::swap(keys_input, keys_output); + std::swap(vals_input, vals_output); + } +} + +} // end of namespace tf::detail --------------------------------------------- + +namespace tf { + +/** +@brief queries the buffer size in bytes needed to call sort kernels + for the given number of elements + +@tparam P execution policy type +@tparam K key type +@tparam V value type (default tf::cudaEmpty) + +@param count number of keys/values to sort + +The function is used to allocate a buffer for calling tf::cuda_sort. + +*/ +template +unsigned cuda_sort_buffer_size(unsigned count) { + + using E = std::decay_t
<P>
; + + const bool has_values = !std::is_same::value; + + unsigned B = (count + E::nv - 1) / E::nv; + unsigned R = detail::cuda_find_log2(B, true); + + return R ? (count * sizeof(K) + (has_values ? count*sizeof(V) : 0) + + (B+1)*sizeof(unsigned)) : 0; +} + +// ---------------------------------------------------------------------------- +// key-value sort +// ---------------------------------------------------------------------------- + +/** +@brief performs asynchronous key-value sort on a range of items + +@tparam P execution policy type +@tparam K_it key iterator type +@tparam V_it value iterator type +@tparam C comparator type + +@param p execution policy +@param k_first iterator to the beginning of the key range +@param k_last iterator to the end of the key range +@param v_first iterator to the beginning of the value range +@param comp binary comparator +@param buf pointer to the temporary buffer + +Sorts key-value elements in [k_first, k_last) and +[v_first, v_first + (k_last - k_first)) into ascending key order +using the given comparator @c comp. +If @c i and @c j are any two valid iterators in [k_first, k_last) +such that @c i precedes @c j, and @c p and @c q are iterators in +[v_first, v_first + (k_last - k_first)) corresponding to +@c i and @c j respectively, then comp(*j, *i) evaluates to @c false. + +For example, assume: + + @c keys are {1, 4, 2, 8, 5, 7} + + @c values are {'a', 'b', 'c', 'd', 'e', 'f'} + +After sort: + + @c keys are {1, 2, 4, 5, 7, 8} + + @c values are {'a', 'c', 'b', 'e', 'f', 'd'} + +*/ +template +void cuda_sort_by_key( + P&& p, K_it k_first, K_it k_last, V_it v_first, C comp, void* buf +) { + + unsigned N = std::distance(k_first, k_last); + + if(N <= 1) { + return; + } + + detail::merge_sort_loop(p, k_first, v_first, N, comp, buf); +} + +// ---------------------------------------------------------------------------- +// key sort +// ---------------------------------------------------------------------------- + +/** +@brief performs asynchronous key-only sort on a range of items + +@tparam P execution policy type +@tparam K_it key iterator type +@tparam C comparator type + +@param p execution policy +@param k_first iterator to the beginning of the key range +@param k_last iterator to the end of the key range +@param comp binary comparator +@param buf pointer to the temporary buffer + +This method is equivalent to tf::cuda_sort_by_key without values. 
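A hedged usage sketch (d_keys is a device array of N ints filled by the caller; the comparator and all names are illustrative, not part of the bundled header):

@code{.cpp}
struct Less {  // illustrative device-side comparator
  __device__ bool operator()(int a, int b) const { return a < b; }
};

tf::cudaDefaultExecutionPolicy policy(stream);   // stream created by the caller

// query the scratch space; it is zero when a single tile covers all N keys
unsigned bytes = tf::cuda_sort_buffer_size<tf::cudaDefaultExecutionPolicy, int>(N);
void* buf = nullptr;
if(bytes) cudaMalloc(&buf, bytes);

tf::cuda_sort(policy, d_keys, d_keys + N, Less{}, buf);

cudaStreamSynchronize(stream);                   // the sort runs asynchronously
if(buf) cudaFree(buf);
@endcode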
+ +*/ +template +void cuda_sort(P&& p, K_it k_first, K_it k_last, C comp, void* buf) { + cuda_sort_by_key(p, k_first, k_last, (cudaEmpty*)nullptr, comp, buf); +} + +} // end of namespace tf ----------------------------------------------------- + diff --git a/bundled/taskflow-3.6.0/include/cuda/algorithm/transform.hpp b/bundled/taskflow-3.6.0/include/cuda/algorithm/transform.hpp new file mode 100644 index 0000000000..b1146bdd7b --- /dev/null +++ b/bundled/taskflow-3.6.0/include/cuda/algorithm/transform.hpp @@ -0,0 +1,282 @@ +#pragma once + +#include "../cudaflow.hpp" + +/** +@file taskflow/cuda/algorithm/transform.hpp +@brief cuda parallel-transform algorithms include file +*/ + +namespace tf { + +// ---------------------------------------------------------------------------- +// transform +// ---------------------------------------------------------------------------- + +namespace detail { + +/** +@private +*/ +template +__global__ void cuda_transform_kernel(I first, unsigned count, O output, C op) { + auto tid = threadIdx.x; + auto bid = blockIdx.x; + auto tile = cuda_get_tile(bid, nt*vt, count); + cuda_strided_iterate( + [=]__device__(auto, auto j) { + auto offset = j + tile.begin; + *(output + offset) = op(*(first+offset)); + }, + tid, + tile.count() + ); +} + +/** +@private +*/ +template +__global__ void cuda_transform_kernel( + I1 first1, I2 first2, unsigned count, O output, C op +) { + auto tid = threadIdx.x; + auto bid = blockIdx.x; + auto tile = cuda_get_tile(bid, nt*vt, count); + cuda_strided_iterate( + [=]__device__(auto, auto j) { + auto offset = j + tile.begin; + *(output + offset) = op(*(first1+offset), *(first2+offset)); + }, + tid, + tile.count() + ); +} + +} // end of namespace detail ------------------------------------------------- + +// ---------------------------------------------------------------------------- +// CUDA standard algorithms: transform +// ---------------------------------------------------------------------------- + +/** +@brief performs asynchronous parallel transforms over a range of items + +@tparam P execution policy type +@tparam I input iterator type +@tparam O output iterator type +@tparam C unary operator type + +@param p execution policy +@param first iterator to the beginning of the range +@param last iterator to the end of the range +@param output iterator to the beginning of the output range +@param op unary operator to apply to transform each item + +This method is equivalent to the parallel execution of the following loop on a GPU: + +@code{.cpp} +while (first != last) { + *output++ = op(*first++); +} +@endcode + +*/ +template +void cuda_transform(P&& p, I first, I last, O output, C op) { + + using E = std::decay_t
<P>
; + + unsigned count = std::distance(first, last); + + if(count == 0) { + return; + } + + detail::cuda_transform_kernel + <<>> ( + first, count, output, op + ); +} + +/** +@brief performs asynchronous parallel transforms over two ranges of items + +@tparam P execution policy type +@tparam I1 first input iterator type +@tparam I2 second input iterator type +@tparam O output iterator type +@tparam C binary operator type + +@param p execution policy +@param first1 iterator to the beginning of the first range +@param last1 iterator to the end of the first range +@param first2 iterator to the beginning of the second range +@param output iterator to the beginning of the output range +@param op binary operator to apply to transform each pair of items + +This method is equivalent to the parallel execution of the following loop on a GPU: + +@code{.cpp} +while (first1 != last1) { + *output++ = op(*first1++, *first2++); +} +@endcode +*/ +template +void cuda_transform( + P&& p, I1 first1, I1 last1, I2 first2, O output, C op +) { + + using E = std::decay_t
<P>
; + + unsigned count = std::distance(first1, last1); + + if(count == 0) { + return; + } + + detail::cuda_transform_kernel + <<>> ( + first1, first2, count, output, op + ); +} + +// ---------------------------------------------------------------------------- +// cudaFlow +// ---------------------------------------------------------------------------- + +// Function: transform +template +cudaTask cudaFlow::transform(I first, I last, O output, C c) { + + using E = cudaDefaultExecutionPolicy; + + unsigned count = std::distance(first, last); + + // TODO: + //if(count == 0) { + // return; + //} + + return kernel( + E::num_blocks(count), E::nt, 0, + detail::cuda_transform_kernel, + first, count, output, c + ); +} + +// Function: transform +template +cudaTask cudaFlow::transform(I1 first1, I1 last1, I2 first2, O output, C c) { + + using E = cudaDefaultExecutionPolicy; + + unsigned count = std::distance(first1, last1); + + // TODO: + //if(count == 0) { + // return; + //} + + return kernel( + E::num_blocks(count), E::nt, 0, + detail::cuda_transform_kernel, + first1, first2, count, output, c + ); +} + +// Function: update transform +template +void cudaFlow::transform(cudaTask task, I first, I last, O output, C c) { + + using E = cudaDefaultExecutionPolicy; + + unsigned count = std::distance(first, last); + + // TODO: + //if(count == 0) { + // return; + //} + + kernel(task, + E::num_blocks(count), E::nt, 0, + detail::cuda_transform_kernel, + first, count, output, c + ); +} + +// Function: update transform +template +void cudaFlow::transform( + cudaTask task, I1 first1, I1 last1, I2 first2, O output, C c +) { + using E = cudaDefaultExecutionPolicy; + + unsigned count = std::distance(first1, last1); + + // TODO: + //if(count == 0) { + // return; + //} + + kernel(task, + E::num_blocks(count), E::nt, 0, + detail::cuda_transform_kernel, + first1, first2, count, output, c + ); +} + +// ---------------------------------------------------------------------------- +// cudaFlowCapturer +// ---------------------------------------------------------------------------- + +// Function: transform +template +cudaTask cudaFlowCapturer::transform(I first, I last, O output, C op) { + return on([=](cudaStream_t stream) mutable { + cudaDefaultExecutionPolicy p(stream); + cuda_transform(p, first, last, output, op); + }); +} + +// Function: transform +template +cudaTask cudaFlowCapturer::transform( + I1 first1, I1 last1, I2 first2, O output, C op +) { + return on([=](cudaStream_t stream) mutable { + cudaDefaultExecutionPolicy p(stream); + cuda_transform(p, first1, last1, first2, output, op); + }); +} + +// Function: transform +template +void cudaFlowCapturer::transform( + cudaTask task, I first, I last, O output, C op +) { + on(task, [=] (cudaStream_t stream) mutable { + cudaDefaultExecutionPolicy p(stream); + cuda_transform(p, first, last, output, op); + }); +} + +// Function: transform +template +void cudaFlowCapturer::transform( + cudaTask task, I1 first1, I1 last1, I2 first2, O output, C op +) { + on(task, [=] (cudaStream_t stream) mutable { + cudaDefaultExecutionPolicy p(stream); + cuda_transform(p, first1, last1, first2, output, op); + }); +} + +} // end of namespace tf ----------------------------------------------------- + + + + + + diff --git a/bundled/taskflow-3.6.0/include/cuda/algorithm/transpose.hpp b/bundled/taskflow-3.6.0/include/cuda/algorithm/transpose.hpp new file mode 100644 index 0000000000..3b02a7f1a8 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/cuda/algorithm/transpose.hpp @@ -0,0 +1,41 @@ 
+#pragma once + +#include "../cuda_error.hpp" + +namespace tf { + +// ---------------------------------------------------------------------------- +// row-wise matrix transpose +// ---------------------------------------------------------------------------- +// +template +__global__ void cuda_transpose( + const T* d_in, + T* d_out, + size_t rows, + size_t cols +) { + __shared__ T tile[32][32]; + size_t x = blockIdx.x * 32 + threadIdx.x; + size_t y = blockIdx.y * 32 + threadIdx.y; + + for(size_t i = 0; i < 32; i += 8) { + if(x < cols && (y + i) < rows) { + tile[threadIdx.y + i][threadIdx.x] = d_in[(y + i) * cols + x]; + } + } + + __syncthreads(); + + x = blockIdx.y * 32 + threadIdx.x; + y = blockIdx.x * 32 + threadIdx.y; + + for(size_t i = 0; i < 32; i += 8) { + if(x < rows && (y + i) < cols) { + d_out[(y + i) * rows + x] = tile[threadIdx.x][threadIdx.y + i]; + } + } +} + +} // end of namespace -------------------------------------------------------- + diff --git a/bundled/taskflow-3.6.0/include/cuda/cuda_capturer.hpp b/bundled/taskflow-3.6.0/include/cuda/cuda_capturer.hpp new file mode 100644 index 0000000000..3b5daee9de --- /dev/null +++ b/bundled/taskflow-3.6.0/include/cuda/cuda_capturer.hpp @@ -0,0 +1,724 @@ +#pragma once + +#include "cuda_task.hpp" +#include "cuda_optimizer.hpp" + +/** +@file cuda_capturer.hpp +@brief %cudaFlow capturer include file +*/ + +namespace tf { + +// ---------------------------------------------------------------------------- +// class definition: cudaFlowCapturer +// ---------------------------------------------------------------------------- + +/** +@class cudaFlowCapturer + +@brief class to create a %cudaFlow graph using stream capture + +The usage of tf::cudaFlowCapturer is similar to tf::cudaFlow, except users can +call the method tf::cudaFlowCapturer::on to capture a sequence of asynchronous +CUDA operations through the given stream. +The following example creates a CUDA graph that captures two kernel tasks, +@c task_1 and @c task_2, where @c task_1 runs before @c task_2. + +@code{.cpp} +taskflow.emplace([](tf::cudaFlowCapturer& capturer){ + + // capture my_kernel_1 through the given stream managed by the capturer + auto task_1 = capturer.on([&](cudaStream_t stream){ + my_kernel_1<<>>(my_parameters_1); + }); + + // capture my_kernel_2 through the given stream managed by the capturer + auto task_2 = capturer.on([&](cudaStream_t stream){ + my_kernel_2<<>>(my_parameters_2); + }); + + task_1.precede(task_2); +}); +@endcode + +Similar to tf::cudaFlow, a %cudaFlowCapturer is a task (tf::Task) +created from tf::Taskflow +and will be run by @em one worker thread in the executor. +That is, the callable that describes a %cudaFlowCapturer +will be executed sequentially. +Inside a %cudaFlow capturer task, different GPU tasks (tf::cudaTask) may run +in parallel depending on the selected optimization algorithm. +By default, we use tf::cudaFlowRoundRobinOptimizer to transform a user-level +graph into a native CUDA graph. + +Please refer to @ref GPUTaskingcudaFlowCapturer for details. 
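As with other tasks, the enclosing taskflow is then submitted to an executor,
for example:

@code{.cpp}
tf::Executor executor;
executor.run(taskflow).wait();
@endcode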
+*/ +class cudaFlowCapturer { + + friend class cudaFlow; + friend class Executor; + + // created by user + struct External { + cudaFlowGraph graph; + }; + + // created from cudaFlow + struct Internal { + }; + + using handle_t = std::variant; + + using Optimizer = std::variant< + cudaFlowRoundRobinOptimizer, + cudaFlowSequentialOptimizer, + cudaFlowLinearOptimizer + >; + + public: + + /** + @brief constrcts a standalone cudaFlowCapturer + + A standalone %cudaFlow capturer does not go through any taskflow and + can be run by the caller thread using tf::cudaFlowCapturer::run. + */ + cudaFlowCapturer() = default; + + /** + @brief destructs the cudaFlowCapturer + */ + ~cudaFlowCapturer() = default; + + /** + @brief default move constructor + */ + cudaFlowCapturer(cudaFlowCapturer&&) = default; + + /** + @brief default move assignment operator + */ + cudaFlowCapturer& operator = (cudaFlowCapturer&&) = default; + + /** + @brief queries the emptiness of the graph + */ + bool empty() const; + + /** + @brief queries the number of tasks + */ + size_t num_tasks() const; + + /** + @brief clear this %cudaFlow capturer + */ + void clear(); + + /** + @brief dumps the %cudaFlow graph into a DOT format through an + output stream + */ + void dump(std::ostream& os) const; + + /** + @brief dumps the native captured graph into a DOT format through + an output stream + */ + void dump_native_graph(std::ostream& os) const; + + // ------------------------------------------------------------------------ + // basic methods + // ------------------------------------------------------------------------ + + /** + @brief captures a sequential CUDA operations from the given callable + + @tparam C callable type constructible with @c std::function + @param callable a callable to capture CUDA operations with the stream + + This methods applies a stream created by the flow to capture + a sequence of CUDA operations defined in the callable. + */ + template , void>* = nullptr + > + cudaTask on(C&& callable); + + /** + @brief updates a capture task to another sequential CUDA operations + + The method is similar to cudaFlowCapturer::on but operates + on an existing task. + */ + template , void>* = nullptr + > + void on(cudaTask task, C&& callable); + + /** + @brief captures a no-operation task + + @return a tf::cudaTask handle + + An empty node performs no operation during execution, + but can be used for transitive ordering. + For example, a phased execution graph with 2 groups of @c n nodes + with a barrier between them can be represented using an empty node + and @c 2*n dependency edges, + rather than no empty node and @c n^2 dependency edges. + */ + cudaTask noop(); + + /** + @brief updates a task to a no-operation task + + The method is similar to tf::cudaFlowCapturer::noop but + operates on an existing task. + */ + void noop(cudaTask task); + + /** + @brief copies data between host and device asynchronously through a stream + + @param dst destination memory address + @param src source memory address + @param count size in bytes to copy + + The method captures a @c cudaMemcpyAsync operation through an + internal stream. + */ + cudaTask memcpy(void* dst, const void* src, size_t count); + + /** + @brief updates a capture task to a memcpy operation + + The method is similar to cudaFlowCapturer::memcpy but operates on an + existing task. 
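A minimal sketch of the capture-then-update idiom on a standalone capturer
(@c dst, @c src, @c dst2, @c src2, @c bytes, and @c stream are placeholders
for device-visible buffers and an existing stream):

@code{.cpp}
tf::cudaFlowCapturer capturer;

auto t = capturer.memcpy(dst, src, bytes);   // create the capture task
capturer.run(stream);                        // instantiate and launch
cudaStreamSynchronize(stream);

capturer.memcpy(t, dst2, src2, bytes);       // rebind the task to new parameters
capturer.run(stream);                        // update the executable and relaunch
cudaStreamSynchronize(stream);
@endcode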
+ */ + void memcpy(cudaTask task, void* dst, const void* src, size_t count); + + /** + @brief captures a copy task of typed data + + @tparam T element type (non-void) + + @param tgt pointer to the target memory block + @param src pointer to the source memory block + @param num number of elements to copy + + @return cudaTask handle + + A copy task transfers num*sizeof(T) bytes of data from a source location + to a target location. Direction can be arbitrary among CPUs and GPUs. + */ + template , void>* = nullptr + > + cudaTask copy(T* tgt, const T* src, size_t num); + + /** + @brief updates a capture task to a copy operation + + The method is similar to cudaFlowCapturer::copy but operates on + an existing task. + */ + template , void>* = nullptr + > + void copy(cudaTask task, T* tgt, const T* src, size_t num); + + /** + @brief initializes or sets GPU memory to the given value byte by byte + + @param ptr pointer to GPU mempry + @param v value to set for each byte of the specified memory + @param n size in bytes to set + + The method captures a @c cudaMemsetAsync operation through an + internal stream to fill the first @c count bytes of the memory area + pointed to by @c devPtr with the constant byte value @c value. + */ + cudaTask memset(void* ptr, int v, size_t n); + + /** + @brief updates a capture task to a memset operation + + The method is similar to cudaFlowCapturer::memset but operates on + an existing task. + */ + void memset(cudaTask task, void* ptr, int value, size_t n); + + /** + @brief captures a kernel + + @tparam F kernel function type + @tparam ArgsT kernel function parameters type + + @param g configured grid + @param b configured block + @param s configured shared memory size in bytes + @param f kernel function + @param args arguments to forward to the kernel function by copy + + @return cudaTask handle + */ + template + cudaTask kernel(dim3 g, dim3 b, size_t s, F f, ArgsT&&... args); + + /** + @brief updates a capture task to a kernel operation + + The method is similar to cudaFlowCapturer::kernel but operates on + an existing task. + */ + template + void kernel( + cudaTask task, dim3 g, dim3 b, size_t s, F f, ArgsT&&... args + ); + + // ------------------------------------------------------------------------ + // generic algorithms + // ------------------------------------------------------------------------ + + /** + @brief capturers a kernel to runs the given callable with only one thread + + @tparam C callable type + + @param c callable to run by a single kernel thread + */ + template + cudaTask single_task(C c); + + /** + @brief updates a capture task to a single-threaded kernel + + This method is similar to cudaFlowCapturer::single_task but operates + on an existing task. + */ + template + void single_task(cudaTask task, C c); + + /** + @brief captures a kernel that applies a callable to each dereferenced element + of the data array + + @tparam I iterator type + @tparam C callable type + + @param first iterator to the beginning + @param last iterator to the end + @param callable a callable object to apply to the dereferenced iterator + + @return cudaTask handle + + This method is equivalent to the parallel execution of the following loop on a GPU: + + @code{.cpp} + for(auto itr = first; itr != last; i++) { + callable(*itr); + } + @endcode + */ + template + cudaTask for_each(I first, I last, C callable); + + /** + @brief updates a capture task to a for-each kernel task + + This method is similar to cudaFlowCapturer::for_each but operates + on an existing task. 
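A minimal sketch of the create-then-update pattern (@c data, @c data2, and
@c N are placeholders; device lambdas assume nvcc's extended-lambda support):

@code{.cpp}
// create a for-each task that scales a device-visible range in place
auto task = capturer.for_each(data, data + N,
  [] __device__ (float& x) { x *= 2.0f; }
);

// ... after running the capturer, rebind the same task to another range
capturer.for_each(task, data2, data2 + N,
  [] __device__ (float& x) { x += 1.0f; }
);
@endcode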
+ */ + template + void for_each(cudaTask task, I first, I last, C callable); + + /** + @brief captures a kernel that applies a callable to each index in the range + with the step size + + @tparam I index type + @tparam C callable type + + @param first beginning index + @param last last index + @param step step size + @param callable the callable to apply to each element in the data array + + @return cudaTask handle + + This method is equivalent to the parallel execution of the following loop on a GPU: + + @code{.cpp} + // step is positive [first, last) + for(auto i=first; ilast; i+=step) { + callable(i); + } + @endcode + */ + template + cudaTask for_each_index(I first, I last, I step, C callable); + + /** + @brief updates a capture task to a for-each-index kernel task + + This method is similar to cudaFlowCapturer::for_each_index but operates + on an existing task. + */ + template + void for_each_index( + cudaTask task, I first, I last, I step, C callable + ); + + /** + @brief captures a kernel that transforms an input range to an output range + + @tparam I input iterator type + @tparam O output iterator type + @tparam C unary operator type + + @param first iterator to the beginning of the input range + @param last iterator to the end of the input range + @param output iterator to the beginning of the output range + @param op unary operator to apply to transform each item in the range + + @return cudaTask handle + + This method is equivalent to the parallel execution of the following loop on a GPU: + + @code{.cpp} + while (first != last) { + *output++ = op(*first++); + } + @endcode + */ + template + cudaTask transform(I first, I last, O output, C op); + + /** + @brief updates a capture task to a transform kernel task + + This method is similar to cudaFlowCapturer::transform but operates + on an existing task. + */ + template + void transform(cudaTask task, I first, I last, O output, C op); + + /** + @brief captures a kernel that transforms two input ranges to an output range + + @tparam I1 first input iterator type + @tparam I2 second input iterator type + @tparam O output iterator type + @tparam C unary operator type + + @param first1 iterator to the beginning of the input range + @param last1 iterator to the end of the input range + @param first2 iterato + @param output iterator to the beginning of the output range + @param op binary operator to apply to transform each pair of items in the + two input ranges + + @return cudaTask handle + + This method is equivalent to the parallel execution of the following loop on a GPU: + + @code{.cpp} + while (first1 != last1) { + *output++ = op(*first1++, *first2++); + } + @endcode + */ + template + cudaTask transform(I1 first1, I1 last1, I2 first2, O output, C op); + + /** + @brief updates a capture task to a transform kernel task + + This method is similar to cudaFlowCapturer::transform but operates + on an existing task. 
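A minimal sketch (@c a, @c b, @c c, and @c N are placeholders for
device-visible ranges; the device lambda assumes extended-lambda support):

@code{.cpp}
// elementwise sum of two input ranges into a third range
auto task = capturer.transform(a, a + N, b, c,
  [] __device__ (float x, float y) { return x + y; }
);
@endcode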
+ */ + template + void transform( + cudaTask task, I1 first1, I1 last1, I2 first2, O output, C op + ); + + // ------------------------------------------------------------------------ + // Capturing methods + // ------------------------------------------------------------------------ + + /** + @brief selects a different optimization algorithm + + @tparam OPT optimizer type + @tparam ArgsT arguments types + + @param args arguments to forward to construct the optimizer + + @return a reference to the optimizer + + We currently supports the following optimization algorithms to capture + a user-described %cudaFlow: + + tf::cudaFlowSequentialOptimizer + + tf::cudaFlowRoundRobinOptimizer + + tf::cudaFlowLinearOptimizer + + By default, tf::cudaFlowCapturer uses the round-robin optimization + algorithm with four streams to transform a user-level graph into + a native CUDA graph. + */ + template + OPT& make_optimizer(ArgsT&&... args); + + /** + @brief captures the cudaFlow and turns it into a CUDA Graph + */ + cudaGraph_t capture(); + + // ------------------------------------------------------------------------ + // offload methods + // ------------------------------------------------------------------------ + + /** + @brief offloads the %cudaFlowCapturer onto a GPU asynchronously via a stream + + @param stream stream for performing this operation + + Offloads the present %cudaFlowCapturer onto a GPU asynchronously via + the given stream. + + An offloaded %cudaFlowCapturer forces the underlying graph to be instantiated. + After the instantiation, you should not modify the graph topology + but update node parameters. + */ + void run(cudaStream_t stream); + + /** + @brief acquires a reference to the underlying CUDA graph + */ + cudaGraph_t native_graph(); + + /** + @brief acquires a reference to the underlying CUDA graph executable + */ + cudaGraphExec_t native_executable(); + + private: + + cudaFlowGraph _cfg; + + Optimizer _optimizer; + + cudaGraphExec _exe {nullptr}; +}; + +// Function: empty +inline bool cudaFlowCapturer::empty() const { + return _cfg.empty(); +} + +// Function: num_tasks +inline size_t cudaFlowCapturer::num_tasks() const { + return _cfg._nodes.size(); +} + +// Procedure: clear +inline void cudaFlowCapturer::clear() { + _exe.clear(); + _cfg.clear(); +} + +// Procedure: dump +inline void cudaFlowCapturer::dump(std::ostream& os) const { + _cfg.dump(os, nullptr, ""); +} + +// Procedure: dump_native_graph +inline void cudaFlowCapturer::dump_native_graph(std::ostream& os) const { + cuda_dump_graph(os, _cfg._native_handle); +} + +// Function: capture +template , void>* +> +cudaTask cudaFlowCapturer::on(C&& callable) { + auto node = _cfg.emplace_back(_cfg, + std::in_place_type_t{}, std::forward(callable) + ); + return cudaTask(node); +} + +// Function: noop +inline cudaTask cudaFlowCapturer::noop() { + return on([](cudaStream_t){}); +} + +// Function: noop +inline void cudaFlowCapturer::noop(cudaTask task) { + on(task, [](cudaStream_t){}); +} + +// Function: memcpy +inline cudaTask cudaFlowCapturer::memcpy( + void* dst, const void* src, size_t count +) { + return on([dst, src, count] (cudaStream_t stream) mutable { + TF_CHECK_CUDA( + cudaMemcpyAsync(dst, src, count, cudaMemcpyDefault, stream), + "failed to capture memcpy" + ); + }); +} + +// Function: copy +template , void>*> +cudaTask cudaFlowCapturer::copy(T* tgt, const T* src, size_t num) { + return on([tgt, src, num] (cudaStream_t stream) mutable { + TF_CHECK_CUDA( + cudaMemcpyAsync(tgt, src, sizeof(T)*num, cudaMemcpyDefault, stream), 
+ "failed to capture copy" + ); + }); +} + +// Function: memset +inline cudaTask cudaFlowCapturer::memset(void* ptr, int v, size_t n) { + return on([ptr, v, n] (cudaStream_t stream) mutable { + TF_CHECK_CUDA( + cudaMemsetAsync(ptr, v, n, stream), "failed to capture memset" + ); + }); +} + +// Function: kernel +template +cudaTask cudaFlowCapturer::kernel( + dim3 g, dim3 b, size_t s, F f, ArgsT&&... args +) { + return on([g, b, s, f, args...] (cudaStream_t stream) mutable { + f<<>>(args...); + }); +} + +// Function: capture +inline cudaGraph_t cudaFlowCapturer::capture() { + return std::visit( + [this](auto&& opt){ return opt._optimize(_cfg); }, _optimizer + ); +} + +// Procedure: run +inline void cudaFlowCapturer::run(cudaStream_t stream) { + + // If the topology got changed, we need to destroy the executable + // and create a new one + if(_cfg._state & cudaFlowGraph::CHANGED) { + _cfg._native_handle.reset(capture()); + _exe.instantiate(_cfg._native_handle); + } + // if the graph is just updated (i.e., topology does not change), + // we can skip part of the optimization and just update the executable + // with the new captured graph + else if(_cfg._state & cudaFlowGraph::UPDATED) { + // TODO: skip part of the optimization (e.g., levelization) + _cfg._native_handle.reset(capture()); + if(_exe.update(_cfg._native_handle) != cudaGraphExecUpdateSuccess) { + _exe.instantiate(_cfg._native_handle); + } + } + + // run the executable (should exist) + _exe.launch(stream); + + _cfg._state = cudaFlowGraph::OFFLOADED; +} + +// Function: native_graph +inline cudaGraph_t cudaFlowCapturer::native_graph() { + return _cfg._native_handle; +} + +// Function: native_executable +inline cudaGraphExec_t cudaFlowCapturer::native_executable() { + return _exe; +} + +// Function: on +template , void>* +> +void cudaFlowCapturer::on(cudaTask task, C&& callable) { + + if(task.type() != cudaTaskType::CAPTURE) { + TF_THROW("invalid cudaTask type (must be CAPTURE)"); + } + + _cfg._state |= cudaFlowGraph::UPDATED; + + std::get_if(&task._node->_handle)->work = + std::forward(callable); +} + +// Function: memcpy +inline void cudaFlowCapturer::memcpy( + cudaTask task, void* dst, const void* src, size_t count +) { + on(task, [dst, src, count](cudaStream_t stream) mutable { + TF_CHECK_CUDA( + cudaMemcpyAsync(dst, src, count, cudaMemcpyDefault, stream), + "failed to capture memcpy" + ); + }); +} + +// Function: copy +template , void>* +> +void cudaFlowCapturer::copy( + cudaTask task, T* tgt, const T* src, size_t num +) { + on(task, [tgt, src, num] (cudaStream_t stream) mutable { + TF_CHECK_CUDA( + cudaMemcpyAsync(tgt, src, sizeof(T)*num, cudaMemcpyDefault, stream), + "failed to capture copy" + ); + }); +} + +// Function: memset +inline void cudaFlowCapturer::memset( + cudaTask task, void* ptr, int v, size_t n +) { + on(task, [ptr, v, n] (cudaStream_t stream) mutable { + TF_CHECK_CUDA( + cudaMemsetAsync(ptr, v, n, stream), "failed to capture memset" + ); + }); +} + +// Function: kernel +template +void cudaFlowCapturer::kernel( + cudaTask task, dim3 g, dim3 b, size_t s, F f, ArgsT&&... args +) { + on(task, [g, b, s, f, args...] (cudaStream_t stream) mutable { + f<<>>(args...); + }); +} + +// Function: make_optimizer +template +OPT& cudaFlowCapturer::make_optimizer(ArgsT&&... 
args) { + return _optimizer.emplace(std::forward(args)...); +} + +} // end of namespace tf ----------------------------------------------------- + diff --git a/bundled/taskflow-3.6.0/include/cuda/cuda_device.hpp b/bundled/taskflow-3.6.0/include/cuda/cuda_device.hpp new file mode 100644 index 0000000000..016b2a6f64 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/cuda/cuda_device.hpp @@ -0,0 +1,342 @@ +#pragma once + +#include "cuda_error.hpp" + +/** +@file cuda_device.hpp +@brief CUDA device utilities include file +*/ + +namespace tf { + +/** +@brief queries the number of available devices +*/ +inline size_t cuda_get_num_devices() { + int N = 0; + TF_CHECK_CUDA(cudaGetDeviceCount(&N), "failed to get device count"); + return static_cast(N); +} + +/** +@brief gets the current device associated with the caller thread +*/ +inline int cuda_get_device() { + int id; + TF_CHECK_CUDA(cudaGetDevice(&id), "failed to get current device id"); + return id; +} + +/** +@brief switches to a given device context +*/ +inline void cuda_set_device(int id) { + TF_CHECK_CUDA(cudaSetDevice(id), "failed to switch to device ", id); +} + +/** +@brief obtains the device property +*/ +inline void cuda_get_device_property(int i, cudaDeviceProp& p) { + TF_CHECK_CUDA( + cudaGetDeviceProperties(&p, i), "failed to get property of device ", i + ); +} + +/** +@brief obtains the device property +*/ +inline cudaDeviceProp cuda_get_device_property(int i) { + cudaDeviceProp p; + TF_CHECK_CUDA( + cudaGetDeviceProperties(&p, i), "failed to get property of device ", i + ); + return p; +} + +/** +@brief dumps the device property +*/ +inline void cuda_dump_device_property(std::ostream& os, const cudaDeviceProp& p) { + + os << "Major revision number: " << p.major << '\n' + << "Minor revision number: " << p.minor << '\n' + << "Name: " << p.name << '\n' + << "Total global memory: " << p.totalGlobalMem << '\n' + << "Total shared memory per block: " << p.sharedMemPerBlock << '\n' + << "Total registers per block: " << p.regsPerBlock << '\n' + << "Warp size: " << p.warpSize << '\n' + << "Maximum memory pitch: " << p.memPitch << '\n' + << "Maximum threads per block: " << p.maxThreadsPerBlock << '\n'; + + os << "Maximum dimension of block: "; + for (int i = 0; i < 3; ++i) { + if(i) os << 'x'; + os << p.maxThreadsDim[i]; + } + os << '\n'; + + os << "Maximum dimenstion of grid: "; + for (int i = 0; i < 3; ++i) { + if(i) os << 'x'; + os << p.maxGridSize[i];; + } + os << '\n'; + + os << "Clock rate: " << p.clockRate << '\n' + << "Total constant memory: " << p.totalConstMem << '\n' + << "Texture alignment: " << p.textureAlignment << '\n' + << "Concurrent copy and execution: " << p.deviceOverlap << '\n' + << "Number of multiprocessors: " << p.multiProcessorCount << '\n' + << "Kernel execution timeout: " << p.kernelExecTimeoutEnabled << '\n' + << "GPU sharing Host Memory: " << p.integrated << '\n' + << "Host page-locked mem mapping: " << p.canMapHostMemory << '\n' + << "Alignment for Surfaces: " << p.surfaceAlignment << '\n' + << "Device has ECC support: " << p.ECCEnabled << '\n' + << "Unified Addressing (UVA): " << p.unifiedAddressing << '\n'; +} + +/** +@brief queries the maximum threads per block on a device +*/ +inline size_t cuda_get_device_max_threads_per_block(int d) { + int threads = 0; + TF_CHECK_CUDA( + cudaDeviceGetAttribute(&threads, cudaDevAttrMaxThreadsPerBlock, d), + "failed to query the maximum threads per block on device ", d + ) + return threads; +} + +/** +@brief queries the maximum x-dimension per block on a device +*/ +inline 
size_t cuda_get_device_max_x_dim_per_block(int d) { + int dim = 0; + TF_CHECK_CUDA( + cudaDeviceGetAttribute(&dim, cudaDevAttrMaxBlockDimX, d), + "failed to query the maximum x-dimension per block on device ", d + ) + return dim; +} + +/** +@brief queries the maximum y-dimension per block on a device +*/ +inline size_t cuda_get_device_max_y_dim_per_block(int d) { + int dim = 0; + TF_CHECK_CUDA( + cudaDeviceGetAttribute(&dim, cudaDevAttrMaxBlockDimY, d), + "failed to query the maximum y-dimension per block on device ", d + ) + return dim; +} + +/** +@brief queries the maximum z-dimension per block on a device +*/ +inline size_t cuda_get_device_max_z_dim_per_block(int d) { + int dim = 0; + TF_CHECK_CUDA( + cudaDeviceGetAttribute(&dim, cudaDevAttrMaxBlockDimZ, d), + "failed to query the maximum z-dimension per block on device ", d + ) + return dim; +} + +/** +@brief queries the maximum x-dimension per grid on a device +*/ +inline size_t cuda_get_device_max_x_dim_per_grid(int d) { + int dim = 0; + TF_CHECK_CUDA( + cudaDeviceGetAttribute(&dim, cudaDevAttrMaxGridDimX, d), + "failed to query the maximum x-dimension per grid on device ", d + ) + return dim; +} + +/** +@brief queries the maximum y-dimension per grid on a device +*/ +inline size_t cuda_get_device_max_y_dim_per_grid(int d) { + int dim = 0; + TF_CHECK_CUDA( + cudaDeviceGetAttribute(&dim, cudaDevAttrMaxGridDimY, d), + "failed to query the maximum y-dimension per grid on device ", d + ) + return dim; +} + +/** +@brief queries the maximum z-dimension per grid on a device +*/ +inline size_t cuda_get_device_max_z_dim_per_grid(int d) { + int dim = 0; + TF_CHECK_CUDA( + cudaDeviceGetAttribute(&dim, cudaDevAttrMaxGridDimZ, d), + "failed to query the maximum z-dimension per grid on device ", d + ) + return dim; +} + +/** +@brief queries the maximum shared memory size in bytes per block on a device +*/ +inline size_t cuda_get_device_max_shm_per_block(int d) { + int num = 0; + TF_CHECK_CUDA( + cudaDeviceGetAttribute(&num, cudaDevAttrMaxSharedMemoryPerBlock, d), + "failed to query the maximum shared memory per block on device ", d + ) + return num; +} + +/** +@brief queries the warp size on a device +*/ +inline size_t cuda_get_device_warp_size(int d) { + int num = 0; + TF_CHECK_CUDA( + cudaDeviceGetAttribute(&num, cudaDevAttrWarpSize, d), + "failed to query the warp size per block on device ", d + ) + return num; +} + +/** +@brief queries the major number of compute capability of a device +*/ +inline int cuda_get_device_compute_capability_major(int d) { + int num = 0; + TF_CHECK_CUDA( + cudaDeviceGetAttribute(&num, cudaDevAttrComputeCapabilityMajor, d), + "failed to query the major number of compute capability of device ", d + ) + return num; +} + +/** +@brief queries the minor number of compute capability of a device +*/ +inline int cuda_get_device_compute_capability_minor(int d) { + int num = 0; + TF_CHECK_CUDA( + cudaDeviceGetAttribute(&num, cudaDevAttrComputeCapabilityMinor, d), + "failed to query the minor number of compute capability of device ", d + ) + return num; +} + +/** +@brief queries if the device supports unified addressing +*/ +inline bool cuda_get_device_unified_addressing(int d) { + int num = 0; + TF_CHECK_CUDA( + cudaDeviceGetAttribute(&num, cudaDevAttrUnifiedAddressing, d), + "failed to query unified addressing status on device ", d + ) + return num; +} + +// ---------------------------------------------------------------------------- +// CUDA Version +// 
---------------------------------------------------------------------------- + +/** +@brief queries the latest CUDA version (1000 * major + 10 * minor) supported by the driver +*/ +inline int cuda_get_driver_version() { + int num = 0; + TF_CHECK_CUDA( + cudaDriverGetVersion(&num), + "failed to query the latest cuda version supported by the driver" + ); + return num; +} + +/** +@brief queries the CUDA Runtime version (1000 * major + 10 * minor) +*/ +inline int cuda_get_runtime_version() { + int num = 0; + TF_CHECK_CUDA( + cudaRuntimeGetVersion(&num), "failed to query cuda runtime version" + ); + return num; +} + +// ---------------------------------------------------------------------------- +// cudaScopedDevice +// ---------------------------------------------------------------------------- + +/** @class cudaScopedDevice + +@brief class to create an RAII-styled context switch + +Sample usage: + +@code{.cpp} +{ + tf::cudaScopedDevice device(1); // switch to the device context 1 + + // create a stream under device context 1 + cudaStream_t stream; + cudaStreamCreate(&stream); + +} // leaving the scope and goes back to the previous device context +@endcode + +%cudaScopedDevice is neither movable nor copyable. +*/ +class cudaScopedDevice { + + public: + + /** + @brief constructs a RAII-styled device switcher + + @param device device context to scope in the guard + */ + explicit cudaScopedDevice(int device); + + /** + @brief destructs the guard and switches back to the previous device context + */ + ~cudaScopedDevice(); + + private: + + cudaScopedDevice() = delete; + cudaScopedDevice(const cudaScopedDevice&) = delete; + cudaScopedDevice(cudaScopedDevice&&) = delete; + + int _p; +}; + +// Constructor +inline cudaScopedDevice::cudaScopedDevice(int dev) { + TF_CHECK_CUDA(cudaGetDevice(&_p), "failed to get current device scope"); + if(_p == dev) { + _p = -1; + } + else { + TF_CHECK_CUDA(cudaSetDevice(dev), "failed to scope on device ", dev); + } +} + +// Destructor +inline cudaScopedDevice::~cudaScopedDevice() { + if(_p != -1) { + cudaSetDevice(_p); + //TF_CHECK_CUDA(cudaSetDevice(_p), "failed to scope back to device ", _p); + } +} + +} // end of namespace cuda --------------------------------------------------- + + + + + diff --git a/bundled/taskflow-3.6.0/include/cuda/cuda_error.hpp b/bundled/taskflow-3.6.0/include/cuda/cuda_error.hpp new file mode 100644 index 0000000000..c38e1324c0 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/cuda/cuda_error.hpp @@ -0,0 +1,26 @@ +#pragma once + +#include +#include +#include +#include + +#include "../utility/stream.hpp" + +#define TF_CUDA_EXPAND( x ) x +#define TF_CUDA_REMOVE_FIRST_HELPER(N, ...) __VA_ARGS__ +#define TF_CUDA_REMOVE_FIRST(...) TF_CUDA_EXPAND(TF_CUDA_REMOVE_FIRST_HELPER(__VA_ARGS__)) +#define TF_CUDA_GET_FIRST_HELPER(N, ...) N +#define TF_CUDA_GET_FIRST(...) TF_CUDA_EXPAND(TF_CUDA_GET_FIRST_HELPER(__VA_ARGS__)) + +#define TF_CHECK_CUDA(...) 
\ +if(TF_CUDA_GET_FIRST(__VA_ARGS__) != cudaSuccess) { \ + std::ostringstream oss; \ + auto __ev__ = TF_CUDA_GET_FIRST(__VA_ARGS__); \ + oss << "[" << __FILE__ << ":" << __LINE__ << "] " \ + << (cudaGetErrorString(__ev__)) << " (" \ + << (cudaGetErrorName(__ev__)) << ") - "; \ + tf::ostreamize(oss, TF_CUDA_REMOVE_FIRST(__VA_ARGS__)); \ + throw std::runtime_error(oss.str()); \ +} + diff --git a/bundled/taskflow-3.6.0/include/cuda/cuda_execution_policy.hpp b/bundled/taskflow-3.6.0/include/cuda/cuda_execution_policy.hpp new file mode 100644 index 0000000000..ae90d98aa5 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/cuda/cuda_execution_policy.hpp @@ -0,0 +1,155 @@ +#pragma once + +#include "cuda_error.hpp" + +/** +@file cuda_execution_policy.hpp +@brief CUDA execution policy include file +*/ + +namespace tf { + +/** +@class cudaExecutionPolicy + +@brief class to define execution policy for CUDA standard algorithms + +@tparam NT number of threads per block +@tparam VT number of work units per thread + +Execution policy configures the kernel execution parameters in CUDA algorithms. +The first template argument, @c NT, the number of threads per block should +always be a power-of-two number. +The second template argument, @c VT, the number of work units per thread +is recommended to be an odd number to avoid bank conflict. + +Details can be referred to @ref CUDASTDExecutionPolicy. +*/ +template +class cudaExecutionPolicy { + + static_assert(is_pow2(NT), "max # threads per block must be a power of two"); + + public: + + /** @brief static constant for getting the number of threads per block */ + const static unsigned nt = NT; + + /** @brief static constant for getting the number of work units per thread */ + const static unsigned vt = VT; + + /** @brief static constant for getting the number of elements to process per block */ + const static unsigned nv = NT*VT; + + /** + @brief constructs an execution policy object with default stream + */ + cudaExecutionPolicy() = default; + + /** + @brief constructs an execution policy object with the given stream + */ + explicit cudaExecutionPolicy(cudaStream_t s) : _stream{s} {} + + /** + @brief queries the associated stream + */ + cudaStream_t stream() noexcept { return _stream; }; + + /** + @brief assigns a stream + */ + void stream(cudaStream_t stream) noexcept { _stream = stream; } + + /** + @brief queries the number of blocks to accommodate N elements + */ + static unsigned num_blocks(unsigned N) { return (N + nv - 1) / nv; } + + // -------------------------------------------------------------------------- + // Buffer Sizes for Standard Algorithms + // -------------------------------------------------------------------------- + + /** + @brief queries the buffer size in bytes needed to call reduce kernels + + @tparam T value type + + @param count number of elements to reduce + + The function is used to allocate a buffer for calling tf::cuda_reduce, + tf::cuda_uninitialized_reduce, tf::cuda_transform_reduce, and + tf::cuda_uninitialized_transform_reduce. + */ + template + static unsigned reduce_bufsz(unsigned count); + + /** + @brief queries the buffer size in bytes needed to call tf::cuda_min_element + + @tparam T value type + + @param count number of elements to search + + The function is used to decide the buffer size in bytes for calling + tf::cuda_min_element. 
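A typical pattern, sketched with placeholder names (@c N and the @c float
value type are assumptions):

@code{.cpp}
// query the required temporary storage and allocate it on the device
auto bytes = tf::cudaDefaultExecutionPolicy::min_element_bufsz<float>(N);
auto* buf  = tf::cuda_malloc_device<char>(bytes);

// ... pass buf to tf::cuda_min_element, then release it with tf::cuda_free(buf)
@endcode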
+ */ + template + static unsigned min_element_bufsz(unsigned count); + + /** + @brief queries the buffer size in bytes needed to call tf::cuda_max_element + + @tparam T value type + + @param count number of elements to search + + The function is used to decide the buffer size in bytes for calling + tf::cuda_max_element. + */ + template + static unsigned max_element_bufsz(unsigned count); + + /** + @brief queries the buffer size in bytes needed to call scan kernels + + @tparam T value type + + @param count number of elements to scan + + The function is used to allocate a buffer for calling + tf::cuda_inclusive_scan, tf::cuda_exclusive_scan, + tf::cuda_transform_inclusive_scan, and tf::cuda_transform_exclusive_scan. + */ + template + static unsigned scan_bufsz(unsigned count); + + /** + @brief queries the buffer size in bytes needed for CUDA merge algorithms + + @param a_count number of elements in the first vector to merge + @param b_count number of elements in the second vector to merge + + The buffer size of merge algorithm does not depend on the data type. + The buffer is purely used only for storing temporary indices + (of type @c unsigned) required during the merge process. + + The function is used to allocate a buffer for calling + tf::cuda_merge and tf::cuda_merge_by_key. + */ + inline static unsigned merge_bufsz(unsigned a_count, unsigned b_count); + + private: + + cudaStream_t _stream {0}; +}; + +/** +@brief default execution policy + */ +using cudaDefaultExecutionPolicy = cudaExecutionPolicy<512, 7>; + +} // end of namespace tf ----------------------------------------------------- + + + diff --git a/bundled/taskflow-3.6.0/include/cuda/cuda_graph.hpp b/bundled/taskflow-3.6.0/include/cuda/cuda_graph.hpp new file mode 100644 index 0000000000..f239c8d431 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/cuda/cuda_graph.hpp @@ -0,0 +1,805 @@ +#pragma once + +#include "cuda_memory.hpp" +#include "cuda_stream.hpp" +#include "cuda_meta.hpp" + +#include "../utility/traits.hpp" + +namespace tf { + +// ---------------------------------------------------------------------------- +// cudaGraph_t routines +// ---------------------------------------------------------------------------- + +/** +@brief gets the memcpy node parameter of a copy task +*/ +template , void>* = nullptr +> +cudaMemcpy3DParms cuda_get_copy_parms(T* tgt, const T* src, size_t num) { + + using U = std::decay_t; + + cudaMemcpy3DParms p; + + p.srcArray = nullptr; + p.srcPos = ::make_cudaPos(0, 0, 0); + p.srcPtr = ::make_cudaPitchedPtr(const_cast(src), num*sizeof(U), num, 1); + p.dstArray = nullptr; + p.dstPos = ::make_cudaPos(0, 0, 0); + p.dstPtr = ::make_cudaPitchedPtr(tgt, num*sizeof(U), num, 1); + p.extent = ::make_cudaExtent(num*sizeof(U), 1, 1); + p.kind = cudaMemcpyDefault; + + return p; +} + +/** +@brief gets the memcpy node parameter of a memcpy task (untyped) +*/ +inline cudaMemcpy3DParms cuda_get_memcpy_parms( + void* tgt, const void* src, size_t bytes +) { + + // Parameters in cudaPitchedPtr + // d - Pointer to allocated memory + // p - Pitch of allocated memory in bytes + // xsz - Logical width of allocation in elements + // ysz - Logical height of allocation in elements + cudaMemcpy3DParms p; + p.srcArray = nullptr; + p.srcPos = ::make_cudaPos(0, 0, 0); + p.srcPtr = ::make_cudaPitchedPtr(const_cast(src), bytes, bytes, 1); + p.dstArray = nullptr; + p.dstPos = ::make_cudaPos(0, 0, 0); + p.dstPtr = ::make_cudaPitchedPtr(tgt, bytes, bytes, 1); + p.extent = ::make_cudaExtent(bytes, 1, 1); + p.kind = cudaMemcpyDefault; 
+ + return p; +} + +/** +@brief gets the memset node parameter of a memcpy task (untyped) +*/ +inline cudaMemsetParams cuda_get_memset_parms(void* dst, int ch, size_t count) { + + cudaMemsetParams p; + p.dst = dst; + p.value = ch; + p.pitch = 0; + //p.elementSize = (count & 1) == 0 ? ((count & 3) == 0 ? 4 : 2) : 1; + //p.width = (count & 1) == 0 ? ((count & 3) == 0 ? count >> 2 : count >> 1) : count; + p.elementSize = 1; // either 1, 2, or 4 + p.width = count; + p.height = 1; + + return p; +} + +/** +@brief gets the memset node parameter of a fill task (typed) +*/ +template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr +> +cudaMemsetParams cuda_get_fill_parms(T* dst, T value, size_t count) { + + cudaMemsetParams p; + p.dst = dst; + + // perform bit-wise copy + p.value = 0; // crucial + static_assert(sizeof(T) <= sizeof(p.value), "internal error"); + std::memcpy(&p.value, &value, sizeof(T)); + + p.pitch = 0; + p.elementSize = sizeof(T); // either 1, 2, or 4 + p.width = count; + p.height = 1; + + return p; +} + +/** +@brief gets the memset node parameter of a zero task (typed) +*/ +template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr +> +cudaMemsetParams cuda_get_zero_parms(T* dst, size_t count) { + + cudaMemsetParams p; + p.dst = dst; + p.value = 0; + p.pitch = 0; + p.elementSize = sizeof(T); // either 1, 2, or 4 + p.width = count; + p.height = 1; + + return p; +} + +/** +@brief queries the number of root nodes in a native CUDA graph +*/ +inline size_t cuda_graph_get_num_root_nodes(cudaGraph_t graph) { + size_t num_nodes; + TF_CHECK_CUDA( + cudaGraphGetRootNodes(graph, nullptr, &num_nodes), + "failed to get native graph root nodes" + ); + return num_nodes; +} + +/** +@brief queries the number of nodes in a native CUDA graph +*/ +inline size_t cuda_graph_get_num_nodes(cudaGraph_t graph) { + size_t num_nodes; + TF_CHECK_CUDA( + cudaGraphGetNodes(graph, nullptr, &num_nodes), + "failed to get native graph nodes" + ); + return num_nodes; +} + +/** +@brief queries the number of edges in a native CUDA graph +*/ +inline size_t cuda_graph_get_num_edges(cudaGraph_t graph) { + size_t num_edges; + TF_CHECK_CUDA( + cudaGraphGetEdges(graph, nullptr, nullptr, &num_edges), + "failed to get native graph edges" + ); + return num_edges; +} + +/** +@brief acquires the nodes in a native CUDA graph +*/ +inline std::vector cuda_graph_get_nodes(cudaGraph_t graph) { + size_t num_nodes = cuda_graph_get_num_nodes(graph); + std::vector nodes(num_nodes); + TF_CHECK_CUDA( + cudaGraphGetNodes(graph, nodes.data(), &num_nodes), + "failed to get native graph nodes" + ); + return nodes; +} + +/** +@brief acquires the root nodes in a native CUDA graph +*/ +inline std::vector cuda_graph_get_root_nodes(cudaGraph_t graph) { + size_t num_nodes = cuda_graph_get_num_root_nodes(graph); + std::vector nodes(num_nodes); + TF_CHECK_CUDA( + cudaGraphGetRootNodes(graph, nodes.data(), &num_nodes), + "failed to get native graph nodes" + ); + return nodes; +} + +/** +@brief acquires the edges in a native CUDA graph +*/ +inline std::vector> +cuda_graph_get_edges(cudaGraph_t graph) { + size_t num_edges = cuda_graph_get_num_edges(graph); + std::vector froms(num_edges), tos(num_edges); + TF_CHECK_CUDA( + cudaGraphGetEdges(graph, froms.data(), tos.data(), &num_edges), + "failed to get native graph edges" + ); + std::vector> edges(num_edges); + for(size_t i=0; i +void cuda_dump_graph(T& os, cudaGraph_t graph) { + + os << "digraph cudaGraph {\n"; + + std::stack> stack; + 
stack.push(std::make_tuple(graph, nullptr, 1)); + + int pl = 0; + + while(stack.empty() == false) { + + auto [graph, parent, l] = stack.top(); + stack.pop(); + + for(int i=0; i " << 'p' << to << ";\n"; + } + + for(auto& node : nodes) { + auto type = cuda_get_graph_node_type(node); + if(type == cudaGraphNodeTypeGraph) { + + cudaGraph_t graph; + TF_CHECK_CUDA(cudaGraphChildGraphNodeGetGraph(node, &graph), ""); + stack.push(std::make_tuple(graph, node, l+1)); + + os << 'p' << node << "[" + << "shape=folder, style=filled, fontcolor=white, fillcolor=purple, " + << "label=\"cudaGraph-L" << l+1 + << "\"];\n"; + } + else { + os << 'p' << node << "[label=\"" + << cuda_graph_node_type_to_string(type) + << "\"];\n"; + } + } + + // precede to parent + if(parent != nullptr) { + std::unordered_set successors; + for(const auto& p : edges) { + successors.insert(p.first); + } + for(auto node : nodes) { + if(successors.find(node) == successors.end()) { + os << 'p' << node << " -> " << 'p' << parent << ";\n"; + } + } + } + + // set the previous level + pl = l; + } + + for(int i=0; i<=pl; i++) { + os << "}\n"; + } +} + +// ---------------------------------------------------------------------------- +// cudaGraph +// ---------------------------------------------------------------------------- + +/** +@private +*/ +struct cudaGraphCreator { + cudaGraph_t operator () () const { + cudaGraph_t g; + TF_CHECK_CUDA(cudaGraphCreate(&g, 0), "failed to create a CUDA native graph"); + return g; + } +}; + +/** +@private +*/ +struct cudaGraphDeleter { + void operator () (cudaGraph_t g) const { + if(g) { + cudaGraphDestroy(g); + } + } +}; + +/** +@class cudaGraph + +@brief class to create an RAII-styled wrapper over a CUDA executable graph + +A cudaGraph object is an RAII-styled wrapper over +a native CUDA graph (@c cudaGraph_t). +A cudaGraph object is move-only. +*/ +class cudaGraph : + public cudaObject { + + public: + + /** + @brief constructs an RAII-styled object from the given CUDA exec + + Constructs a cudaGraph object from the given CUDA graph @c native. + */ + explicit cudaGraph(cudaGraph_t native) : cudaObject(native) { } + + /** + @brief constructs a cudaGraph object with a new CUDA graph + */ + cudaGraph() = default; +}; + +// ---------------------------------------------------------------------------- +// cudaGraphExec +// ---------------------------------------------------------------------------- + +/** +@private +*/ +struct cudaGraphExecCreator { + cudaGraphExec_t operator () () const { return nullptr; } +}; + +/** +@private +*/ +struct cudaGraphExecDeleter { + void operator () (cudaGraphExec_t executable) const { + if(executable) { + cudaGraphExecDestroy(executable); + } + } +}; + +/** +@class cudaGraphExec + +@brief class to create an RAII-styled wrapper over a CUDA executable graph + +A cudaGraphExec object is an RAII-styled wrapper over +a native CUDA executable graph (@c cudaGraphExec_t). +A cudaGraphExec object is move-only. +*/ +class cudaGraphExec : + public cudaObject { + + public: + + /** + @brief constructs an RAII-styled object from the given CUDA exec + + Constructs a cudaGraphExec object which owns @c exec. 
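A minimal sketch of the instantiate/launch cycle (@c graph is an existing
@c cudaGraph_t and @c stream an existing CUDA stream; both are placeholders):

@code{.cpp}
tf::cudaGraphExec exec;
exec.instantiate(graph);        // build the executable from the graph
exec.launch(stream);            // run it through the given stream
cudaStreamSynchronize(stream);
@endcode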
+ */ + explicit cudaGraphExec(cudaGraphExec_t exec) : cudaObject(exec) { } + + /** + @brief default constructor + */ + cudaGraphExec() = default; + + /** + @brief instantiates the exexutable from the given CUDA graph + */ + void instantiate(cudaGraph_t graph) { + cudaGraphExecDeleter {} (object); + TF_CHECK_CUDA( + cudaGraphInstantiate(&object, graph, nullptr, nullptr, 0), + "failed to create an executable graph" + ); + } + + /** + @brief updates the exexutable from the given CUDA graph + */ + cudaGraphExecUpdateResult update(cudaGraph_t graph) { + cudaGraphNode_t error_node; + cudaGraphExecUpdateResult error_result; + cudaGraphExecUpdate(object, graph, &error_node, &error_result); + return error_result; + } + + /** + @brief launchs the executable graph via the given stream + */ + void launch(cudaStream_t stream) { + TF_CHECK_CUDA( + cudaGraphLaunch(object, stream), "failed to launch a CUDA executable graph" + ); + } +}; + +// ---------------------------------------------------------------------------- +// cudaFlowGraph class +// ---------------------------------------------------------------------------- + +// class: cudaFlowGraph +class cudaFlowGraph { + + friend class cudaFlowNode; + friend class cudaTask; + friend class cudaFlowCapturer; + friend class cudaFlow; + friend class cudaFlowOptimizerBase; + friend class cudaFlowSequentialOptimizer; + friend class cudaFlowLinearOptimizer; + friend class cudaFlowRoundRobinOptimizer; + friend class Taskflow; + friend class Executor; + + constexpr static int OFFLOADED = 0x01; + constexpr static int CHANGED = 0x02; + constexpr static int UPDATED = 0x04; + + public: + + cudaFlowGraph() = default; + ~cudaFlowGraph() = default; + + cudaFlowGraph(const cudaFlowGraph&) = delete; + cudaFlowGraph(cudaFlowGraph&&) = default; + + cudaFlowGraph& operator = (const cudaFlowGraph&) = delete; + cudaFlowGraph& operator = (cudaFlowGraph&&) = default; + + template + cudaFlowNode* emplace_back(ArgsT&&...); + + bool empty() const; + + void clear(); + void dump(std::ostream&, const void*, const std::string&) const ; + + private: + + int _state{CHANGED}; + cudaGraph _native_handle {nullptr}; + std::vector> _nodes; +}; + +// ---------------------------------------------------------------------------- +// cudaFlowNode class +// ---------------------------------------------------------------------------- + +/** +@private +@class: cudaFlowNode +*/ +class cudaFlowNode { + + friend class cudaFlowGraph; + friend class cudaTask; + friend class cudaFlow; + friend class cudaFlowCapturer; + friend class cudaFlowOptimizerBase; + friend class cudaFlowSequentialOptimizer; + friend class cudaFlowLinearOptimizer; + friend class cudaFlowRoundRobinOptimizer; + friend class Taskflow; + friend class Executor; + + // Empty handle + struct Empty { + }; + + // Host handle + struct Host { + + template + Host(C&&); + + std::function func; + + static void callback(void*); + }; + + // Memset handle + struct Memset { + }; + + // Memcpy handle + struct Memcpy { + }; + + // Kernel handle + struct Kernel { + + template + Kernel(F&& f); + + void* func {nullptr}; + }; + + // Subflow handle + struct Subflow { + cudaFlowGraph cfg; + }; + + // Capture + struct Capture { + + template + Capture(C&&); + + std::function work; + + cudaEvent_t event; + size_t level; + size_t lid; + size_t idx; + }; + + using handle_t = std::variant< + Empty, + Host, + Memset, + Memcpy, + Kernel, + Subflow, + Capture + >; + + public: + + // variant index + constexpr static auto EMPTY = get_index_v; + constexpr static auto 
HOST = get_index_v; + constexpr static auto MEMSET = get_index_v; + constexpr static auto MEMCPY = get_index_v; + constexpr static auto KERNEL = get_index_v; + constexpr static auto SUBFLOW = get_index_v; + constexpr static auto CAPTURE = get_index_v; + + cudaFlowNode() = delete; + + template + cudaFlowNode(cudaFlowGraph&, ArgsT&&...); + + private: + + cudaFlowGraph& _cfg; + + std::string _name; + + handle_t _handle; + + cudaGraphNode_t _native_handle {nullptr}; + + SmallVector _successors; + SmallVector _dependents; + + void _precede(cudaFlowNode*); +}; + +// ---------------------------------------------------------------------------- +// cudaFlowNode definitions +// ---------------------------------------------------------------------------- + +// Host handle constructor +template +cudaFlowNode::Host::Host(C&& c) : func {std::forward(c)} { +} + +// Host callback +inline void cudaFlowNode::Host::callback(void* data) { + static_cast(data)->func(); +}; + +// Kernel handle constructor +template +cudaFlowNode::Kernel::Kernel(F&& f) : + func {std::forward(f)} { +} + +// Capture handle constructor +template +cudaFlowNode::Capture::Capture(C&& work) : + work {std::forward(work)} { +} + +// Constructor +template +cudaFlowNode::cudaFlowNode(cudaFlowGraph& graph, ArgsT&&... args) : + _cfg {graph}, + _handle {std::forward(args)...} { +} + +// Procedure: _precede +inline void cudaFlowNode::_precede(cudaFlowNode* v) { + + _cfg._state |= cudaFlowGraph::CHANGED; + + _successors.push_back(v); + v->_dependents.push_back(this); + + // capture node doesn't have the native graph yet + if(_handle.index() != cudaFlowNode::CAPTURE) { + TF_CHECK_CUDA( + cudaGraphAddDependencies( + _cfg._native_handle, &_native_handle, &v->_native_handle, 1 + ), + "failed to add a preceding link ", this, "->", v + ); + } +} + +// ---------------------------------------------------------------------------- +// cudaGraph definitions +// ---------------------------------------------------------------------------- + +// Function: empty +inline bool cudaFlowGraph::empty() const { + return _nodes.empty(); +} + +// Procedure: clear +inline void cudaFlowGraph::clear() { + _state |= cudaFlowGraph::CHANGED; + _nodes.clear(); + _native_handle.clear(); +} + +// Function: emplace_back +template +cudaFlowNode* cudaFlowGraph::emplace_back(ArgsT&&... 
args) { + + _state |= cudaFlowGraph::CHANGED; + + auto node = std::make_unique(std::forward(args)...); + _nodes.emplace_back(std::move(node)); + return _nodes.back().get(); + + // TODO: use object pool to save memory + //auto node = new cudaFlowNode(std::forward(args)...); + //_nodes.push_back(node); + //return node; +} + +// Procedure: dump the graph to a DOT format +inline void cudaFlowGraph::dump( + std::ostream& os, const void* root, const std::string& root_name +) const { + + // recursive dump with stack + std::stack> stack; + stack.push(std::make_tuple(this, nullptr, 1)); + + int pl = 0; + + while(!stack.empty()) { + + auto [graph, parent, l] = stack.top(); + stack.pop(); + + for(int i=0; i_name.empty()) os << 'p' << parent; + else os << parent->_name; + os << "\";\n" << "color=\"purple\"\n"; + } + + for(auto& node : graph->_nodes) { + + auto v = node.get(); + + os << 'p' << v << "[label=\""; + if(v->_name.empty()) { + os << 'p' << v << "\""; + } + else { + os << v->_name << "\""; + } + + switch(v->_handle.index()) { + case cudaFlowNode::KERNEL: + os << " style=\"filled\"" + << " color=\"white\" fillcolor=\"black\"" + << " fontcolor=\"white\"" + << " shape=\"box3d\""; + break; + + case cudaFlowNode::SUBFLOW: + stack.push(std::make_tuple( + &(std::get_if(&v->_handle)->cfg), v, l+1) + ); + os << " style=\"filled\"" + << " color=\"black\" fillcolor=\"purple\"" + << " fontcolor=\"white\"" + << " shape=\"folder\""; + break; + + default: + break; + } + + os << "];\n"; + + for(const auto s : v->_successors) { + os << 'p' << v << " -> " << 'p' << s << ";\n"; + } + + if(v->_successors.size() == 0) { + if(parent == nullptr) { + if(root) { + os << 'p' << v << " -> p" << root << ";\n"; + } + } + else { + os << 'p' << v << " -> p" << parent << ";\n"; + } + } + } + + // set the previous level + pl = l; + } + + for(int i=0; iN*sizeof(T) bytes of memory +on the given device @c d and returns a pointer to the starting address of +the device memory. +*/ +template +T* cuda_malloc_device(size_t N, int d) { + cudaScopedDevice ctx(d); + T* ptr {nullptr}; + TF_CHECK_CUDA( + cudaMalloc(&ptr, N*sizeof(T)), + "failed to allocate memory (", N*sizeof(T), "bytes) on device ", d + ) + return ptr; +} + +/** +@brief allocates memory on the current device associated with the caller + +The function calls malloc_device from the current device associated +with the caller. +*/ +template +T* cuda_malloc_device(size_t N) { + T* ptr {nullptr}; + TF_CHECK_CUDA( + cudaMalloc(&ptr, N*sizeof(T)), + "failed to allocate memory (", N*sizeof(T), "bytes)" + ) + return ptr; +} + +/** +@brief allocates shared memory for holding @c N elements of type @c T + +The function calls @c cudaMallocManaged to allocate N*sizeof(T) bytes +of memory and returns a pointer to the starting address of the shared memory. +*/ +template +T* cuda_malloc_shared(size_t N) { + T* ptr {nullptr}; + TF_CHECK_CUDA( + cudaMallocManaged(&ptr, N*sizeof(T)), + "failed to allocate shared memory (", N*sizeof(T), "bytes)" + ) + return ptr; +} + +/** +@brief frees memory on the GPU device + +@tparam T pointer type +@param ptr device pointer to memory to free +@param d device context identifier + +This methods call @c cudaFree to free the memory space pointed to by @c ptr +using the given device context. 
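A minimal round-trip sketch (the element count and device id are arbitrary):

@code{.cpp}
// allocate 1024 floats on device 0, use them, then release them
float* x = tf::cuda_malloc_device<float>(1024, 0);
// ... launch kernels on device 0 that read and write x ...
tf::cuda_free(x, 0);
@endcode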
+*/ +template +void cuda_free(T* ptr, int d) { + cudaScopedDevice ctx(d); + TF_CHECK_CUDA(cudaFree(ptr), "failed to free memory ", ptr, " on GPU ", d); +} + +/** +@brief frees memory on the GPU device + +@tparam T pointer type +@param ptr device pointer to memory to free + +This methods call @c cudaFree to free the memory space pointed to by @c ptr +using the current device context of the caller. +*/ +template +void cuda_free(T* ptr) { + TF_CHECK_CUDA(cudaFree(ptr), "failed to free memory ", ptr); +} + +/** +@brief copies data between host and device asynchronously through a stream + +@param stream stream identifier +@param dst destination memory address +@param src source memory address +@param count size in bytes to copy + +The method calls @c cudaMemcpyAsync with the given @c stream +using @c cudaMemcpyDefault to infer the memory space of the source and +the destination pointers. The memory areas may not overlap. +*/ +inline void cuda_memcpy_async( + cudaStream_t stream, void* dst, const void* src, size_t count +) { + TF_CHECK_CUDA( + cudaMemcpyAsync(dst, src, count, cudaMemcpyDefault, stream), + "failed to perform cudaMemcpyAsync" + ); +} + +/** +@brief initializes or sets GPU memory to the given value byte by byte + +@param stream stream identifier +@param devPtr pointer to GPU mempry +@param value value to set for each byte of the specified memory +@param count size in bytes to set + +The method calls @c cudaMemsetAsync with the given @c stream +to fill the first @c count bytes of the memory area pointed to by @c devPtr +with the constant byte value @c value. +*/ +inline void cuda_memset_async( + cudaStream_t stream, void* devPtr, int value, size_t count +){ + TF_CHECK_CUDA( + cudaMemsetAsync(devPtr, value, count, stream), + "failed to perform cudaMemsetAsync" + ); +} + +// ---------------------------------------------------------------------------- +// Shared Memory +// ---------------------------------------------------------------------------- +// +// Because dynamically sized shared memory arrays are declared "extern", +// we can't templatize them directly. To get around this, we declare a +// simple wrapper struct that will declare the extern array with a different +// name depending on the type. This avoids compiler errors about duplicate +// definitions. +// +// To use dynamically allocated shared memory in a templatized __global__ or +// __device__ function, just replace code like this: +// +// template +// __global__ void +// foo( T* g_idata, T* g_odata) +// { +// // Shared mem size is determined by the host app at run time +// extern __shared__ T sdata[]; +// ... +// doStuff(sdata); +// ... +// } +// +// With this: +// +// template +// __global__ void +// foo( T* g_idata, T* g_odata) +// { +// // Shared mem size is determined by the host app at run time +// cudaSharedMemory smem; +// T* sdata = smem.get(); +// ... +// doStuff(sdata); +// ... +// } +// ---------------------------------------------------------------------------- + +// This is the un-specialized struct. Note that we prevent instantiation of this +// struct by putting an undefined symbol in the function body so it won't compile. +/** +@private +*/ +template +struct cudaSharedMemory +{ + // Ensure that we won't compile any un-specialized types + __device__ T *get() + { + extern __device__ void error(void); + error(); + return NULL; + } +}; + +// Following are the specializations for the following types. 
+// int, uint, char, uchar, short, ushort, long, ulong, bool, float, and double +// One could also specialize it for user-defined types. + +/** +@private +*/ +template <> +struct cudaSharedMemory +{ + __device__ int *get() + { + extern __shared__ int s_int[]; + return s_int; + } +}; + +/** +@private +*/ +template <> +struct cudaSharedMemory +{ + __device__ unsigned int *get() + { + extern __shared__ unsigned int s_uint[]; + return s_uint; + } +}; + +/** +@private +*/ +template <> +struct cudaSharedMemory +{ + __device__ char *get() + { + extern __shared__ char s_char[]; + return s_char; + } +}; + +/** +@private +*/ +template <> +struct cudaSharedMemory +{ + __device__ unsigned char *get() + { + extern __shared__ unsigned char s_uchar[]; + return s_uchar; + } +}; + +/** +@private +*/ +template <> +struct cudaSharedMemory +{ + __device__ short *get() + { + extern __shared__ short s_short[]; + return s_short; + } +}; + +/** +@private +*/ +template <> +struct cudaSharedMemory +{ + __device__ unsigned short *get() + { + extern __shared__ unsigned short s_ushort[]; + return s_ushort; + } +}; + +/** +@private +*/ +template <> +struct cudaSharedMemory +{ + __device__ long *get() + { + extern __shared__ long s_long[]; + return s_long; + } +}; + +/** +@private +*/ +template <> +struct cudaSharedMemory +{ + __device__ unsigned long *get() + { + extern __shared__ unsigned long s_ulong[]; + return s_ulong; + } +}; + +//template <> +//struct cudaSharedMemory +//{ +// __device__ size_t *get() +// { +// extern __shared__ size_t s_sizet[]; +// return s_sizet; +// } +//}; + +/** +@private +*/ +template <> +struct cudaSharedMemory +{ + __device__ bool *get() + { + extern __shared__ bool s_bool[]; + return s_bool; + } +}; + +/** +@private +*/ +template <> +struct cudaSharedMemory +{ + __device__ float *get() + { + extern __shared__ float s_float[]; + return s_float; + } +}; + +/** +@private +*/ +template <> +struct cudaSharedMemory +{ + __device__ double *get() + { + extern __shared__ double s_double[]; + return s_double; + } +}; + + + +// ---------------------------------------------------------------------------- +// cudaDeviceAllocator +// ---------------------------------------------------------------------------- + +/** +@class cudaDeviceAllocator + +@brief class to create a CUDA device allocator + +@tparam T element type + +A %cudaDeviceAllocator enables device-specific allocation for +standard library containers. It is typically passed as template parameter +when declaring standard library containers (e.g. std::vector). +*/ +template +class cudaDeviceAllocator { + + public: + + /** + @brief element type + */ + using value_type = T; + + /** + @brief element pointer type + */ + using pointer = T*; + + /** + @brief element reference type + */ + using reference = T&; + + /** + @brief const element pointer type + */ + using const_pointer = const T*; + + /** + @brief constant element reference type + */ + using const_reference = const T&; + + /** + @brief size type + */ + using size_type = std::size_t; + + /** + @brief pointer difference type + */ + using difference_type = std::ptrdiff_t; + + /** + @brief its member type @c U is the equivalent allocator type to allocate elements of type U + */ + template + struct rebind { + /** + @brief allocator of a different data type + */ + using other = cudaDeviceAllocator; + }; + + /** + @brief Constructs a device allocator object. + */ + cudaDeviceAllocator() noexcept {} + + /** + @brief Constructs a device allocator object from another device allocator object. 
+ */ + cudaDeviceAllocator( const cudaDeviceAllocator& ) noexcept {} + + /** + @brief Constructs a device allocator object from another device allocator + object with a different element type. + */ + template + cudaDeviceAllocator( const cudaDeviceAllocator& ) noexcept {} + + /** + @brief Destructs the device allocator object. + */ + ~cudaDeviceAllocator() noexcept {} + + /** + @brief Returns the address of x. + + This effectively means returning &x. + + @param x reference to an object + @return a pointer to the object + */ + pointer address( reference x ) { return &x; } + + /** + @brief Returns the address of x. + + This effectively means returning &x. + + @param x reference to an object + @return a pointer to the object + */ + const_pointer address( const_reference x ) const { return &x; } + + /** + @brief allocates block of storage. + + Attempts to allocate a block of storage with a size large enough to contain + @c n elements of member type, @c value_type, and returns a pointer + to the first element. + + The storage is aligned appropriately for object of type @c value_type, + but they are not constructed. + + The block of storage is allocated using cudaMalloc and throws std::bad_alloc + if it cannot allocate the total amount of storage requested. + + @param n number of elements (each of size sizeof(value_type)) to be allocated + @return a pointer to the initial element in the block of storage. + */ + pointer allocate( size_type n, std::allocator::const_pointer = 0 ) + { + void* ptr = NULL; + TF_CHECK_CUDA( + cudaMalloc( &ptr, n*sizeof(T) ), + "failed to allocate ", n, " elements (", n*sizeof(T), "bytes)" + ) + return static_cast(ptr); + } + + /** + @brief Releases a block of storage previously allocated with member allocate and not yet released + + The elements in the array are not destroyed by a call to this member function. + + @param ptr pointer to a block of storage previously allocated with allocate + */ + void deallocate( pointer ptr, size_type ) + { + if(ptr){ + cudaFree(ptr); + } + } + + /** + @brief returns the maximum number of elements that could potentially + be allocated by this allocator + + A call to member allocate with the value returned by this function + can still fail to allocate the requested storage. + + @return the nubmer of elements that might be allcoated as maximum + by a call to member allocate + */ + size_type max_size() const noexcept { return size_type {-1}; } + + /** + @brief ignored to avoid de-referencing device pointer from the host + */ + void construct( pointer, const_reference) { } + + /** + @brief ignored to avoid de-referencing device pointer from the host + */ + void destroy( pointer) { } + + /** + @brief compares two allocator of different types using @c == + + Device allocators of different types are always equal to each other + because the storage allocated by the allocator @c a1 can be deallocated + through @c a2. + */ + template + bool operator == (const cudaDeviceAllocator&) const noexcept { + return true; + } + + /** + @brief compares two allocator of different types using @c != + + Device allocators of different types are always equal to each other + because the storage allocated by the allocator @c a1 can be deallocated + through @c a2. 
+ */ + template + bool operator != (const cudaDeviceAllocator&) const noexcept { + return false; + } + +}; + +// ---------------------------------------------------------------------------- +// cudaUSMAllocator +// ---------------------------------------------------------------------------- + +/** +@class cudaUSMAllocator + +@brief class to create a unified shared memory (USM) allocator + +@tparam T element type + +A %cudaUSMAllocator enables using unified shared memory (USM) allocation for +standard library containers. It is typically passed as template parameter +when declaring standard library containers (e.g. std::vector). +*/ +template +class cudaUSMAllocator { + + public: + + /** + @brief element type + */ + using value_type = T; + + /** + @brief element pointer type + */ + using pointer = T*; + + /** + @brief element reference type + */ + using reference = T&; + + /** + @brief const element pointer type + */ + using const_pointer = const T*; + + /** + @brief constant element reference type + */ + using const_reference = const T&; + + /** + @brief size type + */ + using size_type = std::size_t; + + /** + @brief pointer difference type + */ + using difference_type = std::ptrdiff_t; + + /** + @brief its member type @c U is the equivalent allocator type to allocate elements of type U + */ + template + struct rebind { + /** + @brief allocator of a different data type + */ + using other = cudaUSMAllocator; + }; + + /** + @brief Constructs a device allocator object. + */ + cudaUSMAllocator() noexcept {} + + /** + @brief Constructs a device allocator object from another device allocator object. + */ + cudaUSMAllocator( const cudaUSMAllocator& ) noexcept {} + + /** + @brief Constructs a device allocator object from another device allocator + object with a different element type. + */ + template + cudaUSMAllocator( const cudaUSMAllocator& ) noexcept {} + + /** + @brief Destructs the device allocator object. + */ + ~cudaUSMAllocator() noexcept {} + + /** + @brief Returns the address of x. + + This effectively means returning &x. + + @param x reference to an object + @return a pointer to the object + */ + pointer address( reference x ) { return &x; } + + /** + @brief Returns the address of x. + + This effectively means returning &x. + + @param x reference to an object + @return a pointer to the object + */ + const_pointer address( const_reference x ) const { return &x; } + + /** + @brief allocates block of storage. + + Attempts to allocate a block of storage with a size large enough to contain + @c n elements of member type, @c value_type, and returns a pointer + to the first element. + + The storage is aligned appropriately for object of type @c value_type, + but they are not constructed. + + The block of storage is allocated using cudaMalloc and throws std::bad_alloc + if it cannot allocate the total amount of storage requested. + + @param n number of elements (each of size sizeof(value_type)) to be allocated + @return a pointer to the initial element in the block of storage. + */ + pointer allocate( size_type n, std::allocator::const_pointer = 0 ) + { + void* ptr {nullptr}; + TF_CHECK_CUDA( + cudaMallocManaged( &ptr, n*sizeof(T) ), + "failed to allocate ", n, " elements (", n*sizeof(T), "bytes)" + ) + return static_cast(ptr); + } + + /** + @brief Releases a block of storage previously allocated with member allocate and not yet released + + The elements in the array are not destroyed by a call to this member function. 
+ + @param ptr pointer to a block of storage previously allocated with allocate + */ + void deallocate( pointer ptr, size_type ) + { + if(ptr){ + cudaFree(ptr); + } + } + + /** + @brief returns the maximum number of elements that could potentially + be allocated by this allocator + + A call to member allocate with the value returned by this function + can still fail to allocate the requested storage. + + @return the nubmer of elements that might be allcoated as maximum + by a call to member allocate + */ + size_type max_size() const noexcept { return size_type {-1}; } + + /** + @brief Constructs an element object on the location pointed by ptr. + @param ptr pointer to a location with enough storage soace to contain + an element of type @c value_type + + @param val value to initialize the constructed element to + */ + void construct( pointer ptr, const_reference val ) { + new ((void*)ptr) value_type(val); + } + + /** + @brief destroys in-place the object pointed by @c ptr + + Notice that this does not deallocate the storage for the element but calls + its destructor. + + @param ptr pointer to the object to be destroye + */ + void destroy( pointer ptr ) { + ptr->~value_type(); + } + + /** + @brief compares two allocator of different types using @c == + + USM allocators of different types are always equal to each other + because the storage allocated by the allocator @c a1 can be deallocated + through @c a2. + */ + template + bool operator == (const cudaUSMAllocator&) const noexcept { + return true; + } + + /** + @brief compares two allocator of different types using @c != + + USM allocators of different types are always equal to each other + because the storage allocated by the allocator @c a1 can be deallocated + through @c a2. + */ + template + bool operator != (const cudaUSMAllocator&) const noexcept { + return false; + } + +}; + +// ---------------------------------------------------------------------------- +// GPU vector object +// ---------------------------------------------------------------------------- + +//template +//using cudaDeviceVector = std::vector, cudaDeviceAllocator>>; + +//template +//using cudaUSMVector = std::vector>; + +/** +@private +*/ +template +class cudaDeviceVector { + + public: + + cudaDeviceVector() = default; + + cudaDeviceVector(size_t N) : _N {N} { + if(N) { + TF_CHECK_CUDA( + cudaMalloc(&_data, N*sizeof(T)), + "failed to allocate device memory (", N*sizeof(T), " bytes)" + ); + } + } + + cudaDeviceVector(cudaDeviceVector&& rhs) : + _data{rhs._data}, _N {rhs._N} { + rhs._data = nullptr; + rhs._N = 0; + } + + ~cudaDeviceVector() { + if(_data) { + cudaFree(_data); + } + } + + cudaDeviceVector& operator = (cudaDeviceVector&& rhs) { + if(_data) { + cudaFree(_data); + } + _data = rhs._data; + _N = rhs._N; + rhs._data = nullptr; + rhs._N = 0; + return *this; + } + + size_t size() const { return _N; } + + T* data() { return _data; } + const T* data() const { return _data; } + + cudaDeviceVector(const cudaDeviceVector&) = delete; + cudaDeviceVector& operator = (const cudaDeviceVector&) = delete; + + private: + + T* _data {nullptr}; + size_t _N {0}; +}; + + +} // end of namespace tf ----------------------------------------------------- + + + + + + diff --git a/bundled/taskflow-3.6.0/include/cuda/cuda_meta.hpp b/bundled/taskflow-3.6.0/include/cuda/cuda_meta.hpp new file mode 100644 index 0000000000..b08eb29b08 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/cuda/cuda_meta.hpp @@ -0,0 +1,452 @@ +#pragma once + +#include "cuda_execution_policy.hpp" + +namespace tf { 
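A minimal usage sketch of the memory utilities from cuda_memory.hpp above; the function name, the vector sizes, and the use of tf::cudaStream (the RAII stream wrapper defined later in cuda_stream.hpp) are illustrative assumptions, not part of the bundled header:

// sketch: allocate, initialize, and free device memory with the helpers above
// (assumes the taskflow CUDA headers are included and a CUDA context exists)
#include <vector>

inline void example_device_roundtrip() {
  tf::cudaStream stream;                                        // RAII stream (cuda_stream.hpp)
  std::vector<float, tf::cudaUSMAllocator<float>> usm(1024, 1.0f);  // USM-backed container
  float* dev = tf::cuda_malloc_device<float>(1024);             // raw device allocation
  tf::cuda_memset_async(stream, dev, 0, 1024 * sizeof(float));  // zero the device buffer
  tf::cuda_memcpy_async(stream, dev, usm.data(), 1024 * sizeof(float));  // fill it from USM
  stream.synchronize();                                         // wait for both async operations
  tf::cuda_free(dev);                                           // release the device allocation
}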
+ +// default warp size +inline constexpr unsigned CUDA_WARP_SIZE = 32; + +// empty type +struct cudaEmpty { }; + +// ---------------------------------------------------------------------------- +// iterator unrolling +// ---------------------------------------------------------------------------- + +// Template unrolled looping construct. +template +struct cudaIterate { + template + __device__ static void eval(F f) { + f(i); + cudaIterate::eval(f); + } +}; + +template +struct cudaIterate { + template + __device__ static void eval(F) { } +}; + +template +__device__ void cuda_iterate(F f) { + cudaIterate::eval(f); +} + +template +__device__ void cuda_iterate(F f) { + cuda_iterate<0, count>(f); +} + +template +__device__ T reduce(const T(&x)[count]) { + T y; + cuda_iterate([&](auto i) { y = i ? x[i] + y : x[i]; }); + return y; +} + +template +__device__ void fill(T(&x)[count], T val) { + cuda_iterate([&](auto i) { x[i] = val; }); +} + +// Invoke unconditionally. +template +__device__ void cuda_strided_iterate(F f, unsigned tid) { + cuda_iterate([=](auto i) { f(i, nt * i + tid); }); +} + +// Check range. +template +__device__ void cuda_strided_iterate(F f, unsigned tid, unsigned count) { + // Unroll the first vt0 elements of each thread. + if(vt0 > 1 && count >= nt * vt0) { + cuda_strided_iterate(f, tid); // No checking + } else { + cuda_iterate([=](auto i) { + auto j = nt * i + tid; + if(j < count) f(i, j); + }); + } + + // TODO: seems dummy when vt0 == vt + cuda_iterate([=](auto i) { + auto j = nt * i + tid; + if(j < count) f(i, j); + }); +} + +template +__device__ void cuda_thread_iterate(F f, unsigned tid) { + cuda_iterate([=](auto i) { f(i, vt * tid + i); }); +} + +// ---------------------------------------------------------------------------- +// cudaRange +// ---------------------------------------------------------------------------- + +// cudaRange +struct cudaRange { + unsigned begin, end; + __device__ unsigned size() const { return end - begin; } + __device__ unsigned count() const { return size(); } + __device__ bool valid() const { return end > begin; } +}; + +inline __device__ cudaRange cuda_get_tile(unsigned b, unsigned nv, unsigned count) { + return cudaRange { nv * b, min(count, nv * (b + 1)) }; +} + + +// ---------------------------------------------------------------------------- +// cudaArray +// ---------------------------------------------------------------------------- + +template +struct cudaArray { + T data[size]; + + __device__ T operator[](unsigned i) const { return data[i]; } + __device__ T& operator[](unsigned i) { return data[i]; } + + cudaArray() = default; + cudaArray(const cudaArray&) = default; + cudaArray& operator=(const cudaArray&) = default; + + // Fill the array with x. 
+ __device__ cudaArray(T x) { + cuda_iterate([&](unsigned i) { data[i] = x; }); + } +}; + +template +struct cudaArray { + __device__ T operator[](unsigned) const { return T(); } + __device__ T& operator[](unsigned) { return *(T*)nullptr; } +}; + +template +struct cudaKVArray { + cudaArray keys; + cudaArray vals; +}; + +// ---------------------------------------------------------------------------- +// thread reg <-> global mem +// ---------------------------------------------------------------------------- + +template +__device__ auto cuda_mem_to_reg_strided(I mem, unsigned tid, unsigned count) { + using T = typename std::iterator_traits::value_type; + cudaArray x; + cuda_strided_iterate( + [&](auto i, auto j) { x[i] = mem[j]; }, tid, count + ); + return x; +} + +template +__device__ void cuda_reg_to_mem_strided( + cudaArray x, unsigned tid, unsigned count, it_t mem) { + + cuda_strided_iterate( + [=](auto i, auto j) { mem[j] = x[i]; }, tid, count + ); +} + +template +__device__ auto cuda_transform_mem_to_reg_strided( + I mem, unsigned tid, unsigned count, O op +) { + using T = std::invoke_result_t::value_type>; + cudaArray x; + cuda_strided_iterate( + [&](auto i, auto j) { x[i] = op(mem[j]); }, tid, count + ); + return x; +} + +// ---------------------------------------------------------------------------- +// thread reg <-> shared +// ---------------------------------------------------------------------------- + +template +__device__ void cuda_reg_to_shared_thread( + cudaArray x, unsigned tid, T (&shared)[shared_size], bool sync = true +) { + + static_assert(shared_size >= nt * vt, + "reg_to_shared_thread must have at least nt * vt storage"); + + cuda_thread_iterate([&](auto i, auto j) { shared[j] = x[i]; }, tid); + + if(sync) __syncthreads(); +} + +template +__device__ auto cuda_shared_to_reg_thread( + const T (&shared)[shared_size], unsigned tid, bool sync = true +) { + + static_assert(shared_size >= nt * vt, + "reg_to_shared_thread must have at least nt * vt storage"); + + cudaArray x; + cuda_thread_iterate([&](auto i, auto j) { + x[i] = shared[j]; + }, tid); + + if(sync) __syncthreads(); + + return x; +} + +template +__device__ void cuda_reg_to_shared_strided( + cudaArray x, unsigned tid, T (&shared)[shared_size], bool sync = true +) { + + static_assert(shared_size >= nt * vt, + "reg_to_shared_strided must have at least nt * vt storage"); + + cuda_strided_iterate( + [&](auto i, auto j) { shared[j] = x[i]; }, tid + ); + + if(sync) __syncthreads(); +} + +template +__device__ auto cuda_shared_to_reg_strided( + const T (&shared)[shared_size], unsigned tid, bool sync = true +) { + + static_assert(shared_size >= nt * vt, + "shared_to_reg_strided must have at least nt * vt storage"); + + cudaArray x; + cuda_strided_iterate([&](auto i, auto j) { x[i] = shared[j]; }, tid); + if(sync) __syncthreads(); + + return x; +} + +template< + unsigned nt, unsigned vt, unsigned vt0 = vt, typename T, typename it_t, + unsigned shared_size +> +__device__ auto cuda_reg_to_mem_thread( + cudaArray x, unsigned tid, + unsigned count, it_t mem, T (&shared)[shared_size] +) { + cuda_reg_to_shared_thread(x, tid, shared); + auto y = cuda_shared_to_reg_strided(shared, tid); + cuda_reg_to_mem_strided(y, tid, count, mem); +} + +template< + unsigned nt, unsigned vt, unsigned vt0 = vt, typename T, typename it_t, + unsigned shared_size +> +__device__ auto cuda_mem_to_reg_thread( + it_t mem, unsigned tid, unsigned count, T (&shared)[shared_size] +) { + + auto x = cuda_mem_to_reg_strided(mem, tid, count); + 
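+  // stage the strided loads above through shared memory below so that each
+  // thread ends up holding vt consecutive elements in its registers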
cuda_reg_to_shared_strided(x, tid, shared); + auto y = cuda_shared_to_reg_thread(shared, tid); + return y; +} + +template +__device__ auto cuda_shared_gather( + const T(&data)[S], cudaArray indices, bool sync = true +) { + + static_assert(S >= nt * vt, + "shared_gather must have at least nt * vt storage"); + + cudaArray x; + cuda_iterate([&](auto i) { x[i] = data[indices[i]]; }); + + if(sync) __syncthreads(); + + return x; +} + + + +// ---------------------------------------------------------------------------- +// reg<->reg +// ---------------------------------------------------------------------------- + +template +__device__ auto cuda_reg_thread_to_strided( + cudaArray x, unsigned tid, T (&shared)[S] +) { + cuda_reg_to_shared_thread(x, tid, shared); + return cuda_shared_to_reg_strided(shared, tid); +} + +template +__device__ auto cuda_reg_strided_to_thread( + cudaArray x, unsigned tid, T (&shared)[S] +) { + cuda_reg_to_shared_strided(x, tid, shared); + return cuda_shared_to_reg_thread(shared, tid); +} + +// ---------------------------------------------------------------------------- +// cudaLoadStoreIterator +// ---------------------------------------------------------------------------- + +template +struct cudaLoadStoreIterator : std::iterator_traits { + + L load; + S store; + I base; + + cudaLoadStoreIterator(L load_, S store_, I base_) : + load(load_), store(store_), base(base_) { } + + struct assign_t { + L load; + S store; + I index; + + __device__ assign_t& operator=(T rhs) { + static_assert(!std::is_same::value, + "load_iterator is being stored to."); + store(rhs, index); + return *this; + } + __device__ operator T() const { + static_assert(!std::is_same::value, + "store_iterator is being loaded from."); + return load(index); + } + }; + + __device__ assign_t operator[](I index) const { + return assign_t { load, store, base + index }; + } + + __device__ assign_t operator*() const { + return assign_t { load, store, base }; + } + + __device__ cudaLoadStoreIterator operator+(I offset) const { + cudaLoadStoreIterator cp = *this; + cp += offset; + return cp; + } + + __device__ cudaLoadStoreIterator& operator+=(I offset) { + base += offset; + return *this; + } + + __device__ cudaLoadStoreIterator operator-(I offset) const { + cudaLoadStoreIterator cp = *this; + cp -= offset; + return cp; + } + + __device__ cudaLoadStoreIterator& operator-=(I offset) { + base -= offset; + return *this; + } +}; + +//template +//struct trivial_load_functor { +// template +// __device__ T operator()(I index) const { +// return T(); +// } +//}; + +//template +//struct trivial_store_functor { +// template +// __device__ void operator()(T v, I index) const { } +//}; + +template +auto cuda_make_load_store_iterator(L load, S store, I base = 0) { + return cudaLoadStoreIterator(load, store, base); +} + +template +auto cuda_make_load_iterator(L load, I base = 0) { + return cuda_make_load_store_iterator(load, cudaEmpty(), base); +} + +template +auto cuda_make_store_iterator(S store, I base = 0) { + return cuda_make_load_store_iterator(cudaEmpty(), store, base); +} + +// ---------------------------------------------------------------------------- +// swap +// ---------------------------------------------------------------------------- + +template +__device__ void cuda_swap(T& a, T& b) { + auto c = a; + a = b; + b = c; +} + +// ---------------------------------------------------------------------------- +// launch kernel +// ---------------------------------------------------------------------------- + +template 
+__global__ void cuda_kernel(F f, args_t... args) { + f(threadIdx.x, blockIdx.x, args...); +} + +// ---------------------------------------------------------------------------- +// operators +// ---------------------------------------------------------------------------- + +template +struct cuda_plus{ + __device__ T operator()(T a, T b) const { return a + b; } +}; + + template +struct cuda_minus{ + __device__ T operator()(T a, T b) const { return a - b; } +}; + +template +struct cuda_multiplies{ + __device__ T operator()(T a, T b) const { return a * b; } +}; + +template +struct cuda_maximum{ + __device__ T operator()(T a, T b) const { return a > b ? a : b; } +}; + +template +struct cuda_minimum{ + __device__ T operator()(T a, T b) const { return a < b ? a : b; } +}; + +template +struct cuda_less{ + __device__ T operator()(T a, T b) const { return a < b; } +}; + +template +struct cuda_greater{ + __device__ T operator()(T a, T b) const { return a > b; } +}; + +} // end of namespace tf ----------------------------------------------------- diff --git a/bundled/taskflow-3.6.0/include/cuda/cuda_object.hpp b/bundled/taskflow-3.6.0/include/cuda/cuda_object.hpp new file mode 100644 index 0000000000..e30d3a52da --- /dev/null +++ b/bundled/taskflow-3.6.0/include/cuda/cuda_object.hpp @@ -0,0 +1,287 @@ +#pragma once + +#include "cuda_error.hpp" + +namespace tf { + +/** +@brief per-thread object pool to manage CUDA device object + +@tparam H object type +@tparam C function object to create a library object +@tparam D function object to delete a library object + +A CUDA device object has a lifetime associated with a device, +for example, @c cudaStream_t, @c cublasHandle_t, etc. +Creating a device object is typically expensive (e.g., 10-200 ms) +and destroying it may trigger implicit device synchronization. +For applications tha intensively make use of device objects, +it is desirable to reuse them as much as possible. + +There exists an one-to-one relationship between CUDA devices in CUDA Runtime API +and CUcontexts in the CUDA Driver API within a process. +The specific context which the CUDA Runtime API uses for a device +is called the device's primary context. +From the perspective of the CUDA Runtime API, +a device and its primary context are synonymous. + +We design the device object pool in a decentralized fashion by keeping +(1) a global pool to keep track of potentially usable objects and +(2) a per-thread pool to footprint objects with shared ownership. +The global pool does not own the object and therefore does not destruct any of them. +The per-thread pool keeps the footprints of objects with shared ownership +and will destruct them if the thread holds the last reference count after it joins. +The motivation of this decentralized control is to avoid device objects +from being destroyed while the context had been destroyed due to driver shutdown. + +*/ +template +class cudaPerThreadDeviceObjectPool { + + public: + + /** + @brief structure to store a context object + */ + struct Object { + + int device; + H value; + + Object(int); + ~Object(); + + Object(const Object&) = delete; + Object(Object&&) = delete; + }; + + private: + + // Master thread hold the storage to the pool. + // Due to some ordering, cuda context may be destroyed when the master + // program thread destroys the cuda object. + // Therefore, we use a decentralized approach to let child thread + // destroy cuda objects while the master thread only keeps a weak reference + // to those objects for reuse. 
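+  //
+  // Usage sketch (names are illustrative): a worker thread calls acquire(device)
+  // to obtain a std::shared_ptr<Object>, uses object->value as the per-device
+  // handle, and hands it back through release(std::move(ptr)); the weak
+  // reference then returns to the global pool so another thread can reuse it.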
+ struct cudaGlobalDeviceObjectPool { + + std::shared_ptr acquire(int); + void release(int, std::weak_ptr); + + std::mutex mutex; + std::unordered_map>> pool; + }; + + public: + + /** + @brief default constructor + */ + cudaPerThreadDeviceObjectPool() = default; + + /** + @brief acquires a device object with shared ownership + */ + std::shared_ptr acquire(int); + + /** + @brief releases a device object with moved ownership + */ + void release(std::shared_ptr&&); + + /** + @brief queries the number of device objects with shared ownership + */ + size_t footprint_size() const; + + private: + + inline static cudaGlobalDeviceObjectPool _shared_pool; + + std::unordered_set> _footprint; +}; + +// ---------------------------------------------------------------------------- +// cudaPerThreadDeviceObject::cudaHanale definition +// ---------------------------------------------------------------------------- + +template +cudaPerThreadDeviceObjectPool::Object::Object(int d) : + device {d} { + cudaScopedDevice ctx(device); + value = C{}(); +} + +template +cudaPerThreadDeviceObjectPool::Object::~Object() { + cudaScopedDevice ctx(device); + D{}(value); +} + +// ---------------------------------------------------------------------------- +// cudaPerThreadDeviceObject::cudaHanaldePool definition +// ---------------------------------------------------------------------------- + +template +std::shared_ptr::Object> +cudaPerThreadDeviceObjectPool::cudaGlobalDeviceObjectPool::acquire(int d) { + std::scoped_lock lock(mutex); + if(auto itr = pool.find(d); itr != pool.end()) { + while(!itr->second.empty()) { + auto sptr = itr->second.back().lock(); + itr->second.pop_back(); + if(sptr) { + return sptr; + } + } + } + return nullptr; +} + +template +void cudaPerThreadDeviceObjectPool::cudaGlobalDeviceObjectPool::release( + int d, std::weak_ptr ptr +) { + std::scoped_lock lock(mutex); + pool[d].push_back(ptr); +} + +// ---------------------------------------------------------------------------- +// cudaPerThreadDeviceObject definition +// ---------------------------------------------------------------------------- + +template +std::shared_ptr::Object> +cudaPerThreadDeviceObjectPool::acquire(int d) { + + auto ptr = _shared_pool.acquire(d); + + if(!ptr) { + ptr = std::make_shared(d); + } + + return ptr; +} + +template +void cudaPerThreadDeviceObjectPool::release( + std::shared_ptr&& ptr +) { + _shared_pool.release(ptr->device, ptr); + _footprint.insert(std::move(ptr)); +} + +template +size_t cudaPerThreadDeviceObjectPool::footprint_size() const { + return _footprint.size(); +} + +// ---------------------------------------------------------------------------- +// cudaObject +// ---------------------------------------------------------------------------- + +/** +@class cudaObject + +@brief class to create an RAII-styled and move-only wrapper for CUDA objects +*/ +template +class cudaObject { + + public: + + /** + @brief constructs a CUDA object from the given one + */ + explicit cudaObject(T obj) : object(obj) {} + + /** + @brief constructs a new CUDA object + */ + cudaObject() : object{ C{}() } {} + + /** + @brief disabled copy constructor + */ + cudaObject(const cudaObject&) = delete; + + /** + @brief move constructor + */ + cudaObject(cudaObject&& rhs) : object{rhs.object} { + rhs.object = nullptr; + } + + /** + @brief destructs the CUDA object + */ + ~cudaObject() { D{}(object); } + + /** + @brief disabled copy assignment + */ + cudaObject& operator = (const cudaObject&) = delete; + + /** + @brief move assignment + */ 
+ cudaObject& operator = (cudaObject&& rhs) { + D {} (object); + object = rhs.object; + rhs.object = nullptr; + return *this; + } + + /** + @brief implicit conversion to the native CUDA stream (cudaObject_t) + + Returns the underlying stream of type @c cudaObject_t. + */ + operator T () const { + return object; + } + + /** + @brief deletes the current CUDA object (if any) and creates a new one + */ + void create() { + D {} (object); + object = C{}(); + } + + /** + @brief resets this CUDA object to the given one + */ + void reset(T new_obj) { + D {} (object); + object = new_obj; + } + + /** + @brief deletes the current CUDA object + */ + void clear() { + reset(nullptr); + } + + /** + @brief releases the ownership of the CUDA object + */ + T release() { + auto tmp = object; + object = nullptr; + return tmp; + } + + protected: + + /** + @brief the CUDA object + */ + T object; +}; + +} // end of namespace tf ----------------------------------------------------- + + + diff --git a/bundled/taskflow-3.6.0/include/cuda/cuda_optimizer.hpp b/bundled/taskflow-3.6.0/include/cuda/cuda_optimizer.hpp new file mode 100644 index 0000000000..60efed1a79 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/cuda/cuda_optimizer.hpp @@ -0,0 +1,404 @@ +#pragma once + +#include "cuda_graph.hpp" + +/** +@file cuda_optimizer.hpp +@brief %cudaFlow capturing algorithms include file +*/ + +namespace tf { + +// ---------------------------------------------------------------------------- +// cudaFlowOptimizerBase +// ---------------------------------------------------------------------------- + +/** +@private + +@brief class to provide helper common methods for optimization algorithms +*/ +class cudaFlowOptimizerBase { + + protected: + + std::vector _toposort(cudaFlowGraph&); + std::vector> _levelize(cudaFlowGraph&); +}; + +// Function: _toposort +inline std::vector cudaFlowOptimizerBase::_toposort(cudaFlowGraph& graph) { + + std::vector res; + std::queue bfs; + + res.reserve(graph._nodes.size()); + + // insert the first level of nodes into the queue + for(auto& u : graph._nodes) { + + auto hu = std::get_if(&u->_handle); + hu->level = u->_dependents.size(); + + if(hu->level == 0) { + bfs.push(u.get()); + } + } + + // levelize the graph using bfs + while(!bfs.empty()) { + + auto u = bfs.front(); + bfs.pop(); + + res.push_back(u); + + for(auto v : u->_successors) { + auto hv = std::get_if(&v->_handle); + if(--hv->level == 0) { + bfs.push(v); + } + } + } + + return res; +} + +// Function: _levelize +inline std::vector> +cudaFlowOptimizerBase::_levelize(cudaFlowGraph& graph) { + + std::queue bfs; + + size_t max_level = 0; + + // insert the first level of nodes into the queue + for(auto& u : graph._nodes) { + + auto hu = std::get_if(&u->_handle); + hu->level = u->_dependents.size(); + + if(hu->level == 0) { + bfs.push(u.get()); + } + } + + // levelize the graph using bfs + while(!bfs.empty()) { + + auto u = bfs.front(); + bfs.pop(); + + auto hu = std::get_if(&u->_handle); + + for(auto v : u->_successors) { + auto hv = std::get_if(&v->_handle); + if(--hv->level == 0) { + hv->level = hu->level + 1; + if(hv->level > max_level) { + max_level = hv->level; + } + bfs.push(v); + } + } + } + + // set level_graph and each node's idx + std::vector> level_graph(max_level+1); + for(auto& u : graph._nodes) { + auto hu = std::get_if(&u->_handle); + hu->lid = level_graph[hu->level].size(); + level_graph[hu->level].emplace_back(u.get()); + + //for(auto s : u->_successors) { + // assert(hu.level < std::get_if(&s->_handle)->level); + //} + } + + 
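+  // nodes are now grouped by BFS level; each node's lid records its position
+  // within its level and is used later for round-robin stream assignment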
return level_graph; +} + +// ---------------------------------------------------------------------------- +// class definition: cudaFlowSequentialOptimizer +// ---------------------------------------------------------------------------- + +/** +@class cudaFlowSequentialOptimizer + +@brief class to capture a CUDA graph using a sequential stream + +A sequential capturing algorithm finds a topological order of +the described graph and captures dependent GPU tasks using a single stream. +All GPU tasks run sequentially without breaking inter dependencies. +*/ +class cudaFlowSequentialOptimizer : public cudaFlowOptimizerBase { + + friend class cudaFlowCapturer; + + public: + + /** + @brief constructs a sequential optimizer + */ + cudaFlowSequentialOptimizer() = default; + + private: + + cudaGraph_t _optimize(cudaFlowGraph& graph); +}; + +inline cudaGraph_t cudaFlowSequentialOptimizer::_optimize(cudaFlowGraph& graph) { + + // acquire per-thread stream and turn it into capture mode + // we must use ThreadLocal mode to avoid clashing with CUDA global states + + cudaStream stream; + + stream.begin_capture(cudaStreamCaptureModeThreadLocal); + + auto ordered = _toposort(graph); + for(auto node : ordered) { + std::get_if(&node->_handle)->work(stream); + } + + return stream.end_capture(); +} + +// ---------------------------------------------------------------------------- +// class definition: cudaFlowLinearOptimizer +// ---------------------------------------------------------------------------- + +/** +@class cudaFlowLinearOptimizer + +@brief class to capture a linear CUDA graph using a sequential stream + +A linear capturing algorithm is a special case of tf::cudaFlowSequentialOptimizer +and assumes the input task graph to be a single linear chain of tasks +(i.e., a straight line). +This assumption allows faster optimization during the capturing process. +If the input task graph is not a linear chain, the behavior is undefined. +*/ +class cudaFlowLinearOptimizer : public cudaFlowOptimizerBase { + + friend class cudaFlowCapturer; + + public: + + /** + @brief constructs a linear optimizer + */ + cudaFlowLinearOptimizer() = default; + + private: + + cudaGraph_t _optimize(cudaFlowGraph& graph); +}; + +inline cudaGraph_t cudaFlowLinearOptimizer::_optimize(cudaFlowGraph& graph) { + + // acquire per-thread stream and turn it into capture mode + // we must use ThreadLocal mode to avoid clashing with CUDA global states + cudaStream stream; + + stream.begin_capture(cudaStreamCaptureModeThreadLocal); + + // find the source node + cudaFlowNode* src {nullptr}; + for(auto& u : graph._nodes) { + if(u->_dependents.size() == 0) { + src = u.get(); + while(src) { + std::get_if(&src->_handle)->work(stream); + src = src->_successors.empty() ? nullptr : src->_successors[0]; + } + break; + } + // ideally, there should be only one source + } + + return stream.end_capture(); +} + +// ---------------------------------------------------------------------------- +// class definition: cudaFlowRoundRobinOptimizer +// ---------------------------------------------------------------------------- + +/** +@class cudaFlowRoundRobinOptimizer + +@brief class to capture a CUDA graph using a round-robin algorithm + +A round-robin capturing algorithm levelizes the user-described graph +and assign streams to nodes in a round-robin order level by level. 
+The algorithm is based on the following paper published in Euro-Par 2021: + + Dian-Lun Lin and Tsung-Wei Huang, "Efficient GPU Computation using %Task Graph Parallelism," European Conference on Parallel and Distributed Computing (Euro-Par), 2021 + +The round-robin optimization algorithm is best suited for large %cudaFlow graphs +that compose hundreds of or thousands of GPU operations +(e.g., kernels and memory copies) with many of them being able to run in parallel. +You can configure the number of streams to the optimizer to adjust the +maximum kernel currency in the captured CUDA graph. +*/ +class cudaFlowRoundRobinOptimizer : public cudaFlowOptimizerBase { + + friend class cudaFlowCapturer; + + public: + + /** + @brief constructs a round-robin optimizer with 4 streams by default + */ + cudaFlowRoundRobinOptimizer() = default; + + /** + @brief constructs a round-robin optimizer with the given number of streams + */ + explicit cudaFlowRoundRobinOptimizer(size_t num_streams); + + /** + @brief queries the number of streams used by the optimizer + */ + size_t num_streams() const; + + /** + @brief sets the number of streams used by the optimizer + */ + void num_streams(size_t n); + + private: + + size_t _num_streams {4}; + + cudaGraph_t _optimize(cudaFlowGraph& graph); + + void _reset(std::vector>& graph); + +}; + +// Constructor +inline cudaFlowRoundRobinOptimizer::cudaFlowRoundRobinOptimizer(size_t num_streams) : + _num_streams {num_streams} { + + if(num_streams == 0) { + TF_THROW("number of streams must be at least one"); + } +} + +// Function: num_streams +inline size_t cudaFlowRoundRobinOptimizer::num_streams() const { + return _num_streams; +} + +// Procedure: num_streams +inline void cudaFlowRoundRobinOptimizer::num_streams(size_t n) { + if(n == 0) { + TF_THROW("number of streams must be at least one"); + } + _num_streams = n; +} + +inline void cudaFlowRoundRobinOptimizer::_reset( + std::vector>& graph +) { + //level == global id + //idx == stream id we want to skip + size_t id{0}; + for(auto& each_level: graph) { + for(auto& node: each_level) { + auto hn = std::get_if(&node->_handle); + hn->level = id++; + hn->idx = _num_streams; + hn->event = nullptr; + } + } +} + +// Function: _optimize +inline cudaGraph_t cudaFlowRoundRobinOptimizer::_optimize(cudaFlowGraph& graph) { + + // levelize the graph + auto levelized = _levelize(graph); + + // initialize the data structure + _reset(levelized); + + // begin to capture + std::vector streams(_num_streams); + + streams[0].begin_capture(cudaStreamCaptureModeThreadLocal); + + // reserve space for scoped events + std::vector events; + events.reserve((_num_streams >> 1) + levelized.size()); + + // fork + cudaEvent_t fork_event = events.emplace_back(); + streams[0].record(fork_event); + + for(size_t i = 1; i < streams.size(); ++i) { + streams[i].wait(fork_event); + } + + // assign streams to levelized nodes in a round-robin manner + for(auto& each_level: levelized) { + for(auto& node: each_level) { + auto hn = std::get_if(&node->_handle); + size_t sid = hn->lid % _num_streams; + + //wait events + cudaFlowNode* wait_node{nullptr}; + for(auto& pn: node->_dependents) { + auto phn = std::get_if(&pn->_handle); + size_t psid = phn->lid % _num_streams; + + //level == global id + //idx == stream id we want to skip + if(psid == hn->idx) { + if(wait_node == nullptr || + std::get_if(&wait_node->_handle)->level < phn->level) { + wait_node = pn; + } + } + else if(psid != sid) { + streams[sid].wait(phn->event); + } + } + + if(wait_node != nullptr) { + 
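+        // wait only on the newest (largest-level) dependent captured on the
+        // skipped stream, as selected in the loop above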
assert(std::get_if(&wait_node->_handle)->event); + streams[sid].wait(std::get_if(&wait_node->_handle)->event); + } + + //capture + hn->work(streams[sid]); + + //create/record stream + for(auto& sn: node->_successors) { + auto shn = std::get_if(&sn->_handle); + size_t ssid = shn->lid % _num_streams; + if(ssid != sid) { + if(!hn->event) { + hn->event = events.emplace_back(); + streams[sid].record(hn->event); + } + //idx == stream id we want to skip + shn->idx = sid; + } + } + } + } + + // join + for(size_t i=1; i<_num_streams; ++i) { + cudaEvent_t join_event = events.emplace_back(); + streams[i].record(join_event); + streams[0].wait(join_event); + } + + return streams[0].end_capture(); +} + + +} // end of namespace tf ----------------------------------------------------- + diff --git a/bundled/taskflow-3.6.0/include/cuda/cuda_stream.hpp b/bundled/taskflow-3.6.0/include/cuda/cuda_stream.hpp new file mode 100644 index 0000000000..f3e48f145f --- /dev/null +++ b/bundled/taskflow-3.6.0/include/cuda/cuda_stream.hpp @@ -0,0 +1,226 @@ +#pragma once + +#include "cuda_object.hpp" + +/** +@file cuda_stream.hpp +@brief CUDA stream utilities include file +*/ + +namespace tf { + + + +// ---------------------------------------------------------------------------- +// cudaStream +// ---------------------------------------------------------------------------- + +/** +@private +*/ +struct cudaStreamCreator { + cudaStream_t operator () () const { + cudaStream_t stream; + TF_CHECK_CUDA(cudaStreamCreate(&stream), "failed to create a CUDA stream"); + return stream; + } +}; + +/** +@private +*/ +struct cudaStreamDeleter { + void operator () (cudaStream_t stream) const { + if(stream) { + cudaStreamDestroy(stream); + } + } +}; + +/** +@class cudaStream + +@brief class to create an RAII-styled wrapper over a native CUDA stream + +A cudaStream object is an RAII-styled wrapper over a native CUDA stream +(@c cudaStream_t). +A cudaStream object is move-only. +*/ +class cudaStream : + + public cudaObject { + + public: + + /** + @brief constructs an RAII-styled object from the given CUDA stream + + Constructs a cudaStream object which owns @c stream. + */ + explicit cudaStream(cudaStream_t stream) : cudaObject(stream) { + } + + /** + @brief default constructor + */ + cudaStream() = default; + + /** + @brief synchronizes the associated stream + + Equivalently calling @c cudaStreamSynchronize to block + until this stream has completed all operations. + */ + void synchronize() const { + TF_CHECK_CUDA( + cudaStreamSynchronize(object), "failed to synchronize a CUDA stream" + ); + } + + /** + @brief begins graph capturing on the stream + + When a stream is in capture mode, all operations pushed into the stream + will not be executed, but will instead be captured into a graph, + which will be returned via cudaStream::end_capture. + + A thread's mode can be one of the following: + + @c cudaStreamCaptureModeGlobal: This is the default mode. + If the local thread has an ongoing capture sequence that was not initiated + with @c cudaStreamCaptureModeRelaxed at @c cuStreamBeginCapture, + or if any other thread has a concurrent capture sequence initiated with + @c cudaStreamCaptureModeGlobal, this thread is prohibited from potentially + unsafe API calls. + + + @c cudaStreamCaptureModeThreadLocal: If the local thread has an ongoing capture + sequence not initiated with @c cudaStreamCaptureModeRelaxed, + it is prohibited from potentially unsafe API calls. + Concurrent capture sequences in other threads are ignored. 
+ + + @c cudaStreamCaptureModeRelaxed: The local thread is not prohibited + from potentially unsafe API calls. Note that the thread is still prohibited + from API calls which necessarily conflict with stream capture, for example, + attempting @c cudaEventQuery on an event that was last recorded + inside a capture sequence. + */ + void begin_capture(cudaStreamCaptureMode m = cudaStreamCaptureModeGlobal) const { + TF_CHECK_CUDA( + cudaStreamBeginCapture(object, m), + "failed to begin capture on stream ", object, " with thread mode ", m + ); + } + + /** + @brief ends graph capturing on the stream + + Equivalently calling @c cudaStreamEndCapture to + end capture on stream and returning the captured graph. + Capture must have been initiated on stream via a call to cudaStream::begin_capture. + If capture was invalidated, due to a violation of the rules of stream capture, + then a NULL graph will be returned. + */ + cudaGraph_t end_capture() const { + cudaGraph_t native_g; + TF_CHECK_CUDA( + cudaStreamEndCapture(object, &native_g), + "failed to end capture on stream ", object + ); + return native_g; + } + + /** + @brief records an event on the stream + + Equivalently calling @c cudaEventRecord to record an event on this stream, + both of which must be on the same CUDA context. + */ + void record(cudaEvent_t event) const { + TF_CHECK_CUDA( + cudaEventRecord(event, object), + "failed to record event ", event, " on stream ", object + ); + } + + /** + @brief waits on an event + + Equivalently calling @c cudaStreamWaitEvent to make all future work + submitted to stream wait for all work captured in event. + */ + void wait(cudaEvent_t event) const { + TF_CHECK_CUDA( + cudaStreamWaitEvent(object, event, 0), + "failed to wait for event ", event, " on stream ", object + ); + } +}; + +// ---------------------------------------------------------------------------- +// cudaEvent +// ---------------------------------------------------------------------------- + +/** +@private +*/ +struct cudaEventCreator { + + cudaEvent_t operator () () const { + cudaEvent_t event; + TF_CHECK_CUDA(cudaEventCreate(&event), "failed to create a CUDA event"); + return event; + } + + cudaEvent_t operator () (unsigned int flag) const { + cudaEvent_t event; + TF_CHECK_CUDA( + cudaEventCreateWithFlags(&event, flag), + "failed to create a CUDA event with flag=", flag + ); + return event; + } +}; + +/** +@private +*/ +struct cudaEventDeleter { + void operator () (cudaEvent_t event) const { + cudaEventDestroy(event); + } +}; + +/** +@class cudaEvent + +@brief class to create an RAII-styled wrapper over a native CUDA event + +A cudaEvent object is an RAII-styled wrapper over a native CUDA event +(@c cudaEvent_t). +A cudaEvent object is move-only. 
+*/ +class cudaEvent : + public cudaObject { + + public: + + /** + @brief constructs an RAII-styled CUDA event object from the given CUDA event + */ + explicit cudaEvent(cudaEvent_t event) : cudaObject(event) { } + + /** + @brief constructs an RAII-styled CUDA event object + */ + cudaEvent() = default; + + /** + @brief constructs an RAII-styled CUDA event object with the given flag + */ + explicit cudaEvent(unsigned int flag) : cudaObject(cudaEventCreator{}(flag)) { } +}; + + +} // end of namespace tf ----------------------------------------------------- + + + diff --git a/bundled/taskflow-3.6.0/include/cuda/cuda_task.hpp b/bundled/taskflow-3.6.0/include/cuda/cuda_task.hpp new file mode 100644 index 0000000000..92fac9ccc6 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/cuda/cuda_task.hpp @@ -0,0 +1,274 @@ +#pragma once + +#include "cuda_graph.hpp" + +/** +@file cuda_task.hpp +@brief cudaTask include file +*/ + +namespace tf { + +// ---------------------------------------------------------------------------- +// cudaTask Types +// ---------------------------------------------------------------------------- + +/** +@enum cudaTaskType + +@brief enumeration of all %cudaTask types +*/ +enum class cudaTaskType : int { + /** @brief empty task type */ + EMPTY = 0, + /** @brief host task type */ + HOST, + /** @brief memory set task type */ + MEMSET, + /** @brief memory copy task type */ + MEMCPY, + /** @brief memory copy task type */ + KERNEL, + /** @brief subflow (child graph) task type */ + SUBFLOW, + /** @brief capture task type */ + CAPTURE, + /** @brief undefined task type */ + UNDEFINED +}; + +/** +@brief convert a cuda_task type to a human-readable string +*/ +constexpr const char* to_string(cudaTaskType type) { + switch(type) { + case cudaTaskType::EMPTY: return "empty"; + case cudaTaskType::HOST: return "host"; + case cudaTaskType::MEMSET: return "memset"; + case cudaTaskType::MEMCPY: return "memcpy"; + case cudaTaskType::KERNEL: return "kernel"; + case cudaTaskType::SUBFLOW: return "subflow"; + case cudaTaskType::CAPTURE: return "capture"; + default: return "undefined"; + } +} + +// ---------------------------------------------------------------------------- +// cudaTask +// ---------------------------------------------------------------------------- + +/** +@class cudaTask + +@brief class to create a task handle over an internal node of a %cudaFlow graph +*/ +class cudaTask { + + friend class cudaFlow; + friend class cudaFlowCapturer; + friend class cudaFlowCapturerBase; + + friend std::ostream& operator << (std::ostream&, const cudaTask&); + + public: + + /** + @brief constructs an empty cudaTask + */ + cudaTask() = default; + + /** + @brief copy-constructs a cudaTask + */ + cudaTask(const cudaTask&) = default; + + /** + @brief copy-assigns a cudaTask + */ + cudaTask& operator = (const cudaTask&) = default; + + /** + @brief adds precedence links from this to other tasks + + @tparam Ts parameter pack + + @param tasks one or multiple tasks + + @return @c *this + */ + template + cudaTask& precede(Ts&&... tasks); + + /** + @brief adds precedence links from other tasks to this + + @tparam Ts parameter pack + + @param tasks one or multiple tasks + + @return @c *this + */ + template + cudaTask& succeed(Ts&&... 
tasks); + + /** + @brief assigns a name to the task + + @param name a @std_string acceptable string + + @return @c *this + */ + cudaTask& name(const std::string& name); + + /** + @brief queries the name of the task + */ + const std::string& name() const; + + /** + @brief queries the number of successors + */ + size_t num_successors() const; + + /** + @brief queries the number of dependents + */ + size_t num_dependents() const; + + /** + @brief queries if the task is associated with a cudaFlowNode + */ + bool empty() const; + + /** + @brief queries the task type + */ + cudaTaskType type() const; + + /** + @brief dumps the task through an output stream + + @tparam T output stream type with insertion operator (<<) defined + @param ostream an output stream target + */ + template + void dump(T& ostream) const; + + /** + @brief applies an visitor callable to each successor of the task + */ + template + void for_each_successor(V&& visitor) const; + + /** + @brief applies an visitor callable to each dependents of the task + */ + template + void for_each_dependent(V&& visitor) const; + + private: + + cudaTask(cudaFlowNode*); + + cudaFlowNode* _node {nullptr}; +}; + +// Constructor +inline cudaTask::cudaTask(cudaFlowNode* node) : _node {node} { +} + +// Function: precede +template +cudaTask& cudaTask::precede(Ts&&... tasks) { + (_node->_precede(tasks._node), ...); + return *this; +} + +// Function: succeed +template +cudaTask& cudaTask::succeed(Ts&&... tasks) { + (tasks._node->_precede(_node), ...); + return *this; +} + +// Function: empty +inline bool cudaTask::empty() const { + return _node == nullptr; +} + +// Function: name +inline cudaTask& cudaTask::name(const std::string& name) { + _node->_name = name; + return *this; +} + +// Function: name +inline const std::string& cudaTask::name() const { + return _node->_name; +} + +// Function: num_successors +inline size_t cudaTask::num_successors() const { + return _node->_successors.size(); +} + +// Function: num_dependents +inline size_t cudaTask::num_dependents() const { + return _node->_dependents.size(); +} + +// Function: type +inline cudaTaskType cudaTask::type() const { + switch(_node->_handle.index()) { + case cudaFlowNode::EMPTY: return cudaTaskType::EMPTY; + case cudaFlowNode::HOST: return cudaTaskType::HOST; + case cudaFlowNode::MEMSET: return cudaTaskType::MEMSET; + case cudaFlowNode::MEMCPY: return cudaTaskType::MEMCPY; + case cudaFlowNode::KERNEL: return cudaTaskType::KERNEL; + case cudaFlowNode::SUBFLOW: return cudaTaskType::SUBFLOW; + case cudaFlowNode::CAPTURE: return cudaTaskType::CAPTURE; + default: return cudaTaskType::UNDEFINED; + } +} + +// Procedure: dump +template +void cudaTask::dump(T& os) const { + os << "cudaTask "; + if(_node->_name.empty()) os << _node; + else os << _node->_name; + os << " [type=" << to_string(type()) << ']'; +} + +// Function: for_each_successor +template +void cudaTask::for_each_successor(V&& visitor) const { + for(size_t i=0; i<_node->_successors.size(); ++i) { + visitor(cudaTask(_node->_successors[i])); + } +} + +// Function: for_each_dependent +template +void cudaTask::for_each_dependent(V&& visitor) const { + for(size_t i=0; i<_node->_dependents.size(); ++i) { + visitor(cudaTask(_node->_dependents[i])); + } +} + +// ---------------------------------------------------------------------------- +// global ostream +// ---------------------------------------------------------------------------- + +/** +@brief overload of ostream inserter operator for cudaTask +*/ +inline std::ostream& operator << 
(std::ostream& os, const cudaTask& ct) { + ct.dump(os); + return os; +} + +} // end of namespace tf ----------------------------------------------------- + + + diff --git a/bundled/taskflow-3.6.0/include/cuda/cudaflow.hpp b/bundled/taskflow-3.6.0/include/cuda/cudaflow.hpp new file mode 100644 index 0000000000..61d5c84dc3 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/cuda/cudaflow.hpp @@ -0,0 +1,1024 @@ +#pragma once + +#include "../taskflow.hpp" +#include "cuda_task.hpp" +#include "cuda_capturer.hpp" + +/** +@file taskflow/cuda/cudaflow.hpp +@brief cudaFlow include file +*/ + +namespace tf { + +// ---------------------------------------------------------------------------- +// class definition: cudaFlow +// ---------------------------------------------------------------------------- + +/** +@class cudaFlow + +@brief class to create a %cudaFlow task dependency graph + +A %cudaFlow is a high-level interface over CUDA Graph to perform GPU operations +using the task dependency graph model. +The class provides a set of methods for creating and launch different tasks +on one or multiple CUDA devices, +for instance, kernel tasks, data transfer tasks, and memory operation tasks. +The following example creates a %cudaFlow of two kernel tasks, @c task1 and +@c task2, where @c task1 runs before @c task2. + +@code{.cpp} +tf::Taskflow taskflow; +tf::Executor executor; + +taskflow.emplace([&](tf::cudaFlow& cf){ + // create two kernel tasks + tf::cudaTask task1 = cf.kernel(grid1, block1, shm_size1, kernel1, args1); + tf::cudaTask task2 = cf.kernel(grid2, block2, shm_size2, kernel2, args2); + + // kernel1 runs before kernel2 + task1.precede(task2); +}); + +executor.run(taskflow).wait(); +@endcode + +A %cudaFlow is a task (tf::Task) created from tf::Taskflow +and will be run by @em one worker thread in the executor. +That is, the callable that describes a %cudaFlow +will be executed sequentially. +Inside a %cudaFlow task, different GPU tasks (tf::cudaTask) may run +in parallel scheduled by the CUDA runtime. + +Please refer to @ref GPUTaskingcudaFlow for details. +*/ +class cudaFlow { + + public: + + /** + @brief constructs a %cudaFlow + */ + cudaFlow(); + + /** + @brief destroys the %cudaFlow and its associated native CUDA graph + and executable graph + */ + ~cudaFlow() = default; + + /** + @brief default move constructor + */ + cudaFlow(cudaFlow&&) = default; + + /** + @brief default move assignment operator + */ + cudaFlow& operator = (cudaFlow&&) = default; + + /** + @brief queries the emptiness of the graph + */ + bool empty() const; + + /** + @brief queries the number of tasks + */ + size_t num_tasks() const; + + /** + @brief clears the %cudaFlow object + */ + void clear(); + + /** + @brief dumps the %cudaFlow graph into a DOT format through an + output stream + */ + void dump(std::ostream& os) const; + + /** + @brief dumps the native CUDA graph into a DOT format through an + output stream + + The native CUDA graph may be different from the upper-level %cudaFlow + graph when flow capture is involved. + */ + void dump_native_graph(std::ostream& os) const; + + // ------------------------------------------------------------------------ + // Graph building routines + // ------------------------------------------------------------------------ + + /** + @brief creates a no-operation task + + @return a tf::cudaTask handle + + An empty node performs no operation during execution, + but can be used for transitive ordering. 
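+
+  A minimal sketch of this barrier pattern (the kernel names, launch shapes,
+  and the count @c n are illustrative placeholders, not part of the API):
+
+  @code{.cpp}
+  taskflow.emplace([&](tf::cudaFlow& cf){
+    tf::cudaTask barrier = cf.noop();
+    for(size_t i=0; i<n; ++i) {
+      cf.kernel(grid, block, 0, producer, i).precede(barrier);
+      barrier.precede(cf.kernel(grid, block, 0, consumer, i));
+    }
+  });
+  @endcode
+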
+ For example, a phased execution graph with 2 groups of @c n nodes + with a barrier between them can be represented using an empty node + and @c 2*n dependency edges, + rather than no empty node and @c n^2 dependency edges. + */ + cudaTask noop(); + + /** + @brief creates a host task that runs a callable on the host + + @tparam C callable type + + @param callable a callable object with neither arguments nor return + (i.e., constructible from @c std::function) + + @return a tf::cudaTask handle + + A host task can only execute CPU-specific functions and cannot do any CUDA calls + (e.g., @c cudaMalloc). + */ + template + cudaTask host(C&& callable); + + /** + @brief updates parameters of a host task + + The method is similar to tf::cudaFlow::host but operates on a task + of type tf::cudaTaskType::HOST. + */ + template + void host(cudaTask task, C&& callable); + + /** + @brief creates a kernel task + + @tparam F kernel function type + @tparam ArgsT kernel function parameters type + + @param g configured grid + @param b configured block + @param s configured shared memory size in bytes + @param f kernel function + @param args arguments to forward to the kernel function by copy + + @return a tf::cudaTask handle + */ + template + cudaTask kernel(dim3 g, dim3 b, size_t s, F f, ArgsT... args); + + /** + @brief updates parameters of a kernel task + + The method is similar to tf::cudaFlow::kernel but operates on a task + of type tf::cudaTaskType::KERNEL. + The kernel function name must NOT change. + */ + template + void kernel( + cudaTask task, dim3 g, dim3 b, size_t shm, F f, ArgsT... args + ); + + /** + @brief creates a memset task that fills untyped data with a byte value + + @param dst pointer to the destination device memory area + @param v value to set for each byte of specified memory + @param count size in bytes to set + + @return a tf::cudaTask handle + + A memset task fills the first @c count bytes of device memory area + pointed by @c dst with the byte value @c v. + */ + cudaTask memset(void* dst, int v, size_t count); + + /** + @brief updates parameters of a memset task + + The method is similar to tf::cudaFlow::memset but operates on a task + of type tf::cudaTaskType::MEMSET. + The source/destination memory may have different address values but + must be allocated from the same contexts as the original + source/destination memory. + */ + void memset(cudaTask task, void* dst, int ch, size_t count); + + /** + @brief creates a memcpy task that copies untyped data in bytes + + @param tgt pointer to the target memory block + @param src pointer to the source memory block + @param bytes bytes to copy + + @return a tf::cudaTask handle + + A memcpy task transfers @c bytes of data from a source location + to a target location. Direction can be arbitrary among CPUs and GPUs. + */ + cudaTask memcpy(void* tgt, const void* src, size_t bytes); + + /** + @brief updates parameters of a memcpy task + + The method is similar to tf::cudaFlow::memcpy but operates on a task + of type tf::cudaTaskType::MEMCPY. + The source/destination memory may have different address values but + must be allocated from the same contexts as the original + source/destination memory. 
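+
+  One possible usage sketch with a standalone %cudaFlow
+  (@c stream, @c d_buf, @c h_src_a, @c h_src_b, and @c bytes are placeholders):
+
+  @code{.cpp}
+  tf::cudaFlow cf;
+  tf::cudaTask h2d = cf.memcpy(d_buf, h_src_a, bytes);
+  cf.run(stream);                          // instantiates and launches the graph
+  cudaStreamSynchronize(stream);
+
+  cf.memcpy(h2d, d_buf, h_src_b, bytes);   // retarget the copy source
+  cf.run(stream);                          // relaunch with the updated parameters
+  @endcode
+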
+ */ + void memcpy(cudaTask task, void* tgt, const void* src, size_t bytes); + + /** + @brief creates a memset task that sets a typed memory block to zero + + @tparam T element type (size of @c T must be either 1, 2, or 4) + @param dst pointer to the destination device memory area + @param count number of elements + + @return a tf::cudaTask handle + + A zero task zeroes the first @c count elements of type @c T + in a device memory area pointed by @c dst. + */ + template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr + > + cudaTask zero(T* dst, size_t count); + + /** + @brief updates parameters of a memset task to a zero task + + The method is similar to tf::cudaFlow::zero but operates on + a task of type tf::cudaTaskType::MEMSET. + + The source/destination memory may have different address values but + must be allocated from the same contexts as the original + source/destination memory. + */ + template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr + > + void zero(cudaTask task, T* dst, size_t count); + + /** + @brief creates a memset task that fills a typed memory block with a value + + @tparam T element type (size of @c T must be either 1, 2, or 4) + + @param dst pointer to the destination device memory area + @param value value to fill for each element of type @c T + @param count number of elements + + @return a tf::cudaTask handle + + A fill task fills the first @c count elements of type @c T with @c value + in a device memory area pointed by @c dst. + The value to fill is interpreted in type @c T rather than byte. + */ + template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr + > + cudaTask fill(T* dst, T value, size_t count); + + /** + @brief updates parameters of a memset task to a fill task + + The method is similar to tf::cudaFlow::fill but operates on a task + of type tf::cudaTaskType::MEMSET. + + The source/destination memory may have different address values but + must be allocated from the same contexts as the original + source/destination memory. + */ + template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr + > + void fill(cudaTask task, T* dst, T value, size_t count); + + /** + @brief creates a memcopy task that copies typed data + + @tparam T element type (non-void) + + @param tgt pointer to the target memory block + @param src pointer to the source memory block + @param num number of elements to copy + + @return a tf::cudaTask handle + + A copy task transfers num*sizeof(T) bytes of data from a source location + to a target location. Direction can be arbitrary among CPUs and GPUs. + */ + template , void>* = nullptr + > + cudaTask copy(T* tgt, const T* src, size_t num); + + /** + @brief updates parameters of a memcpy task to a copy task + + The method is similar to tf::cudaFlow::copy but operates on a task + of type tf::cudaTaskType::MEMCPY. + The source/destination memory may have different address values but + must be allocated from the same contexts as the original + source/destination memory. + */ + template , void>* = nullptr + > + void copy(cudaTask task, T* tgt, const T* src, size_t num); + + // ------------------------------------------------------------------------ + // run method + // ------------------------------------------------------------------------ + /** + @brief offloads the %cudaFlow onto a GPU asynchronously via a stream + + @param stream stream for performing this operation + + Offloads the present %cudaFlow onto a GPU asynchronously via + the given stream. 
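+
+  A typical offload sequence looks like the following sketch
+  (stream management uses the plain CUDA runtime API; @c grid, @c block,
+  @c my_kernel, and @c d_data are placeholders):
+
+  @code{.cpp}
+  tf::cudaFlow cf;
+  cf.kernel(grid, block, 0, my_kernel, d_data);
+
+  cudaStream_t stream;
+  cudaStreamCreate(&stream);
+  cf.run(stream);                  // the first run instantiates the executable graph
+  cudaStreamSynchronize(stream);   // wait for the launched graph to finish
+  cudaStreamDestroy(stream);
+  @endcode
+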
+ + An offloaded %cudaFlow forces the underlying graph to be instantiated. + After the instantiation, you should not modify the graph topology + but update node parameters. + */ + void run(cudaStream_t stream); + + /** + @brief acquires a reference to the underlying CUDA graph + */ + cudaGraph_t native_graph(); + + /** + @brief acquires a reference to the underlying CUDA graph executable + */ + cudaGraphExec_t native_executable(); + + // ------------------------------------------------------------------------ + // generic algorithms + // ------------------------------------------------------------------------ + + /** + @brief runs a callable with only a single kernel thread + + @tparam C callable type + + @param c callable to run by a single kernel thread + + @return a tf::cudaTask handle + */ + template + cudaTask single_task(C c); + + /** + @brief updates a single-threaded kernel task + + This method is similar to cudaFlow::single_task but operates + on an existing task. + */ + template + void single_task(cudaTask task, C c); + + /** + @brief applies a callable to each dereferenced element of the data array + + @tparam I iterator type + @tparam C callable type + + @param first iterator to the beginning (inclusive) + @param last iterator to the end (exclusive) + @param callable a callable object to apply to the dereferenced iterator + + @return a tf::cudaTask handle + + This method is equivalent to the parallel execution of the following loop on a GPU: + + @code{.cpp} + for(auto itr = first; itr != last; itr++) { + callable(*itr); + } + @endcode + */ + template + cudaTask for_each(I first, I last, C callable); + + /** + @brief updates parameters of a kernel task created from + tf::cudaFlow::for_each + + The type of the iterators and the callable must be the same as + the task created from tf::cudaFlow::for_each. + */ + template + void for_each(cudaTask task, I first, I last, C callable); + + /** + @brief applies a callable to each index in the range with the step size + + @tparam I index type + @tparam C callable type + + @param first beginning index + @param last last index + @param step step size + @param callable the callable to apply to each element in the data array + + @return a tf::cudaTask handle + + This method is equivalent to the parallel execution of the following loop on a GPU: + + @code{.cpp} + // step is positive [first, last) + for(auto i=first; ilast; i+=step) { + callable(i); + } + @endcode + */ + template + cudaTask for_each_index(I first, I last, I step, C callable); + + /** + @brief updates parameters of a kernel task created from + tf::cudaFlow::for_each_index + + The type of the iterators and the callable must be the same as + the task created from tf::cudaFlow::for_each_index. 
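+
+  A small sketch of creating and then updating such a task
+  (assumes nvcc extended lambdas; @c cf, @c d_vec, and @c N are placeholders):
+
+  @code{.cpp}
+  auto op = [d_vec] __device__ (int i) { d_vec[i] = 1.0f; };
+  tf::cudaTask t = cf.for_each_index(0, N, 2, op);   // every other element
+
+  // after offloading, shrink the range but keep the same callable type
+  cf.for_each_index(t, 0, N/2, 2, op);
+  @endcode
+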
+ */ + template + void for_each_index( + cudaTask task, I first, I last, I step, C callable + ); + + /** + @brief applies a callable to a source range and stores the result in a target range + + @tparam I input iterator type + @tparam O output iterator type + @tparam C unary operator type + + @param first iterator to the beginning of the input range + @param last iterator to the end of the input range + @param output iterator to the beginning of the output range + @param op the operator to apply to transform each element in the range + + @return a tf::cudaTask handle + + This method is equivalent to the parallel execution of the following loop on a GPU: + + @code{.cpp} + while (first != last) { + *output++ = callable(*first++); + } + @endcode + */ + template + cudaTask transform(I first, I last, O output, C op); + + /** + @brief updates parameters of a kernel task created from + tf::cudaFlow::transform + + The type of the iterators and the callable must be the same as + the task created from tf::cudaFlow::for_each. + */ + template + void transform(cudaTask task, I first, I last, O output, C c); + + /** + @brief creates a task to perform parallel transforms over two ranges of items + + @tparam I1 first input iterator type + @tparam I2 second input iterator type + @tparam O output iterator type + @tparam C unary operator type + + @param first1 iterator to the beginning of the input range + @param last1 iterator to the end of the input range + @param first2 iterato + @param output iterator to the beginning of the output range + @param op binary operator to apply to transform each pair of items in the + two input ranges + + @return cudaTask handle + + This method is equivalent to the parallel execution of the following loop on a GPU: + + @code{.cpp} + while (first1 != last1) { + *output++ = op(*first1++, *first2++); + } + @endcode + */ + template + cudaTask transform(I1 first1, I1 last1, I2 first2, O output, C op); + + /** + @brief updates parameters of a kernel task created from + tf::cudaFlow::transform + + The type of the iterators and the callable must be the same as + the task created from tf::cudaFlow::for_each. + */ + template + void transform( + cudaTask task, I1 first1, I1 last1, I2 first2, O output, C c + ); + + // ------------------------------------------------------------------------ + // subflow + // ------------------------------------------------------------------------ + + /** + @brief constructs a subflow graph through tf::cudaFlowCapturer + + @tparam C callable type constructible from + @c std::function + + @param callable the callable to construct a capture flow + + @return a tf::cudaTask handle + + A captured subflow forms a sub-graph to the %cudaFlow and can be used to + capture custom (or third-party) kernels that cannot be directly constructed + from the %cudaFlow. + + Example usage: + + @code{.cpp} + taskflow.emplace([&](tf::cudaFlow& cf){ + + tf::cudaTask my_kernel = cf.kernel(my_arguments); + + // create a flow capturer to capture custom kernels + tf::cudaTask my_subflow = cf.capture([&](tf::cudaFlowCapturer& capturer){ + capturer.on([&](cudaStream_t stream){ + invoke_custom_kernel_with_stream(stream, custom_arguments); + }); + }); + + my_kernel.precede(my_subflow); + }); + @endcode + */ + template + cudaTask capture(C&& callable); + + /** + @brief updates the captured child graph + + The method is similar to tf::cudaFlow::capture but operates on a task + of type tf::cudaTaskType::SUBFLOW. 
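+
+  For example, assuming @c sub is a subflow task created earlier through
+  tf::cudaFlow::capture (the kernel and its arguments are placeholders):
+
+  @code{.cpp}
+  cf.capture(sub, [&](tf::cudaFlowCapturer& capturer){
+    capturer.on([&](cudaStream_t stream){
+      my_kernel<<<grid, block, 0, stream>>>(d_data, new_arg);
+    });
+  });
+  @endcode
+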
+ The new captured graph must be topologically identical to the original + captured graph. + */ + template + void capture(cudaTask task, C callable); + + private: + + cudaFlowGraph _cfg; + cudaGraphExec _exe {nullptr}; +}; + +// Construct a standalone cudaFlow +inline cudaFlow::cudaFlow() { + _cfg._native_handle.create(); +} + +// Procedure: clear +inline void cudaFlow::clear() { + _exe.clear(); + _cfg.clear(); + _cfg._native_handle.create(); +} + +// Function: empty +inline bool cudaFlow::empty() const { + return _cfg._nodes.empty(); +} + +// Function: num_tasks +inline size_t cudaFlow::num_tasks() const { + return _cfg._nodes.size(); +} + +// Procedure: dump +inline void cudaFlow::dump(std::ostream& os) const { + _cfg.dump(os, nullptr, ""); +} + +// Procedure: dump +inline void cudaFlow::dump_native_graph(std::ostream& os) const { + cuda_dump_graph(os, _cfg._native_handle); +} + +// ---------------------------------------------------------------------------- +// Graph building methods +// ---------------------------------------------------------------------------- + +// Function: noop +inline cudaTask cudaFlow::noop() { + + auto node = _cfg.emplace_back( + _cfg, std::in_place_type_t{} + ); + + TF_CHECK_CUDA( + cudaGraphAddEmptyNode( + &node->_native_handle, _cfg._native_handle, nullptr, 0 + ), + "failed to create a no-operation (empty) node" + ); + + return cudaTask(node); +} + +// Function: host +template +cudaTask cudaFlow::host(C&& c) { + + auto node = _cfg.emplace_back( + _cfg, std::in_place_type_t{}, std::forward(c) + ); + + auto h = std::get_if(&node->_handle); + + cudaHostNodeParams p; + p.fn = cudaFlowNode::Host::callback; + p.userData = h; + + TF_CHECK_CUDA( + cudaGraphAddHostNode( + &node->_native_handle, _cfg._native_handle, nullptr, 0, &p + ), + "failed to create a host node" + ); + + return cudaTask(node); +} + +// Function: kernel +template +cudaTask cudaFlow::kernel( + dim3 g, dim3 b, size_t s, F f, ArgsT... args +) { + + auto node = _cfg.emplace_back( + _cfg, std::in_place_type_t{}, (void*)f + ); + + cudaKernelNodeParams p; + void* arguments[sizeof...(ArgsT)] = { (void*)(&args)... 
}; + p.func = (void*)f; + p.gridDim = g; + p.blockDim = b; + p.sharedMemBytes = s; + p.kernelParams = arguments; + p.extra = nullptr; + + TF_CHECK_CUDA( + cudaGraphAddKernelNode( + &node->_native_handle, _cfg._native_handle, nullptr, 0, &p + ), + "failed to create a kernel task" + ); + + return cudaTask(node); +} + +// Function: zero +template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* +> +cudaTask cudaFlow::zero(T* dst, size_t count) { + + auto node = _cfg.emplace_back( + _cfg, std::in_place_type_t{} + ); + + auto p = cuda_get_zero_parms(dst, count); + + TF_CHECK_CUDA( + cudaGraphAddMemsetNode( + &node->_native_handle, _cfg._native_handle, nullptr, 0, &p + ), + "failed to create a memset (zero) task" + ); + + return cudaTask(node); +} + +// Function: fill +template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* +> +cudaTask cudaFlow::fill(T* dst, T value, size_t count) { + + auto node = _cfg.emplace_back( + _cfg, std::in_place_type_t{} + ); + + auto p = cuda_get_fill_parms(dst, value, count); + + TF_CHECK_CUDA( + cudaGraphAddMemsetNode( + &node->_native_handle, _cfg._native_handle, nullptr, 0, &p + ), + "failed to create a memset (fill) task" + ); + + return cudaTask(node); +} + +// Function: copy +template < + typename T, + std::enable_if_t, void>* +> +cudaTask cudaFlow::copy(T* tgt, const T* src, size_t num) { + + auto node = _cfg.emplace_back( + _cfg, std::in_place_type_t{} + ); + + auto p = cuda_get_copy_parms(tgt, src, num); + + TF_CHECK_CUDA( + cudaGraphAddMemcpyNode( + &node->_native_handle, _cfg._native_handle, nullptr, 0, &p + ), + "failed to create a memcpy (copy) task" + ); + + return cudaTask(node); +} + +// Function: memset +inline cudaTask cudaFlow::memset(void* dst, int ch, size_t count) { + + auto node = _cfg.emplace_back( + _cfg, std::in_place_type_t{} + ); + + auto p = cuda_get_memset_parms(dst, ch, count); + + TF_CHECK_CUDA( + cudaGraphAddMemsetNode( + &node->_native_handle, _cfg._native_handle, nullptr, 0, &p + ), + "failed to create a memset task" + ); + + return cudaTask(node); +} + +// Function: memcpy +inline cudaTask cudaFlow::memcpy(void* tgt, const void* src, size_t bytes) { + + auto node = _cfg.emplace_back( + _cfg, std::in_place_type_t{} + ); + + auto p = cuda_get_memcpy_parms(tgt, src, bytes); + + TF_CHECK_CUDA( + cudaGraphAddMemcpyNode( + &node->_native_handle, _cfg._native_handle, nullptr, 0, &p + ), + "failed to create a memcpy task" + ); + + return cudaTask(node); +} + +// ------------------------------------------------------------------------ +// update methods +// ------------------------------------------------------------------------ + +// Function: host +template +void cudaFlow::host(cudaTask task, C&& c) { + + if(task.type() != cudaTaskType::HOST) { + TF_THROW(task, " is not a host task"); + } + + auto h = std::get_if(&task._node->_handle); + + h->func = std::forward(c); +} + +// Function: update kernel parameters +template +void cudaFlow::kernel( + cudaTask task, dim3 g, dim3 b, size_t s, F f, ArgsT... args +) { + + if(task.type() != cudaTaskType::KERNEL) { + TF_THROW(task, " is not a kernel task"); + } + + cudaKernelNodeParams p; + + void* arguments[sizeof...(ArgsT)] = { (void*)(&args)... 
}; + p.func = (void*)f; + p.gridDim = g; + p.blockDim = b; + p.sharedMemBytes = s; + p.kernelParams = arguments; + p.extra = nullptr; + + TF_CHECK_CUDA( + cudaGraphExecKernelNodeSetParams(_exe, task._node->_native_handle, &p), + "failed to update kernel parameters on ", task + ); +} + +// Function: update copy parameters +template , void>*> +void cudaFlow::copy(cudaTask task, T* tgt, const T* src, size_t num) { + + if(task.type() != cudaTaskType::MEMCPY) { + TF_THROW(task, " is not a memcpy task"); + } + + auto p = cuda_get_copy_parms(tgt, src, num); + + TF_CHECK_CUDA( + cudaGraphExecMemcpyNodeSetParams(_exe, task._node->_native_handle, &p), + "failed to update memcpy parameters on ", task + ); +} + +// Function: update memcpy parameters +inline void cudaFlow::memcpy( + cudaTask task, void* tgt, const void* src, size_t bytes +) { + + if(task.type() != cudaTaskType::MEMCPY) { + TF_THROW(task, " is not a memcpy task"); + } + + auto p = cuda_get_memcpy_parms(tgt, src, bytes); + + TF_CHECK_CUDA( + cudaGraphExecMemcpyNodeSetParams(_exe, task._node->_native_handle, &p), + "failed to update memcpy parameters on ", task + ); +} + +// Procedure: memset +inline void cudaFlow::memset(cudaTask task, void* dst, int ch, size_t count) { + + if(task.type() != cudaTaskType::MEMSET) { + TF_THROW(task, " is not a memset task"); + } + + auto p = cuda_get_memset_parms(dst, ch, count); + + TF_CHECK_CUDA( + cudaGraphExecMemsetNodeSetParams(_exe, task._node->_native_handle, &p), + "failed to update memset parameters on ", task + ); +} + +// Procedure: fill +template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* +> +void cudaFlow::fill(cudaTask task, T* dst, T value, size_t count) { + + if(task.type() != cudaTaskType::MEMSET) { + TF_THROW(task, " is not a memset task"); + } + + auto p = cuda_get_fill_parms(dst, value, count); + + TF_CHECK_CUDA( + cudaGraphExecMemsetNodeSetParams(_exe, task._node->_native_handle, &p), + "failed to update memset parameters on ", task + ); +} + +// Procedure: zero +template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* +> +void cudaFlow::zero(cudaTask task, T* dst, size_t count) { + + if(task.type() != cudaTaskType::MEMSET) { + TF_THROW(task, " is not a memset task"); + } + + auto p = cuda_get_zero_parms(dst, count); + + TF_CHECK_CUDA( + cudaGraphExecMemsetNodeSetParams(_exe, task._node->_native_handle, &p), + "failed to update memset parameters on ", task + ); +} + +// Function: capture +template +void cudaFlow::capture(cudaTask task, C c) { + + if(task.type() != cudaTaskType::SUBFLOW) { + TF_THROW(task, " is not a subflow task"); + } + + // insert a subflow node + // construct a captured flow from the callable + auto node_handle = std::get_if(&task._node->_handle); + //node_handle->graph.clear(); + + cudaFlowCapturer capturer; + c(capturer); + + // obtain the optimized captured graph + capturer._cfg._native_handle.reset(capturer.capture()); + node_handle->cfg = std::move(capturer._cfg); + + TF_CHECK_CUDA( + cudaGraphExecChildGraphNodeSetParams( + _exe, + task._node->_native_handle, + node_handle->cfg._native_handle + ), + "failed to update a captured child graph" + ); +} + +// ---------------------------------------------------------------------------- +// captured flow +// ---------------------------------------------------------------------------- + +// Function: capture +template +cudaTask cudaFlow::capture(C&& c) { + + // insert a subflow node + auto node = _cfg.emplace_back( + _cfg, std::in_place_type_t{} + ); + + // construct a captured flow from the 
callable + auto node_handle = std::get_if(&node->_handle); + + // perform capturing + cudaFlowCapturer capturer; + c(capturer); + + // obtain the optimized captured graph + capturer._cfg._native_handle.reset(capturer.capture()); + + // move capturer's cudaFlow graph into node + node_handle->cfg = std::move(capturer._cfg); + + TF_CHECK_CUDA( + cudaGraphAddChildGraphNode( + &node->_native_handle, + _cfg._native_handle, + nullptr, + 0, + node_handle->cfg._native_handle + ), + "failed to add a cudaFlow capturer task" + ); + + return cudaTask(node); +} + +// ---------------------------------------------------------------------------- +// run method +// ---------------------------------------------------------------------------- + +// Procedure: run +inline void cudaFlow::run(cudaStream_t stream) { + if(!_exe) { + _exe.instantiate(_cfg._native_handle); + } + _exe.launch(stream); + _cfg._state = cudaFlowGraph::OFFLOADED; +} + +// Function: native_cfg +inline cudaGraph_t cudaFlow::native_graph() { + return _cfg._native_handle; +} + +// Function: native_executable +inline cudaGraphExec_t cudaFlow::native_executable() { + return _exe; +} + +} // end of namespace tf ----------------------------------------------------- + + diff --git a/bundled/taskflow-3.6.0/include/dsl/connection.hpp b/bundled/taskflow-3.6.0/include/dsl/connection.hpp new file mode 100644 index 0000000000..e4dad72cc3 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/dsl/connection.hpp @@ -0,0 +1,53 @@ +// 2020/08/28 - Created by netcan: https://github.com/netcan +#pragma once +#include "../core/flow_builder.hpp" +#include "task_trait.hpp" +#include "tuple_utils.hpp" +#include "type_list.hpp" + +namespace tf { +namespace dsl { +template class Connection { + using FROMs = typename TaskTrait::TaskList; + using TOs = typename TaskTrait::TaskList; + +public: + using FromTaskList = Unique_t>; + using ToTaskList = Unique_t>; +}; + +template > struct Chain; + +template struct Chainvoid, OUT> { + using From = F; + using type = OUT; +}; + +template +struct ChainT, OUT> { +private: + using To = typename Chain::From; + +public: + using From = F; + using type = typename Chain< + T, typename OUT::template appendTo>>::type; +}; + +template struct OneToOneLink { + template struct InstanceType { + constexpr void build(TasksCB &tasksCb) { + constexpr size_t TasksCBSize = std::tuple_size::value; + constexpr size_t FromTaskIndex = + TupleElementByF_v::template apply>; + constexpr size_t ToTaskIndex = + TupleElementByF_v::template apply>; + static_assert(FromTaskIndex < TasksCBSize && ToTaskIndex < TasksCBSize, + "fatal: not find TaskCb in TasksCB"); + std::get(tasksCb).task_.precede( + std::get(tasksCb).task_); + } + }; +}; +} // namespace dsl +}; // namespace tf diff --git a/bundled/taskflow-3.6.0/include/dsl/dsl.hpp b/bundled/taskflow-3.6.0/include/dsl/dsl.hpp new file mode 100644 index 0000000000..e4130e87ef --- /dev/null +++ b/bundled/taskflow-3.6.0/include/dsl/dsl.hpp @@ -0,0 +1,13 @@ +// TaskflowDSL is an experimental project that leverages C++17 to +// provide a dedicated interface for expressive taskflow programming +// +// Created by netcan: https://github.com/netcan + +#pragma once + +#include "dsl/task_dsl.hpp" + +namespace tf { + + +} // end of namespace tf ----------------------------------------------------- diff --git a/bundled/taskflow-3.6.0/include/dsl/meta_macro.hpp b/bundled/taskflow-3.6.0/include/dsl/meta_macro.hpp new file mode 100644 index 0000000000..758bf689ce --- /dev/null +++ 
b/bundled/taskflow-3.6.0/include/dsl/meta_macro.hpp @@ -0,0 +1,72 @@ +// 2020/08/30 - Created by netcan: https://github.com/netcan +// ref https://github.com/Erlkoenig90/map-macro/ +#pragma once +#ifdef _MSC_VER +#define TF_EMPTY() +#define TF_GET_ARG_COUNT_(...) \ + TF_PASTE(TF_GET_ARG_COUNT_I(__VA_ARGS__, 64, 63, 62, 61, 60, 59, 58, 57, 56, \ + 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, \ + 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, \ + 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, \ + 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, \ + 6, 5, 4, 3, 2, 1, 0, ), \ + TF_EMPTY()) + +#else +#define TF_GET_ARG_COUNT_(...) \ + TF_GET_ARG_COUNT_I(__VA_ARGS__, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, \ + 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, \ + 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, \ + 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, \ + 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, ) +#endif + +#define TF_GET_ARG_COUNT(...) TF_GET_ARG_COUNT_(__dummy__, ##__VA_ARGS__) +#define TF_GET_ARG_COUNT_I( \ + e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, \ + e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, \ + e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, \ + e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, \ + e62, e63, e64, size, ...) \ + size + +#define TF_GET_FIRST(a, ...) a +#define TF_GET_SECOND(a, b, ...) b +#define TF_CONCATE(x, y) x##y +#define TF_PASTE(x, y) TF_CONCATE(x, y) + +#define TF_EVAL0(...) __VA_ARGS__ +#define TF_EVAL1(...) TF_EVAL0(TF_EVAL0(TF_EVAL0(__VA_ARGS__))) +#define TF_EVAL2(...) TF_EVAL1(TF_EVAL1(TF_EVAL1(__VA_ARGS__))) +#define TF_EVAL3(...) TF_EVAL2(TF_EVAL2(TF_EVAL2(__VA_ARGS__))) +#define TF_EVAL4(...) TF_EVAL3(TF_EVAL3(TF_EVAL3(__VA_ARGS__))) +#define TF_EVAL5(...) TF_EVAL4(TF_EVAL4(TF_EVAL4(__VA_ARGS__))) + +#ifdef _MSC_VER +// MSVC needs more evaluations +#define TF_EVAL6(...) TF_EVAL5(TF_EVAL5(TF_EVAL5(__VA_ARGS__))) +#define TF_EVAL(...) TF_EVAL6(TF_EVAL6(__VA_ARGS__)) +#else +#define TF_EVAL(...) TF_EVAL5(__VA_ARGS__) +#endif + +#define TF_MAP_END(...) +#define TF_MAP_OUT + +#define EMPTY() +#define DEFER(id) id EMPTY() + +#define TF_MAP_GET_END2() 0, TF_MAP_END +#define TF_MAP_GET_END1(...) TF_MAP_GET_END2 +#define TF_MAP_GET_END(...) TF_MAP_GET_END1 +#define TF_MAP_NEXT0(test, next, ...) next TF_MAP_OUT +#define TF_MAP_NEXT1(test, next) DEFER(TF_MAP_NEXT0)(test, next, 0) +#define TF_MAP_NEXT(test, next) TF_MAP_NEXT1(TF_MAP_GET_END test, next) + +#define TF_MAP0(f, x, peek, ...) \ + f(x) DEFER(TF_MAP_NEXT(peek, TF_MAP1))(f, peek, __VA_ARGS__) +#define TF_MAP1(f, x, peek, ...) \ + f(x) DEFER(TF_MAP_NEXT(peek, TF_MAP0))(f, peek, __VA_ARGS__) + +#define TF_MAP(f, ...) 
\ + TF_EVAL(TF_MAP1(f, __VA_ARGS__, ()()(), ()()(), ()()(), 0)) diff --git a/bundled/taskflow-3.6.0/include/dsl/task_analyzer.hpp b/bundled/taskflow-3.6.0/include/dsl/task_analyzer.hpp new file mode 100644 index 0000000000..295c50bca7 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/dsl/task_analyzer.hpp @@ -0,0 +1,40 @@ +// 2020/08/28 - Created by netcan: https://github.com/netcan +#pragma once +#include "connection.hpp" +#include "type_list.hpp" +#include + +namespace tf { +namespace dsl { +template class TaskAnalyzer { + template + struct BuildOneToOneLink; + + template + struct BuildOneToOneLink, Ts> { + using type = Concat_t::type...>; + }; + + template + struct BuildOneToOneLink, + std::enable_if_t>> { + using type = TypeList...>; + }; + + template class OneToOneLinkSetF { + using FromTaskList = typename Link::FromTaskList; + using ToTaskList = typename Link::ToTaskList; + + public: + using type = typename BuildOneToOneLink::type; + }; + +public: + using AllTasks = Unique_t< + Concat_t>; + using OneToOneLinkSet = + Unique_t, OneToOneLinkSetF>>>; +}; + +} // namespace dsl +} // namespace tf diff --git a/bundled/taskflow-3.6.0/include/dsl/task_dsl.hpp b/bundled/taskflow-3.6.0/include/dsl/task_dsl.hpp new file mode 100644 index 0000000000..9b362cfb65 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/dsl/task_dsl.hpp @@ -0,0 +1,104 @@ +// 2020/08/28 - Created by netcan: https://github.com/netcan +#pragma once +#include "../core/flow_builder.hpp" +#include "meta_macro.hpp" +#include "task_analyzer.hpp" +#include "task_trait.hpp" + +namespace tf { +namespace dsl { +struct EmptyContext {}; +template class TaskDsl { + using Links = Unique_t::type...>>>; + using Analyzer = typename Links::template exportTo; + + using AllTasks = typename Analyzer::AllTasks; + + template struct TaskCbWithContext { + using type = TaskCb; + }; + using TasksCB = + typename Map_t::template exportTo; + + using OneToOneLinkSet = typename Analyzer::OneToOneLinkSet; + template struct OneToOneLinkInstanceType { + using type = typename OneToOneLink::template InstanceType; + }; + using OneToOneLinkInstances = + typename Map_t::template exportTo; + +public: + constexpr TaskDsl(FlowBuilder &flow_builder, const CONTEXT &context = {}) { + build_tasks_cb(flow_builder, context, + std::make_index_sequence{}); + build_links(std::make_index_sequence{}); + } + + template Task &get_task() { + constexpr size_t TasksCBSize = std::tuple_size::value; + constexpr size_t TaskIndex = + TupleElementByF_v::template apply>; + static_assert(TaskIndex < TasksCBSize, "fatal: not find TaskCb in TasksCB"); + return std::get(tasksCb_).task_; + } + +private: + template + void build_tasks_cb(FlowBuilder &flow_builder, const CONTEXT &context, + std::index_sequence) { + auto _ = {0, (std::get(tasksCb_).build(flow_builder, context), 0)...}; + (void)_; + } + + template void build_links(std::index_sequence) { + auto _ = {0, (std::get(links_).build(tasksCb_), 0)...}; + (void)_; + } + +private: + TasksCB tasksCb_; + OneToOneLinkInstances links_; +}; + +template +constexpr TaskDsl taskDsl(FlowBuilder &flow_builder, + CONTEXT &&context = {}) { + return {flow_builder, context}; +} + +} // namespace dsl +} // namespace tf + +/////////////////////////////////////////////////////////////////////////////// +#define TF_CHAIN(link) , link->void +#define TF_CONTEXT_1(name) tf::dsl::EmptyContext +#define TF_CONTEXT_2(name, context) context +#define TF_CAPTURE_THIS_1 +#define TF_CAPTURE_THIS_2 *this + 
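+
+// Note on the helpers above (explanatory; MyContext is a hypothetical user type):
+// make_task below pastes TF_GET_ARG_COUNT of its first argument onto
+// TF_CONTEXT_ / TF_CAPTURE_THIS_, so make_task((A), ...) selects
+// TF_CONTEXT_1 / TF_CAPTURE_THIS_1 (an EmptyContext, nothing captured), while
+// make_task((A, MyContext), ...) selects TF_CONTEXT_2 / TF_CAPTURE_THIS_2
+// (the task struct derives from MyContext and captures *this into the lambda).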
+/////////////////////////////////////////////////////////////////////////////// +// make_task(TASK_NAME, { return a action lambda }) +#define make_task(name, ...) \ + struct TF_GET_FIRST name : tf::dsl::TaskSignature, \ + TF_PASTE(TF_CONTEXT_, TF_GET_ARG_COUNT name) \ + name { \ + using _ContextType = TF_PASTE(TF_CONTEXT_, TF_GET_ARG_COUNT name) name; \ + TF_GET_FIRST name(const _ContextType &context) : _ContextType(context) {} \ + auto operator()() { \ + return [TF_PASTE(TF_CAPTURE_THIS_, TF_GET_ARG_COUNT name)] __VA_ARGS__; \ + } \ + } + +// some_tasks(A, B, C) means SomeTask +#define some_tasks(...) auto (*)(tf::dsl::SomeTask<__VA_ARGS__>) +// same as some_tasks +#define fork_tasks(...) some_tasks(__VA_ARGS__) +// same as some_tasks +#define merge_tasks(...) some_tasks(__VA_ARGS__) +// task(A) means a task A +#define task(Task) auto (*)(Task) +// taskbuild(...) build a task dsl graph +#define build_taskflow(...) tf::dsl::taskDsl + diff --git a/bundled/taskflow-3.6.0/include/dsl/task_trait.hpp b/bundled/taskflow-3.6.0/include/dsl/task_trait.hpp new file mode 100644 index 0000000000..bc8eeb6059 --- /dev/null +++ b/bundled/taskflow-3.6.0/include/dsl/task_trait.hpp @@ -0,0 +1,46 @@ +// 2020/08/28 - Created by netcan: https://github.com/netcan +#pragma once +#include "../core/flow_builder.hpp" +#include "../core/task.hpp" +#include "type_list.hpp" +#include + +namespace tf { +namespace dsl { +struct TaskSignature {}; + +template struct TaskCb { + using TaskType = TASK; + void build(FlowBuilder &build, const CONTEXT &context) { + task_ = build.emplace(TaskType{context}()); + } + + Task task_; +}; + +template struct IsTask { + template struct apply { + constexpr static bool value = + std::is_same::value; + }; +}; + +template struct TaskTrait; + +template struct SomeTask { + using TaskList = + Unique_t::TaskList...>>>; +}; + +// a task self +template +struct TaskTrait< + TASK, std::enable_if_t::value>> { + using TaskList = TypeList; +}; + +template struct TaskTrait> { + using TaskList = typename SomeTask::TaskList; +}; +} // namespace dsl +} // namespace tf diff --git a/bundled/taskflow-3.6.0/include/dsl/tuple_utils.hpp b/bundled/taskflow-3.6.0/include/dsl/tuple_utils.hpp new file mode 100644 index 0000000000..633ba0e41f --- /dev/null +++ b/bundled/taskflow-3.6.0/include/dsl/tuple_utils.hpp @@ -0,0 +1,43 @@ +// 2020/08/28 - Created by netcan: https://github.com/netcan +#pragma once +#include +#include + +namespace tf { +namespace dsl { +namespace detail { +// get tuple element index by f, if not exists then index >= tuple_size +template class F, typename = void> +struct TupleElementByF { + constexpr static size_t Index = 0; +}; + +template