From: Wolfgang Bangerth
Date: Fri, 2 May 2025 23:16:25 +0000 (-0600)
Subject: Update TaskFlow to 3.10.
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=07c09e2fca5710505eae0d49a93367d924469c4f;p=dealii.git

Update TaskFlow to 3.10.
---

diff --git a/bundled/taskflow-3.10.0/README.md b/bundled/taskflow-3.10.0/README.md
index addfb3e923..c78a74b30f 100644
--- a/bundled/taskflow-3.10.0/README.md
+++ b/bundled/taskflow-3.10.0/README.md
@@ -69,7 +69,7 @@ in parallel computing. Check out [Who is Using Taskflow](https://taskflow.github
 See a quick [presentation](https://taskflow.github.io/) and
 visit the [documentation][documentation] to learn more about Taskflow.
-Technical details can be referred to our [IEEE TPDS paper][TPDS21].
+Technical details can be referred to our [IEEE TPDS paper][TPDS22].

 # Start Your First Taskflow Program

@@ -212,7 +212,7 @@ cond.precede(cond, stop);

 ## Offload a Task to a GPU

-Taskflow supports GPU tasking for you to accelerate a wide range of scientific computing applications by harnessing the power of CPU-GPU collaborative computing using CUDA.
+Taskflow supports GPU tasking for you to accelerate a wide range of scientific computing applications by harnessing the power of CPU-GPU collaborative computing using Nvidia CUDA Graph.

 ```cpp
 __global__ void saxpy(size_t N, float alpha, float* dx, float* dy) {
   ...
   y[i] = a*x[i] + y[i];
   }
 }
-tf::Task cudaflow = taskflow.emplace([&](tf::cudaFlow& cf) {
-
-  // data copy tasks
-  tf::cudaTask h2d_x = cf.copy(dx, hx.data(), N).name("h2d_x");
-  tf::cudaTask h2d_y = cf.copy(dy, hy.data(), N).name("h2d_y");
-  tf::cudaTask d2h_x = cf.copy(hx.data(), dx, N).name("d2h_x");
-  tf::cudaTask d2h_y = cf.copy(hy.data(), dy, N).name("d2h_y");
-  // kernel task with parameters to launch the saxpy kernel
-  tf::cudaTask saxpy = cf.kernel(
-    (N+255)/256, 256, 0, saxpy, N, 2.0f, dx, dy
-  ).name("saxpy");
-
+// create a CUDA Graph task
+tf::Task cudaflow = taskflow.emplace([&]() {
+  tf::cudaGraph cg;
+  tf::cudaTask h2d_x = cg.copy(dx, hx.data(), N);
+  tf::cudaTask h2d_y = cg.copy(dy, hy.data(), N);
+  tf::cudaTask d2h_x = cg.copy(hx.data(), dx, N);
+  tf::cudaTask d2h_y = cg.copy(hy.data(), dy, N);
+  tf::cudaTask saxpy = cg.kernel((N+255)/256, 256, 0, saxpy, N, 2.0f, dx, dy);
   saxpy.succeed(h2d_x, h2d_y)
        .precede(d2h_x, d2h_y);
-}).name("cudaFlow");
+
+  // instantiate an executable CUDA graph and run it through a stream
+  tf::cudaGraphExec exec(cg);
+  tf::cudaStream stream;
+  stream.run(exec).synchronize();
+}).name("CUDA Graph Task");
 ```

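For context, the hunk above replaces Taskflow's old tf::cudaFlow example with the new tf::cudaGraph interface. Below is a minimal, self-contained sketch of that pattern; it is illustrative only and not part of this commit. The include paths, the host/device buffer setup, and the kernel body (the hunk elides it with `...`) are assumptions; the tf::cudaGraph, tf::cudaGraphExec, and tf::cudaStream calls mirror the lines added in the README above. It would need to be compiled with nvcc against the bundled Taskflow headers.

```cpp
// Sketch only: the tf::cudaGraph pattern introduced by the README hunk above.
// Assumed: header locations, buffer setup, and the saxpy kernel body.
#include <taskflow/taskflow.hpp>
#include <taskflow/cuda/cudaflow.hpp>  // assumed header for the CUDA Graph utilities
#include <cuda_runtime.h>
#include <vector>

__global__ void saxpy(size_t n, float a, float* x, float* y) {
  size_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if(i < n) {
    y[i] = a * x[i] + y[i];
  }
}

int main() {
  const size_t N = 1 << 20;
  std::vector<float> hx(N, 1.0f), hy(N, 2.0f);

  float *dx = nullptr, *dy = nullptr;
  cudaMalloc(&dx, N * sizeof(float));
  cudaMalloc(&dy, N * sizeof(float));

  tf::Executor executor;
  tf::Taskflow taskflow("saxpy");

  // one CPU task that builds, instantiates, and launches a CUDA graph
  taskflow.emplace([&]() {
    tf::cudaGraph cg;
    tf::cudaTask h2d_x  = cg.copy(dx, hx.data(), N);
    tf::cudaTask h2d_y  = cg.copy(dy, hy.data(), N);
    tf::cudaTask d2h_x  = cg.copy(hx.data(), dx, N);
    tf::cudaTask d2h_y  = cg.copy(hy.data(), dy, N);
    tf::cudaTask kernel = cg.kernel((N + 255) / 256, 256, 0, saxpy, N, 2.0f, dx, dy);
    kernel.succeed(h2d_x, h2d_y)
          .precede(d2h_x, d2h_y);

    // instantiate an executable CUDA graph and run it through a stream
    tf::cudaGraphExec exec(cg);
    tf::cudaStream stream;
    stream.run(exec).synchronize();
  }).name("CUDA Graph Task");

  executor.run(taskflow).wait();

  cudaFree(dx);
  cudaFree(dy);
}
```

The visible design change is that graph capture, instantiation, and launch are now explicit in user code (tf::cudaGraph → tf::cudaGraphExec → tf::cudaStream), rather than being handled implicitly by a tf::cudaFlow task.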
@@ -328,7 +329,6 @@ patterns using standard C++ syntaxes, such as parallel iterations, parallel reductions, and parallel sort. ```cpp -// standard parallel CPU algorithms tf::Task task1 = taskflow.for_each( // assign each element to 100 in parallel first, last, [] (auto& i) { i = 100; } ); @@ -338,17 +338,6 @@ tf::Task task2 = taskflow.reduce( // reduce a range of items in parallel tf::Task task3 = taskflow.sort( // sort a range of items in parallel first, last, [] (auto a, auto b) { return a < b; } ); - -// standard parallel GPU algorithms -tf::cudaTask cuda1 = cudaflow.for_each( // assign each element to 100 on GPU - dfirst, dlast, [] __device__ (auto i) { i = 100; } -); -tf::cudaTask cuda2 = cudaflow.reduce( // reduce a range of items on GPU - dfirst, dlast, init, [] __device__ (auto a, auto b) { return a + b; } -); -tf::cudaTask cuda3 = cudaflow.sort( // sort a range of items on GPU - dfirst, dlast, [] __device__ (auto a, auto b) { return a < b; } -); ``` Additionally, Taskflow provides composable graph building blocks for you to @@ -419,7 +408,7 @@ the following organizations for sponsoring the Taskflow project! | | | | | |:-------------------------:|:-------------------------:|:-------------------------:|:-------------------------:| || | | | -| | | | | +| | | | | # License diff --git a/bundled/taskflow-3.10.0/taskflow/algorithm/find.hpp b/bundled/taskflow-3.10.0/taskflow/algorithm/find.hpp index 60c79f8321..1f07bb4daf 100644 --- a/bundled/taskflow-3.10.0/taskflow/algorithm/find.hpp +++ b/bundled/taskflow-3.10.0/taskflow/algorithm/find.hpp @@ -4,61 +4,6 @@ namespace tf { -namespace detail { - -// Function: find_if_loop -template -bool find_if_loop( - std::atomic& offset, - Iterator& beg, - size_t& prev_e, - size_t curr_b, - size_t curr_e, - Predicate predicate -) { - // early prune - if(offset.load(std::memory_order_relaxed) < curr_b) { - return true; - } - std::advance(beg, curr_b - prev_e); - for(size_t x = curr_b; x -bool find_if_not_loop( - std::atomic& offset, - Iterator& beg, - size_t& prev_e, - size_t curr_b, - size_t curr_e, - Predicate predicate -) { - - // early prune - if(offset.load(std::memory_order_relaxed) < curr_b) { - return true; - } - std::advance(beg, curr_b - prev_e); - for(size_t x = curr_b; x auto make_find_if_task(B first, E last, T& result, UOP predicate, P part = P()) { @@ -79,7 +24,7 @@ auto make_find_if_task(B first, E last, T& result, UOP predicate, P part = P()) // only myself - no need to spawn another graph if(W <= 1 || N <= part.chunk_size()) { - part([&](){ result = std::find_if(beg, end, predicate); })(); + part([=, &result]() mutable { result = std::find_if(beg, end, predicate); })(); return; } @@ -89,31 +34,32 @@ auto make_find_if_task(B first, E last, T& result, UOP predicate, P part = P()) if(N < W) { W = N; } - - // we leverage smart pointer to let the last task update the result - std::shared_ptr> offset( - new std::atomic(N), - [=, &result](std::atomic* p) { - result = std::next(beg, p->load(std::memory_order_relaxed)); - delete p; - } - ); + + auto mutex = std::make_shared(); + const auto origin = beg; + result = std::next(origin, N); // static partitioner if constexpr(part.type() == PartitionerType::STATIC) { for(size_t w=0, curr_b=0; w lock(*mutex); + if(size_t offset = std::distance(origin, result); x < offset) { + result = std::next(origin, x); + } + return true; + } + } + prev_e = part_e; + return false; } ); - // must release the ownership before async is destroyed - // as the node deletion comes after the join counter reaches zero - 
offset.reset(); }); (++w == W || (curr_b += chunk_size) >= N) ? task() : rt.silent_async(task); } @@ -122,17 +68,23 @@ auto make_find_if_task(B first, E last, T& result, UOP predicate, P part = P()) else { auto next = std::make_shared>(0); for(size_t w=0; w lock(*mutex); + if(size_t offset = std::distance(origin, result); x < offset) { + result = std::next(origin, x); + } + return true; + } + } + prev_e = part_e; + return false; } ); - // must release the ownership before async is destroyed - // as the node deletion comes after the join counter reaches zero - offset.reset(); }); (++w == W) ? task() : rt.silent_async(task); } @@ -160,7 +112,7 @@ auto make_find_if_not_task(B first, E last, T& result, UOP predicate, P part = P // only myself - no need to spawn another graph if(W <= 1 || N <= part.chunk_size()) { - part([&](){ result = std::find_if_not(beg, end, predicate); })(); + part([=, &result] () mutable { result = std::find_if_not(beg, end, predicate); })(); return; } @@ -170,30 +122,31 @@ auto make_find_if_not_task(B first, E last, T& result, UOP predicate, P part = P W = N; } - // we leverage smart pointer to let the last task update the result - std::shared_ptr> offset( - new std::atomic(N), - [=, &result](std::atomic* p) { - result = std::next(beg, p->load(std::memory_order_relaxed)); - delete p; - } - ); - + auto mutex = std::make_shared(); + const auto origin = beg; + result = std::next(origin, N); + // static partitioner if constexpr(part.type() == PartitionerType::STATIC) { for(size_t w=0, curr_b=0; w lock(*mutex); + if(size_t offset = std::distance(origin, result); x < offset) { + result = std::next(origin, x); + } + return true; + } + } + prev_e = part_e; + return false; } ); - // must release the ownership before async is destroyed - // as the node deletion comes after the join counter reaches zero - offset.reset(); }); (++w == W || (curr_b += chunk_size) >= N) ? task() : rt.silent_async(task); } @@ -202,17 +155,23 @@ auto make_find_if_not_task(B first, E last, T& result, UOP predicate, P part = P else { auto next = std::make_shared>(0); for(size_t w=0; w lock(*mutex); + if(size_t offset = std::distance(origin, result); x < offset) { + result = std::next(origin, x); + } + return true; + } + } + prev_e = part_e; + return false; } ); - // must release the ownership before async is destroyed - // as the node deletion comes after the join counter reaches zero - offset.reset(); }); (++w == W) ? 
task() : rt.silent_async(task); } @@ -240,7 +199,7 @@ auto make_min_element_task(B first, E last, T& result, C comp, P part = P()) { // only myself - no need to spawn another graph if(W <= 1 || N <= part.chunk_size()) { - part([&](){ result = std::min_element(beg, end, comp); })(); + part([=, &result] () mutable { result = std::min_element(beg, end, comp); })(); return; } @@ -384,7 +343,7 @@ auto make_max_element_task(B first, E last, T& result, C comp, P part = P()) { // only myself - no need to spawn another graph if(W <= 1 || N <= part.chunk_size()) { - part([&](){ result = std::max_element(beg, end, comp); })(); + part([=, &result] () mutable { result = std::max_element(beg, end, comp); })(); return; } diff --git a/bundled/taskflow-3.10.0/taskflow/algorithm/for_each.hpp b/bundled/taskflow-3.10.0/taskflow/algorithm/for_each.hpp index 376ff6f2e5..03f70579d5 100644 --- a/bundled/taskflow-3.10.0/taskflow/algorithm/for_each.hpp +++ b/bundled/taskflow-3.10.0/taskflow/algorithm/for_each.hpp @@ -8,8 +8,6 @@ namespace tf { template auto make_for_each_task(B b, E e, C c, P part = P()) { - using namespace std::string_literals; - using B_t = std::decay_t>; using E_t = std::decay_t>; @@ -24,7 +22,7 @@ auto make_for_each_task(B b, E e, C c, P part = P()) { // the workload is sequentially doable if(W <= 1 || N <= part.chunk_size()) { - part([=](){ std::for_each(beg, end, c); })(); + part([=]() mutable { std::for_each(beg, end, c); })(); return; } @@ -79,8 +77,6 @@ auto make_for_each_task(B b, E e, C c, P part = P()) { template auto make_for_each_index_task(B b, E e, S s, C c, P part = P()){ - using namespace std::string_literals; - using B_t = std::decay_t>; using E_t = std::decay_t>; using S_t = std::decay_t>; @@ -102,7 +98,7 @@ auto make_for_each_index_task(B b, E e, S s, C c, P part = P()){ // only myself - no need to spawn another graph if(W <= 1 || N <= part.chunk_size()) { - part([&](){ + part([=]() mutable { for(size_t x=0; x>(0); for(size_t w=0; w(part_b) * inc + beg; for(size_t x=part_b; x -auto make_for_each_index_task(R range, C c, P part = P()){ +auto make_for_each_by_index_task(R range, C c, P part = P()){ using range_type = std::decay_t>; @@ -170,7 +166,7 @@ auto make_for_each_index_task(R range, C c, P part = P()){ // only myself - no need to spawn another graph if(W <= 1 || N <= part.chunk_size()) { - part([&](){ c(r); })(); + part([=]() mutable { c(r); })(); return; } @@ -231,11 +227,11 @@ Task FlowBuilder::for_each_index(B beg, E end, S inc, C c, P part){ ); } -// Function: for_each_index +// Function: for_each_by_index template -Task FlowBuilder::for_each_index(R range, C c, P part){ +Task FlowBuilder::for_each_by_index(R range, C c, P part){ return emplace( - make_for_each_index_task(range, c, part) + make_for_each_by_index_task(range, c, part) ); } diff --git a/bundled/taskflow-3.10.0/taskflow/algorithm/module.hpp b/bundled/taskflow-3.10.0/taskflow/algorithm/module.hpp index 566ac1918e..03ec3bd78d 100644 --- a/bundled/taskflow-3.10.0/taskflow/algorithm/module.hpp +++ b/bundled/taskflow-3.10.0/taskflow/algorithm/module.hpp @@ -18,7 +18,7 @@ auto Algorithm::make_module_task(T&& target) { } PreemptionGuard preemption_guard(rt); rt._executor._schedule_graph_with_parent( - rt._worker, graph.begin(), graph.end(), rt._parent, NSTATE::NONE + rt._worker, graph.begin(), graph.end(), rt._parent ); }; } diff --git a/bundled/taskflow-3.10.0/taskflow/algorithm/partitioner.hpp b/bundled/taskflow-3.10.0/taskflow/algorithm/partitioner.hpp index b09952778e..8c8df95569 100644 --- 
a/bundled/taskflow-3.10.0/taskflow/algorithm/partitioner.hpp +++ b/bundled/taskflow-3.10.0/taskflow/algorithm/partitioner.hpp @@ -314,7 +314,7 @@ class GuidedPartitioner : public PartitionerBase { if(curr_b >= N) { return; } - func(curr_b, std::min(curr_b + chunk_size, N)); + func(curr_b, (std::min)(curr_b + chunk_size, N)); } break; } @@ -325,7 +325,7 @@ class GuidedPartitioner : public PartitionerBase { q = chunk_size; } //size_t curr_e = (q <= r) ? curr_b + q : N; - size_t curr_e = std::min(curr_b + q, N); + size_t curr_e = (std::min)(curr_b + q, N); if(next.compare_exchange_strong(curr_b, curr_e, std::memory_order_relaxed, std::memory_order_relaxed)) { func(curr_b, curr_e); @@ -362,7 +362,7 @@ class GuidedPartitioner : public PartitionerBase { if(curr_b >= N) { return; } - if(func(curr_b, std::min(curr_b + chunk_size, N))) { + if(func(curr_b, (std::min)(curr_b + chunk_size, N))) { return; } } @@ -375,7 +375,7 @@ class GuidedPartitioner : public PartitionerBase { q = chunk_size; } //size_t curr_e = (q <= r) ? curr_b + q : N; - size_t curr_e = std::min(curr_b + q, N); + size_t curr_e = (std::min)(curr_b + q, N); if(next.compare_exchange_strong(curr_b, curr_e, std::memory_order_relaxed, std::memory_order_relaxed)) { if(func(curr_b, curr_e)) { @@ -477,7 +477,7 @@ class DynamicPartitioner : public PartitionerBase { size_t curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed); while(curr_b < N) { - func(curr_b, std::min(curr_b + chunk_size, N)); + func(curr_b, (std::min)(curr_b + chunk_size, N)); curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed); } } @@ -496,7 +496,7 @@ class DynamicPartitioner : public PartitionerBase { size_t curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed); while(curr_b < N) { - if(func(curr_b, std::min(curr_b + chunk_size, N))) { + if(func(curr_b, (std::min)(curr_b + chunk_size, N))) { return; } curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed); @@ -609,7 +609,7 @@ class StaticPartitioner : public PartitionerBase { ) { size_t stride = W * chunk_size; while(curr_b < N) { - size_t curr_e = std::min(curr_b + chunk_size, N); + size_t curr_e = (std::min)(curr_b + chunk_size, N); func(curr_b, curr_e); curr_b += stride; } @@ -626,7 +626,7 @@ class StaticPartitioner : public PartitionerBase { ) { size_t stride = W * chunk_size; while(curr_b < N) { - size_t curr_e = std::min(curr_b + chunk_size, N); + size_t curr_e = (std::min)(curr_b + chunk_size, N); if(func(curr_b, curr_e)) { return; } @@ -743,8 +743,8 @@ class RandomPartitioner : public PartitionerBase { std::swap(b1, b2); } - b1 = std::max(b1, size_t{1}); - b2 = std::max(b2, b1 + 1); + b1 = (std::max)(b1, size_t{1}); + b2 = (std::max)(b2, b1 + 1); return {b1, b2}; } @@ -772,7 +772,7 @@ class RandomPartitioner : public PartitionerBase { size_t curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed); while(curr_b < N) { - func(curr_b, std::min(curr_b + chunk_size, N)); + func(curr_b, (std::min)(curr_b + chunk_size, N)); chunk_size = dist(engine); curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed); } @@ -797,7 +797,7 @@ class RandomPartitioner : public PartitionerBase { size_t curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed); while(curr_b < N) { - if(func(curr_b, std::min(curr_b + chunk_size, N))){ + if(func(curr_b, (std::min)(curr_b + chunk_size, N))){ return; } chunk_size = dist(engine); @@ -808,7 +808,7 @@ class RandomPartitioner : public PartitionerBase { private: float _alpha {0.01f}; - float _beta {0.5f}; + float _beta {0.50f}; }; /** diff 
--git a/bundled/taskflow-3.10.0/taskflow/algorithm/reduce.hpp b/bundled/taskflow-3.10.0/taskflow/algorithm/reduce.hpp index e5396175d7..2eab24a667 100644 --- a/bundled/taskflow-3.10.0/taskflow/algorithm/reduce.hpp +++ b/bundled/taskflow-3.10.0/taskflow/algorithm/reduce.hpp @@ -13,7 +13,7 @@ auto make_reduce_task(B b, E e, T& init, O bop, P part = P()) { using B_t = std::decay_t>; using E_t = std::decay_t>; - return [=, &r=init] (Runtime& rt) mutable { + return [=, &init] (Runtime& rt) mutable { // fetch the iterator values B_t beg = b; @@ -24,7 +24,7 @@ auto make_reduce_task(B b, E e, T& init, O bop, P part = P()) { // only myself - no need to spawn another graph if(W <= 1 || N <= part.chunk_size()) { - part([&](){ for(; beg!=end; r = bop(r, *beg++)); })(); + part([=, &init] () mutable { for(; beg!=end; init = bop(init, *beg++)); })(); return; } @@ -45,13 +45,13 @@ auto make_reduce_task(B b, E e, T& init, O bop, P part = P()) { // variable sum need to avoid copy at the first step auto chunk_size = std::max(size_t{2}, part.adjusted_chunk_size(N, W, w)); - auto task = part([=, &r] () mutable { + auto task = part([=, &init] () mutable { std::advance(beg, curr_b); if(N - curr_b == 1) { std::lock_guard lock(*mutex); - r = bop(r, *beg); + init = bop(init, *beg); return; } @@ -79,7 +79,7 @@ auto make_reduce_task(B b, E e, T& init, O bop, P part = P()) { // final reduce std::lock_guard lock(*mutex); - r = bop(r, sum); + init = bop(init, sum); }); (++w == W || (curr_b += chunk_size) >= N) ? task() : rt.silent_async(task); @@ -91,7 +91,7 @@ auto make_reduce_task(B b, E e, T& init, O bop, P part = P()) { for(size_t w=0; wfetch_add(2, std::memory_order_relaxed); @@ -103,7 +103,7 @@ auto make_reduce_task(B b, E e, T& init, O bop, P part = P()) { if(N - s0 == 1) { std::lock_guard lock(*mutex); - r = bop(r, *beg); + init = bop(init, *beg); return; } @@ -125,7 +125,7 @@ auto make_reduce_task(B b, E e, T& init, O bop, P part = P()) { // final reduce std::lock_guard lock(*mutex); - r = bop(r, sum); + init = bop(init, sum); }); (++w == W) ? task() : rt.silent_async(task); } @@ -144,7 +144,7 @@ auto make_transform_reduce_task(B b, E e, T& init, BOP bop, UOP uop, P part = P( using B_t = std::decay_t>; using E_t = std::decay_t>; - return [=, &r=init] (Runtime& rt) mutable { + return [=, &init] (Runtime& rt) mutable { // fetch the iterator values B_t beg = b; @@ -155,7 +155,7 @@ auto make_transform_reduce_task(B b, E e, T& init, BOP bop, UOP uop, P part = P( // only myself - no need to spawn another graph if(W <= 1 || N <= part.chunk_size()) { - part([&](){ for(; beg!=end; r = bop(std::move(r), uop(*beg++))); })(); + part([=, &init] () mutable { for(; beg!=end; init = bop(std::move(init), uop(*beg++))); })(); return; } @@ -174,13 +174,13 @@ auto make_transform_reduce_task(B b, E e, T& init, BOP bop, UOP uop, P part = P( auto chunk_size = part.adjusted_chunk_size(N, W, w); - auto task = part([=, &r] () mutable { + auto task = part([=, &init] () mutable { std::advance(beg, curr_b); if(N - curr_b == 1) { std::lock_guard lock(*mutex); - r = bop(std::move(r), uop(*beg)); + init = bop(std::move(init), uop(*beg)); return; } @@ -209,7 +209,7 @@ auto make_transform_reduce_task(B b, E e, T& init, BOP bop, UOP uop, P part = P( // final reduce std::lock_guard lock(*mutex); - r = bop(std::move(r), std::move(sum)); + init = bop(std::move(init), std::move(sum)); }); (++w == W || (curr_b += chunk_size) >= N) ? 
task() : rt.silent_async(task); @@ -219,7 +219,7 @@ auto make_transform_reduce_task(B b, E e, T& init, BOP bop, UOP uop, P part = P( else { auto next = std::make_shared>(0); for(size_t w=0; wfetch_add(2, std::memory_order_relaxed); @@ -232,7 +232,7 @@ auto make_transform_reduce_task(B b, E e, T& init, BOP bop, UOP uop, P part = P( if(N - s0 == 1) { std::lock_guard lock(*mutex); - r = bop(std::move(r), uop(*beg)); + init = bop(std::move(init), uop(*beg)); return; } @@ -254,7 +254,7 @@ auto make_transform_reduce_task(B b, E e, T& init, BOP bop, UOP uop, P part = P( // final reduce std::lock_guard lock(*mutex); - r = bop(std::move(r), std::move(sum)); + init = bop(std::move(init), std::move(sum)); }); (++w == W) ? task() : rt.silent_async(task); } @@ -290,7 +290,7 @@ auto make_transform_reduce_task( // only myself - no need to spawn another graph if(W <= 1 || N <= part.chunk_size()) { - part([&](){ for(; beg1!=end1; r = bop_r(std::move(r), bop_t(*beg1++, *beg2++))); })(); + part([=, &r] () mutable { for(; beg1!=end1; r = bop_r(std::move(r), bop_t(*beg1++, *beg2++))); })(); return; } @@ -424,7 +424,7 @@ auto make_reduce_by_index_task(R range, T& init, L lop, G gop, P part = P()) { // only myself - no need to spawn another graph if(W <= 1 || N <= part.chunk_size()) { - part([&](){ init = lop(r, std::move(init)); })(); + part([=, &init] () mutable { init = lop(r, std::move(init)); })(); return; } @@ -452,7 +452,7 @@ auto make_reduce_by_index_task(R range, T& init, L lop, G gop, P part = P()) { // loop reduce part.loop(N, W, curr_b, chunk_size, [=, &tmp](size_t part_b, size_t part_e) mutable { - tmp = lop(r.discrete_domain(part_b, part_e), std::move(tmp)); + tmp = lop(r.discrete_domain(part_b, part_e), std::move(tmp)); }); // final reduce - tmp is guaranteed to have value diff --git a/bundled/taskflow-3.10.0/taskflow/algorithm/sort.hpp b/bundled/taskflow-3.10.0/taskflow/algorithm/sort.hpp index 1967516bb7..d30577972b 100644 --- a/bundled/taskflow-3.10.0/taskflow/algorithm/sort.hpp +++ b/bundled/taskflow-3.10.0/taskflow/algorithm/sort.hpp @@ -620,7 +620,7 @@ auto make_sort_task(B b, E e, C cmp) { detail::parallel_pdqsort> && std::is_arithmetic_v::value_type> - >(rt, beg, end, cmp, log2(end - beg)); + >(rt, beg, end, cmp, log2(size_t(end - beg))); }; } diff --git a/bundled/taskflow-3.10.0/taskflow/algorithm/transform.hpp b/bundled/taskflow-3.10.0/taskflow/algorithm/transform.hpp index 0ce692276b..1e8ef8e2a1 100644 --- a/bundled/taskflow-3.10.0/taskflow/algorithm/transform.hpp +++ b/bundled/taskflow-3.10.0/taskflow/algorithm/transform.hpp @@ -29,7 +29,7 @@ auto make_transform_task(B first1, E last1, O d_first, C c, P part = P()) { // only myself - no need to spawn another graph if(W <= 1 || N <= part.chunk_size()) { - part([=](){ std::transform(beg, end, d_beg, c); })(); + part([=]() mutable { std::transform(beg, end, d_beg, c); })(); return; } @@ -103,7 +103,7 @@ auto make_transform_task(B1 first1, E1 last1, B2 first2, O d_first, C c, P part // only myself - no need to spawn another graph if(W <= 1 || N <= part.chunk_size()) { - part([=](){ std::transform(beg1, end1, beg2, d_beg, c); })(); + part([=]() mutable { std::transform(beg1, end1, beg2, d_beg, c); })(); return; } diff --git a/bundled/taskflow-3.10.0/taskflow/core/async.hpp b/bundled/taskflow-3.10.0/taskflow/core/async.hpp index 95fd6b0559..65bcc5ff41 100644 --- a/bundled/taskflow-3.10.0/taskflow/core/async.hpp +++ b/bundled/taskflow-3.10.0/taskflow/core/async.hpp @@ -1,11 +1,40 @@ #pragma once #include "executor.hpp" +#include "runtime.hpp" 
// https://hackmd.io/@sysprog/concurrency-atomics namespace tf { +// ---------------------------------------------------------------------------- +// Async Helper Methods +// ---------------------------------------------------------------------------- + +// Procedure: _schedule_async_task +TF_FORCE_INLINE void Executor::_schedule_async_task(Node* node) { + (pt::this_worker) ? _schedule(*pt::this_worker, node) : _schedule(node); +} + +// Procedure: _tear_down_async +inline void Executor::_tear_down_async(Worker& worker, Node* node, Node*& cache) { + + // from executor + if(auto parent = node->_parent; parent == nullptr) { + _decrement_topology(); + } + // from runtime + else { + auto state = parent->_nstate; + if(parent->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1) { + if(state & NSTATE::PREEMPTED) { + _update_cache(worker, cache, parent); + } + } + } + recycle(node); +} + // ---------------------------------------------------------------------------- // Async // ---------------------------------------------------------------------------- @@ -23,6 +52,53 @@ auto Executor::async(F&& f) { return async(DefaultTaskParams{}, std::forward(f)); } +// Function: _async +template +auto Executor::_async(P&& params, F&& f, Topology* tpg, Node* parent) { + + // async task with runtime: [] (tf::Runtime&) -> void {} + if constexpr (is_runtime_task_v) { + + std::promise p; + auto fu{p.get_future()}; + + _schedule_async_task(animate( + NSTATE::NONE, ESTATE::ANCHORED, std::forward
<P>
(params), tpg, parent, 0, + std::in_place_type_t{}, + [p=MoC{std::move(p)}, f=std::forward(f)](Runtime& rt, bool reentered) mutable { + if(!reentered) { + f(rt); + } + else { + auto& eptr = rt._parent->_exception_ptr; + eptr ? p.object.set_exception(eptr) : p.object.set_value(); + } + } + )); + return fu; + } + // async task with closure: [] () -> auto { return ... } + else if constexpr (std::is_invocable_v){ + using R = std::invoke_result_t; + std::packaged_task p(std::forward(f)); + auto fu{p.get_future()}; + _schedule_async_task(animate( + NSTATE::NONE, ESTATE::NONE, std::forward
<P>
(params), tpg, parent, 0, + std::in_place_type_t{}, + [p=make_moc(std::move(p))]() mutable { p.object(); } + )); + return fu; + } + else { + static_assert(dependent_false_v, + "invalid async target - must be one of the following types:\n\ + (1) [] (tf::Runtime&) -> void {}\n\ + (2) [] () -> auto { ... return ... }\n" + ); + } +} + + // ---------------------------------------------------------------------------- // Silent Async // ---------------------------------------------------------------------------- @@ -31,9 +107,7 @@ auto Executor::async(F&& f) { template void Executor::silent_async(P&& params, F&& f) { _increment_topology(); - _silent_async( - std::forward
<P>
(params), std::forward<F>(f), nullptr, nullptr - ); + _silent_async(std::forward
<P>
(params), std::forward(f), nullptr, nullptr); } // Function: silent_async @@ -42,35 +116,24 @@ void Executor::silent_async(F&& f) { silent_async(DefaultTaskParams{}, std::forward(f)); } -// ---------------------------------------------------------------------------- -// Async Helper Methods -// ---------------------------------------------------------------------------- - -// Procedure: _schedule_async_task -inline void Executor::_schedule_async_task(Node* node) { - // Here we don't use _this_worker since _schedule will check if the - // given worker belongs to this executor. - (pt::this_worker && pt::this_worker->_executor == this) ? _schedule(*pt::this_worker, node) : - _schedule(node); -} - -// Procedure: _tear_down_async -inline void Executor::_tear_down_async(Worker& worker, Node* node, Node*& cache) { - - // from executor - if(auto parent = node->_parent; parent == nullptr) { - _decrement_topology(); +// Function: _silent_async +template +void Executor::_silent_async(P&& params, F&& f, Topology* tpg, Node* parent) { + // silent task + if constexpr (is_runtime_task_v || is_static_task_v) { + _schedule_async_task(animate( + NSTATE::NONE, ESTATE::NONE, std::forward
<P>
(params), tpg, parent, 0, + std::in_place_type_t{}, std::forward(f) + )); } - // from runtime + // invalid silent async target else { - auto state = parent->_nstate; - if(parent->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1) { - if(state & NSTATE::PREEMPTED) { - _update_cache(worker, cache, parent); - } - } + static_assert(dependent_false_v, + "invalid silent_async target - must be one of the following types:\n\ + (1) [] (tf::Runtime&) -> void {}\n\ + (2) [] () -> void { ... }\n" + ); } - recycle(node); } // ---------------------------------------------------------------------------- @@ -121,7 +184,7 @@ tf::AsyncTask Executor::silent_dependent_async( size_t num_dependents = std::distance(first, last); AsyncTask task(animate( - std::forward
<P>
(params), nullptr, nullptr, num_dependents, + NSTATE::NONE, ESTATE::NONE, std::forward
<P>
(params), nullptr, nullptr, num_dependents, std::in_place_type_t{}, std::forward(func) )); @@ -177,7 +240,7 @@ auto Executor::dependent_async(P&& params, F&& func, I first, I last) { size_t num_dependents = std::distance(first, last); - // async with runtime: [] (tf::Runtime&) {} + // async with runtime: [] (tf::Runtime&) -> void {} if constexpr (is_runtime_task_v) { std::promise p; @@ -207,7 +270,7 @@ auto Executor::dependent_async(P&& params, F&& func, I first, I last) { return std::make_pair(std::move(task), std::move(fu)); } - // async without runtime: [] () {} + // async without runtime: [] () -> auto { return ... } else if constexpr(std::is_invocable_v) { using R = std::invoke_result_t; @@ -215,7 +278,7 @@ auto Executor::dependent_async(P&& params, F&& func, I first, I last) { auto fu{p.get_future()}; AsyncTask task(animate( - std::forward
<P>
(params), nullptr, nullptr, num_dependents, + NSTATE::NONE, ESTATE::NONE, std::forward
<P>
(params), nullptr, nullptr, num_dependents, std::in_place_type_t{}, [p=make_moc(std::move(p))] () mutable { p.object(); } )); @@ -246,25 +309,26 @@ inline void Executor::_process_async_dependent( auto& state = std::get_if(&(task._node->_handle))->state; - add_successor: + while (true) { - auto target = ASTATE::UNFINISHED; - - // acquires the lock - if(state.compare_exchange_weak(target, ASTATE::LOCKED, - std::memory_order_acq_rel, - std::memory_order_acquire)) { - task._node->_successors.push_back(node); - state.store(ASTATE::UNFINISHED, std::memory_order_release); - } - // dep's state is FINISHED, which means dep finished its callable already - // thus decrement the node's join counter by 1 - else if (target == ASTATE::FINISHED) { - num_dependents = node->_join_counter.fetch_sub(1, std::memory_order_acq_rel) - 1; - } - // another worker adding its async task to the same successors of this node - else { - goto add_successor; + auto target = ASTATE::UNFINISHED; + + // Try to acquire the lock + if (state.compare_exchange_strong(target, ASTATE::LOCKED, + std::memory_order_acq_rel, + std::memory_order_acquire)) { + task._node->_edges.push_back(node); + state.store(ASTATE::UNFINISHED, std::memory_order_release); + break; + } + + // If already finished, decrement the join counter + if (target == ASTATE::FINISHED) { + num_dependents = node->_join_counter.fetch_sub(1, std::memory_order_acq_rel) - 1; + break; + } + + // If locked by another worker, retry } } @@ -283,8 +347,8 @@ inline void Executor::_tear_down_dependent_async(Worker& worker, Node* node, Nod } // spawn successors whenever their dependencies are resolved - for(size_t i=0; i_successors.size(); ++i) { - if(auto s = node->_successors[i]; + for(size_t i=0; i_edges.size(); ++i) { + if(auto s = node->_edges[i]; s->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1 ) { _update_cache(worker, cache, s); diff --git a/bundled/taskflow-3.10.0/taskflow/core/atomic_notifier.hpp b/bundled/taskflow-3.10.0/taskflow/core/atomic_notifier.hpp index 851a11c86e..cb7b5c94db 100644 --- a/bundled/taskflow-3.10.0/taskflow/core/atomic_notifier.hpp +++ b/bundled/taskflow-3.10.0/taskflow/core/atomic_notifier.hpp @@ -8,6 +8,8 @@ namespace tf { +//----------------------------------------------------------------------------- + class AtomicNotifier { friend class Executor; @@ -27,7 +29,9 @@ class AtomicNotifier { void prepare_wait(Waiter*) noexcept; void cancel_wait(Waiter*) noexcept; void commit_wait(Waiter*) noexcept; + size_t size() const noexcept; + size_t num_waiters() const noexcept; private: @@ -48,115 +52,23 @@ class AtomicNotifier { std::vector _waiters; static constexpr uint64_t WAITER_INC {1}; - static constexpr size_t EPOCH_SHIFT {32}; + static constexpr uint64_t EPOCH_SHIFT {32}; static constexpr uint64_t EPOCH_INC {uint64_t(1) << EPOCH_SHIFT}; static constexpr uint64_t WAITER_MASK {EPOCH_INC - 1}; }; -inline void AtomicNotifier::notify_one() noexcept { - uint64_t prev = _state.fetch_add(EPOCH_INC, std::memory_order_acq_rel); - if(TF_UNLIKELY(prev & WAITER_MASK)) { // has waiter (typically unlikely) - _state.notify_one(); - } -} - -inline void AtomicNotifier::notify_all() noexcept { - uint64_t prev = _state.fetch_add(EPOCH_INC, std::memory_order_acq_rel); - if(TF_UNLIKELY(prev & WAITER_MASK)) { // has waiter (typically unlikely) - _state.notify_all(); - } -} - -inline void AtomicNotifier::notify_n(size_t n) noexcept { - if(n >= _waiters.size()) { - notify_all(); - } - else { - for(size_t k=0; kepoch = (prev >> EPOCH_SHIFT); -} - -inline void 
AtomicNotifier::cancel_wait(Waiter*) noexcept { - // memory_order_relaxed would suffice for correctness, but the faster - // #waiters gets to 0, the less likely it is that we'll do spurious wakeups - // (and thus system calls). - _state.fetch_sub(WAITER_INC, std::memory_order_seq_cst); -} - -inline void AtomicNotifier::commit_wait(Waiter* waiter) noexcept { - uint64_t prev = _state.load(std::memory_order_acquire); - while((prev >> EPOCH_SHIFT) == waiter->epoch) { - _state.wait(prev, std::memory_order_acquire); - prev = _state.load(std::memory_order_acquire); - } - // memory_order_relaxed would suffice for correctness, but the faster - // #waiters gets to 0, the less likely it is that we'll do spurious wakeups - // (and thus system calls) - _state.fetch_sub(WAITER_INC, std::memory_order_seq_cst); +inline size_t AtomicNotifier::num_waiters() const noexcept { + return _state.load(std::memory_order_relaxed) & WAITER_MASK; } -//----------------------------------------------------------------------------- - -class AtomicNotifierV2 { - - friend class Executor; - - public: - - struct Waiter { - alignas (2*TF_CACHELINE_SIZE) uint32_t epoch; - }; - - AtomicNotifierV2(size_t N) noexcept : _state(0), _waiters(N) {} - ~AtomicNotifierV2() { assert((_state.load() & WAITER_MASK) == 0); } - - void notify_one() noexcept; - void notify_all() noexcept; - void notify_n(size_t n) noexcept; - void prepare_wait(Waiter*) noexcept; - void cancel_wait(Waiter*) noexcept; - void commit_wait(Waiter*) noexcept; - size_t size() const noexcept; - - private: - - AtomicNotifierV2(const AtomicNotifierV2&) = delete; - AtomicNotifierV2(AtomicNotifierV2&&) = delete; - AtomicNotifierV2& operator=(const AtomicNotifierV2&) = delete; - AtomicNotifierV2& operator=(AtomicNotifierV2&&) = delete; - - // This requires 64-bit - static_assert(sizeof(int) == 4, "bad platform"); - static_assert(sizeof(uint32_t) == 4, "bad platform"); - static_assert(sizeof(uint64_t) == 8, "bad platform"); - static_assert(sizeof(std::atomic) == 8, "bad platform"); - - // _state stores the epoch in the most significant 32 bits and the - // waiter count in the least significant 32 bits. 
- std::atomic _state; - std::vector _waiters; - - static constexpr uint64_t WAITER_INC {1}; - static constexpr uint64_t EPOCH_SHIFT {32}; - static constexpr uint64_t EPOCH_INC {uint64_t(1) << EPOCH_SHIFT}; - static constexpr uint64_t WAITER_MASK {EPOCH_INC - 1}; -}; - -inline void AtomicNotifierV2::notify_one() noexcept { +inline void AtomicNotifier::notify_one() noexcept { std::atomic_thread_fence(std::memory_order_seq_cst); //if((_state.load(std::memory_order_acquire) & WAITER_MASK) != 0) { - // _state.fetch_add(EPOCH_INC, std::memory_order_release); + // _state.fetch_add(EPOCH_INC, std::memory_order_relaxed); // _state.notify_one(); //} @@ -168,10 +80,10 @@ inline void AtomicNotifierV2::notify_one() noexcept { } } -inline void AtomicNotifierV2::notify_all() noexcept { +inline void AtomicNotifier::notify_all() noexcept { std::atomic_thread_fence(std::memory_order_seq_cst); //if((_state.load(std::memory_order_acquire) & WAITER_MASK) != 0) { - // _state.fetch_add(EPOCH_INC, std::memory_order_release); + // _state.fetch_add(EPOCH_INC, std::memory_order_relaxed); // _state.notify_all(); //} for(uint64_t state = _state.load(std::memory_order_acquire); state & WAITER_MASK;) { @@ -182,7 +94,7 @@ inline void AtomicNotifierV2::notify_all() noexcept { } } -inline void AtomicNotifierV2::notify_n(size_t n) noexcept { +inline void AtomicNotifier::notify_n(size_t n) noexcept { if(n >= _waiters.size()) { notify_all(); } @@ -193,25 +105,21 @@ inline void AtomicNotifierV2::notify_n(size_t n) noexcept { } } -inline size_t AtomicNotifierV2::size() const noexcept { - return _waiters.size(); -} - -inline void AtomicNotifierV2::prepare_wait(Waiter* waiter) noexcept { +inline void AtomicNotifier::prepare_wait(Waiter* waiter) noexcept { auto prev = _state.fetch_add(WAITER_INC, std::memory_order_relaxed); waiter->epoch = (prev >> EPOCH_SHIFT); std::atomic_thread_fence(std::memory_order_seq_cst); } -inline void AtomicNotifierV2::cancel_wait(Waiter*) noexcept { - _state.fetch_sub(WAITER_INC, std::memory_order_relaxed); +inline void AtomicNotifier::cancel_wait(Waiter*) noexcept { + _state.fetch_sub(WAITER_INC, std::memory_order_seq_cst); } -inline void AtomicNotifierV2::commit_wait(Waiter* waiter) noexcept { - uint64_t prev = _state.load(std::memory_order_seq_cst); +inline void AtomicNotifier::commit_wait(Waiter* waiter) noexcept { + uint64_t prev = _state.load(std::memory_order_acquire); while((prev >> EPOCH_SHIFT) == waiter->epoch) { - _state.wait(prev, std::memory_order_seq_cst); - prev = _state.load(std::memory_order_seq_cst); + _state.wait(prev, std::memory_order_acquire); + prev = _state.load(std::memory_order_acquire); } // memory_order_relaxed would suffice for correctness, but the faster // #waiters gets to 0, the less likely it is that we'll do spurious wakeups diff --git a/bundled/taskflow-3.10.0/taskflow/core/declarations.hpp b/bundled/taskflow-3.10.0/taskflow/core/declarations.hpp index 9a36946855..f17a75f912 100644 --- a/bundled/taskflow-3.10.0/taskflow/core/declarations.hpp +++ b/bundled/taskflow-3.10.0/taskflow/core/declarations.hpp @@ -7,7 +7,6 @@ namespace tf { // ---------------------------------------------------------------------------- class Algorithm; -class AsyncTopology; class Node; class Graph; class FlowBuilder; @@ -46,13 +45,8 @@ class cudaFlowLinearOptimizer; class cudaFlowSequentialOptimizer; class cudaFlowRoundRobinOptimizer; -// ---------------------------------------------------------------------------- -// syclFlow -// 
---------------------------------------------------------------------------- -class syclNode; -class syclGraph; -class syclTask; -class syclFlow; +template +class cudaGraphExecBase; // ---------------------------------------------------------------------------- // struct diff --git a/bundled/taskflow-3.10.0/taskflow/core/error.hpp b/bundled/taskflow-3.10.0/taskflow/core/error.hpp index 18a9f456d7..f4acc3dfdf 100644 --- a/bundled/taskflow-3.10.0/taskflow/core/error.hpp +++ b/bundled/taskflow-3.10.0/taskflow/core/error.hpp @@ -13,10 +13,11 @@ struct NSTATE { using underlying_type = int; - constexpr static underlying_type NONE = 0x00000000; - constexpr static underlying_type CONDITIONED = 0x10000000; - constexpr static underlying_type DETACHED = 0x20000000; - constexpr static underlying_type PREEMPTED = 0x40000000; + constexpr static underlying_type NONE = 0x00000000; + constexpr static underlying_type CONDITIONED = 0x10000000; + constexpr static underlying_type PREEMPTED = 0x20000000; + constexpr static underlying_type RETAIN_SUBFLOW = 0x40000000; + constexpr static underlying_type JOINED_SUBFLOW = 0x80000000; // mask to isolate state bits - non-state bits store # weak dependents constexpr static underlying_type MASK = 0xF0000000; @@ -29,10 +30,10 @@ struct ESTATE { using underlying_type = int; - constexpr static underlying_type NONE = 0; - constexpr static underlying_type EXCEPTION = 1; - constexpr static underlying_type CANCELLED = 2; - constexpr static underlying_type ANCHORED = 4; + constexpr static underlying_type NONE = 0x00000000; + constexpr static underlying_type EXCEPTION = 0x10000000; + constexpr static underlying_type CANCELLED = 0x20000000; + constexpr static underlying_type ANCHORED = 0x40000000; }; using estate_t = ESTATE::underlying_type; diff --git a/bundled/taskflow-3.10.0/taskflow/core/executor.hpp b/bundled/taskflow-3.10.0/taskflow/core/executor.hpp index 2403eb86a8..e30cc9a80f 100644 --- a/bundled/taskflow-3.10.0/taskflow/core/executor.hpp +++ b/bundled/taskflow-3.10.0/taskflow/core/executor.hpp @@ -516,6 +516,16 @@ class Executor { @endcode */ size_t num_workers() const noexcept; + + /** + @brief queries the number of workers that are currently not making any stealing attempts + */ + size_t num_waiters() const noexcept; + + /** + @brief queries the number of queues used in the work-stealing loop + */ + size_t num_queues() const noexcept; /** @brief queries the number of running topologies at the time of this call @@ -713,7 +723,7 @@ class Executor { /** @brief runs the given function asynchronously - when the given dependents finish + when the given predecessors finish @tparam F callable type @tparam Tasks task types convertible to tf::AsyncTask @@ -745,7 +755,7 @@ class Executor { /** @brief runs the given function asynchronously - when the given dependents finish + when the given predecessors finish @tparam F callable type @tparam Tasks task types convertible to tf::AsyncTask @@ -781,7 +791,7 @@ class Executor { /** @brief runs the given function asynchronously - when the given range of dependents finish + when the given range of predecessors finish @tparam F callable type @tparam I iterator type @@ -818,7 +828,7 @@ class Executor { /** @brief runs the given function asynchronously - when the given range of dependents finish + when the given range of predecessors finish @tparam F callable type @tparam I iterator type @@ -861,7 +871,7 @@ class Executor { /** @brief runs the given function asynchronously - when the given dependents finish + when the given predecessors 
finish @tparam F callable type @tparam Tasks task types convertible to tf::AsyncTask @@ -903,7 +913,7 @@ class Executor { /** @brief runs the given function asynchronously - when the given dependents finish + when the given predecessors finish @tparam P task parameters type @tparam F callable type @@ -949,7 +959,7 @@ class Executor { /** @brief runs the given function asynchronously - when the given range of dependents finish + when the given range of predecessors finish @tparam F callable type @tparam I iterator type @@ -994,7 +1004,7 @@ class Executor { /** @brief runs the given function asynchronously - when the given range of dependents finish + when the given range of predecessors finish @tparam P task parameters type @tparam F callable type @@ -1043,29 +1053,22 @@ class Executor { private: - const size_t _MAX_STEALS; - std::mutex _taskflows_mutex; std::vector _workers; DefaultNotifier _notifier; #if __cplusplus >= TF_CPP20 - std::latch _latch; std::atomic _num_topologies {0}; - std::atomic_flag _done = ATOMIC_FLAG_INIT; #else - Latch _latch; std::condition_variable _topology_cv; std::mutex _topology_mutex; size_t _num_topologies {0}; - std::atomic _done {0}; #endif - std::list _taskflows; - Freelist _freelist; + Freelist _buffers; std::shared_ptr _worker_interface; std::unordered_set> _observers; @@ -1074,7 +1077,7 @@ class Executor { void _observer_epilogue(Worker&, Node*); void _spawn(size_t); void _exploit_task(Worker&, Node*&); - void _explore_task(Worker&, Node*&); + bool _explore_task(Worker&, Node*&); void _schedule(Worker&, Node*); void _schedule(Node*); void _set_up_topology(Worker*, Topology*); @@ -1104,14 +1107,14 @@ class Executor { bool _invoke_runtime_task_impl(Worker&, Node*, std::function&); template - I _set_up_graph(I, I, Topology*, Node*, nstate_t); + I _set_up_graph(I, I, Topology*, Node*); template void _corun_until(Worker&, P&&); template void _corun_graph(Worker&, Node*, I, I); - + template void _schedule(Worker&, I, I); @@ -1119,7 +1122,7 @@ class Executor { void _schedule(I, I); template - void _schedule_graph_with_parent(Worker&, I, I, Node*, nstate_t); + void _schedule_graph_with_parent(Worker&, I, I, Node*); template auto _async(P&&, F&&, Topology*, Node*); @@ -1133,11 +1136,9 @@ class Executor { // Constructor inline Executor::Executor(size_t N, std::shared_ptr wix) : - _MAX_STEALS ((N+1) << 1), - _workers (N), - _notifier (N), - _latch (N+1), - _freelist (N), + _workers (N), + _notifier (N), + _buffers (N), _worker_interface(std::move(wix)) { if(N == 0) { @@ -1159,12 +1160,14 @@ inline Executor::~Executor() { wait_for_all(); // shut down the scheduler + for(size_t i=0; i<_workers.size(); ++i) { + #if __cplusplus >= TF_CPP20 + _workers[i]._done.test_and_set(std::memory_order_relaxed); + #else + _workers[i]._done.store(true, std::memory_order_relaxed); + #endif + } -#if __cplusplus >= TF_CPP20 - _done.test_and_set(std::memory_order_relaxed); -#else - _done = true; -#endif _notifier.notify_all(); for(auto& w : _workers) { @@ -1177,6 +1180,22 @@ inline size_t Executor::num_workers() const noexcept { return _workers.size(); } +// Function: num_waiters +inline size_t Executor::num_waiters() const noexcept { +#if __cplusplus >= TF_CPP20 + return _notifier.num_waiters(); +#else + // Unfortunately, nonblocking notifier does not have an easy way to return + // the number of workers that are not making stealing attempts. 
+ return 0; +#endif +} + +// Function: num_queues +inline size_t Executor::num_queues() const noexcept { + return _workers.size() + _buffers.size(); +} + // Function: num_topologies inline size_t Executor::num_topologies() const { #if __cplusplus >= TF_CPP20 @@ -1191,12 +1210,6 @@ inline size_t Executor::num_taskflows() const { return _taskflows.size(); } -// Function: _this_worker -//inline Worker* Executor::_this_worker() const { -// auto w = pt::this_worker; -// return (w && w->_executor == this) ? w : nullptr; -//} - // Function: this_worker_id inline int Executor::this_worker_id() const { auto w = pt::this_worker; @@ -1206,10 +1219,6 @@ inline int Executor::this_worker_id() const { // Procedure: _spawn inline void Executor::_spawn(size_t N) { - // Note: we can't declare latch here as a local variable - // since the main thread may leave quicker than other thread - // and then destroy it, causing the other thread to dangle - // with the latch for(size_t id=0; id( std::hash()(std::this_thread::get_id())) ); - w._rdvtm = std::uniform_int_distribution(0, 2*_workers.size()-2); // before entering the work-stealing loop, call the scheduler prologue if(_worker_interface) { @@ -1242,12 +1246,14 @@ inline void Executor::_spawn(size_t N) { // the previous worker may stop while the following workers // are still preparing for entering the scheduling loop try { + + // worker loop while(1) { - // execute the tasks. + // drain out the local queue _exploit_task(w, t); - // wait for tasks + // steal and wait for tasks if(_wait_for_task(w, t) == false) { break; } @@ -1264,13 +1270,15 @@ inline void Executor::_spawn(size_t N) { }); } - - _latch.arrive_and_wait(); } // Function: _corun_until template void Executor::_corun_until(Worker& w, P&& stop_predicate) { + + const size_t MAX_STEALS = ((num_queues() + 1) << 1); + + std::uniform_int_distribution udist(0, num_queues()-1); exploit: @@ -1281,25 +1289,25 @@ void Executor::_corun_until(Worker& w, P&& stop_predicate) { } else { size_t num_steals = 0; + size_t vtm = w._vtm; explore: + + //auto vtm = udist(w._rdgen); - //t = (w._id == w._vtm) ? _freelist.steal(w._id) : _workers[w._vtm]._wsq.steal(); - t = (w._vtm < _workers.size()) ? _workers[w._vtm]._wsq.steal() : - _freelist.steal(w._vtm - _workers.size()); + t = (vtm < _workers.size()) ? _workers[vtm]._wsq.steal() : + _buffers.steal(vtm - _workers.size()); if(t) { _invoke(w, t); + w._vtm = vtm; goto exploit; } else if(!stop_predicate()) { - if(num_steals++ > _MAX_STEALS) { + if(++num_steals > MAX_STEALS) { std::this_thread::yield(); } - // skip worker-id - //auto r = w._rdgen.random_range(0, 2*_workers.size()-2); - auto r = w._rdvtm(w._rdgen); - w._vtm = r + (r >= w._id); + vtm = udist(w._rdgen); goto explore; } else { @@ -1310,42 +1318,53 @@ void Executor::_corun_until(Worker& w, P&& stop_predicate) { } // Function: _explore_task -inline void Executor::_explore_task(Worker& w, Node*& t) { +inline bool Executor::_explore_task(Worker& w, Node*& t) { //assert(!t); + + const size_t MAX_STEALS = ((num_queues() + 1) << 1); + std::uniform_int_distribution udist(0, num_queues()-1); size_t num_steals = 0; + size_t vtm = w._vtm; + + // Make the worker steal immediately from the assigned victim. + while(true) { + + // Randomely generate a next victim. + //vtm = udist(w._rdgen); //w._rdvtm(); - // Here, we write do-while to make the worker steal at once - // from the assigned victim. - do { - //t = (w._id == w._vtm) ? _freelist.steal(w._id) : _workers[w._vtm]._wsq.steal(); - t = (w._vtm < _workers.size()) ? 
_workers[w._vtm]._wsq.steal() : - _freelist.steal(w._vtm - _workers.size()); + // If the worker's victim thread is within the worker pool, steal from the worker's queue. + // Otherwise, steal from the buffer, adjusting the victim index based on the worker pool size. + t = (vtm < _workers.size()) + ? _workers[vtm]._wsq.steal() + : _buffers.steal(vtm - _workers.size()); if(t) { + w._vtm = vtm; break; } - if (++num_steals > _MAX_STEALS) { + // Increment the steal count, and if it exceeds MAX_STEALS, yield the thread. + // If the number of *consecutive* empty steals reaches MAX_STEALS, exit the loop. + if (++num_steals > MAX_STEALS) { std::this_thread::yield(); - if (num_steals > _MAX_STEALS + 100) { + if(num_steals > 100 + MAX_STEALS) { break; } } - // skip worker-id - //auto r = w._rdgen.random_range(0, 2*_workers.size()-2); - auto r = w._rdvtm(w._rdgen); - w._vtm = r + (r >= w._id); - } -#if __cplusplus >= TF_CPP20 - // the _DONE can be checked later in wait_for_task? - while(!_done.test(std::memory_order_relaxed)); -#else - while(!_done); -#endif + #if __cplusplus >= TF_CPP20 + if(w._done.test(std::memory_order_relaxed)) { + #else + if(w._done.load(std::memory_order_relaxed)) { + #endif + return false; + } + vtm = udist(w._rdgen); //w._rdvtm(); + } + return true; } // Procedure: _exploit_task @@ -1357,56 +1376,65 @@ inline void Executor::_exploit_task(Worker& w, Node*& t) { } // Function: _wait_for_task -inline bool Executor::_wait_for_task(Worker& worker, Node*& t) { +inline bool Executor::_wait_for_task(Worker& w, Node*& t) { explore_task: - _explore_task(worker, t); - + if(_explore_task(w, t) == false) { + return false; + } + + // Go exploit the task if we successfully steal one. if(t) { return true; } - - // The last thief who successfully stole a task will wake up - // another thief worker to avoid starvation. -// if(t) { -// _notifier.notify_one(); -// return true; -// } - - // ---- 2PC guard ---- - _notifier.prepare_wait(worker._waiter); - if(!_freelist.empty()) { - _notifier.cancel_wait(worker._waiter); - worker._vtm = worker._id; - goto explore_task; + // Entering the 2PC guard as all queues should be empty after many stealing attempts. + _notifier.prepare_wait(w._waiter); + + // Condition #1: buffers should be empty + for(size_t vtm=0; vtm<_buffers.size(); ++vtm) { + if(!_buffers._buckets[vtm].queue.empty()) { + _notifier.cancel_wait(w._waiter); + w._vtm = vtm + _workers.size(); + goto explore_task; + } } - -#if __cplusplus >= TF_CPP20 - if(_done.test(std::memory_order_relaxed)) { -#else - if(_done) { -#endif - _notifier.cancel_wait(worker._waiter); - _notifier.notify_all(); - return false; + + // Condition #2: worker queues should be empty + // Note: We need to use index-based looping to avoid data race with _spawan + // which initializes other worker data structure at the same time + for(size_t vtm=0; vtm= TF_CPP20 + if(w._done.test(std::memory_order_relaxed)) { +#else + if(w._done.load(std::memory_order_relaxed)) { +#endif + _notifier.cancel_wait(w._waiter); + return false; + } + + // Now I really need to relinquish myself to others. 
+ _notifier.commit_wait(w._waiter); goto explore_task; - } // Function: make_observer @@ -1448,22 +1476,23 @@ inline size_t Executor::num_observers() const noexcept { // Procedure: _schedule inline void Executor::_schedule(Worker& worker, Node* node) { - // caller is a worker to this pool - starting at v3.5 we do not use + // caller is a worker of this executor - starting at v3.5 we do not use // any complicated notification mechanism as the experimental result // has shown no significant advantage. if(worker._executor == this) { - worker._wsq.push(node, [&](){ _freelist.push(worker._id, node); }); + worker._wsq.push(node, [&](){ _buffers.push(node); }); _notifier.notify_one(); return; } - _freelist.push(node); + // caller is not a worker of this executor - go through the centralized queue + _buffers.push(node); _notifier.notify_one(); } // Procedure: _schedule inline void Executor::_schedule(Node* node) { - _freelist.push(node); + _buffers.push(node); _notifier.notify_one(); } @@ -1477,23 +1506,23 @@ void Executor::_schedule(Worker& worker, I first, I last) { return; } - // [NOTE]: We cannot use first/last as the for-loop condition - // (e.g., for(; first != last; ++first)) since when a node is inserted - // into the queue the node can run and finish immediately. - // If this is the last node in the graph, it will tear down the parent - // container which cause the last ++first to fail. - // This problem is specific to MSVC which has strict iterator arithmetics. + // NOTE: We cannot use first/last in the for-loop (e.g., for(; first != last; ++first)). + // This is because when a node v is inserted into the queue, v can run and finish + // immediately. If v is the last node in the graph, it will tear down the parent task vector + // which cause the last ++first to fail. This problem is specific to MSVC which has a stricter + // iterator implementation in std::vector than GCC/Clang. if(worker._executor == this) { for(size_t i=0; i -void Executor::_schedule_graph_with_parent( - Worker& worker, I beg, I end, Node* parent, nstate_t nstate -) { - auto send = _set_up_graph(beg, end, parent->_topology, parent, nstate); +void Executor::_schedule_graph_with_parent(Worker& worker, I beg, I end, Node* parent) { + auto send = _set_up_graph(beg, end, parent->_topology, parent); parent->_join_counter.fetch_add(send - beg, std::memory_order_relaxed); _schedule(worker, beg, send); } -inline void Executor::_update_cache(Worker& worker, Node*& cache, Node* node) { +TF_FORCE_INLINE void Executor::_update_cache(Worker& worker, Node*& cache, Node* node) { if(cache) { _schedule(worker, cache); } @@ -1648,12 +1680,12 @@ inline void Executor::_invoke(Worker& worker, Node* node) { // Reset the join counter with strong dependencies to support cycles. // + We must do this before scheduling the successors to avoid race - // condition on _dependents. + // condition on _predecessors. // + We must use fetch_add instead of direct assigning // because the user-space call on "invoke" may explicitly schedule // this task again (e.g., pipeline) which can access the join_counter. 
node->_join_counter.fetch_add( - node->num_dependents() - (node->_nstate & ~NSTATE::MASK), std::memory_order_relaxed + node->num_predecessors() - (node->_nstate & ~NSTATE::MASK), std::memory_order_relaxed ); // acquire the parent flow counter @@ -1667,8 +1699,8 @@ inline void Executor::_invoke(Worker& worker, Node* node) { case Node::CONDITION: case Node::MULTI_CONDITION: { for(auto cond : conds) { - if(cond >= 0 && static_cast(cond) < node->_successors.size()) { - auto s = node->_successors[cond]; + if(cond >= 0 && static_cast(cond) < node->_num_successors) { + auto s = node->_edges[cond]; // zeroing the join counter for invariant s->_join_counter.store(0, std::memory_order_relaxed); join_counter.fetch_add(1, std::memory_order_relaxed); @@ -1680,10 +1712,8 @@ inline void Executor::_invoke(Worker& worker, Node* node) { // non-condition task default: { - for(size_t i=0; i_successors.size(); ++i) { - //if(auto s = node->_successors[i]; --(s->_join_counter) == 0) { - if(auto s = node->_successors[i]; - s->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1) { + for(size_t i=0; i_num_successors; ++i) { + if(auto s = node->_edges[i]; s->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1) { join_counter.fetch_add(1, std::memory_order_relaxed); _update_cache(worker, cache, s); } @@ -1708,7 +1738,7 @@ inline void Executor::_tear_down_invoke(Worker& worker, Node* node, Node*& cache } } else { - // needs to fetch every data before join-counter becomes zero at which + // needs to fetch every data before join counter becomes zero at which // the node may be deleted auto state = parent->_nstate; if(parent->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1) { @@ -1782,12 +1812,12 @@ inline void Executor::_invoke_static_task(Worker& worker, Node* node) { // Procedure: _invoke_subflow_task inline bool Executor::_invoke_subflow_task(Worker& worker, Node* node) { + + auto& h = *std::get_if(&node->_handle); + auto& g = h.subgraph; if((node->_nstate & NSTATE::PREEMPTED) == 0) { - auto& h = *std::get_if(&node->_handle); - auto& g = h.subgraph; - // set up the subflow Subflow sf(*this, worker, node, g); @@ -1800,13 +1830,13 @@ inline bool Executor::_invoke_subflow_task(Worker& worker, Node* node) { // spawn the subflow if it is joinable and its graph is non-empty // implicit join is faster than Subflow::join as it does not involve corun - if(sf.joinable() && g.size() > sf._tag) { + if(sf.joinable() && g.size()) { // signal the executor to preempt this node node->_nstate |= NSTATE::PREEMPTED; // set up and schedule the graph - _schedule_graph_with_parent(worker, g.begin() + sf._tag, g.end(), node, NSTATE::NONE); + _schedule_graph_with_parent(worker, g.begin(), g.end(), node); return true; } } @@ -1814,6 +1844,11 @@ inline bool Executor::_invoke_subflow_task(Worker& worker, Node* node) { node->_nstate &= ~NSTATE::PREEMPTED; } + // the subflow has finished or joined + if((node->_nstate & NSTATE::RETAIN_SUBFLOW) == 0) { + g.clear(); + } + return false; } @@ -1857,13 +1892,13 @@ inline bool Executor::_invoke_module_task_impl(Worker& w, Node* node, Graph& gra if((node->_nstate & NSTATE::PREEMPTED) == 0) { // signal the executor to preempt this node node->_nstate |= NSTATE::PREEMPTED; - _schedule_graph_with_parent(w, graph.begin(), graph.end(), node, NSTATE::NONE); + _schedule_graph_with_parent(w, graph.begin(), graph.end(), node); return true; } + // second entry - already spawned - else { - node->_nstate &= ~NSTATE::PREEMPTED; - } + node->_nstate &= ~NSTATE::PREEMPTED; + return false; } @@ 
-1912,14 +1947,14 @@ inline bool Executor::_invoke_dependent_async_task(Worker& worker, Node* node) { _observer_epilogue(worker, node); break; - // void(Runtime&) + // void(Runtime&) - silent async case 1: if(_invoke_runtime_task_impl(worker, node, *std::get_if<1>(&work))) { return true; } break; - // void(Runtime&, bool) + // void(Runtime&, bool) - async case 2: if(_invoke_runtime_task_impl(worker, node, *std::get_if<2>(&work))) { return true; @@ -1995,16 +2030,16 @@ tf::Future Executor::run_until(Taskflow& f, P&& p, C&& c) { _increment_topology(); - // Need to check the empty under the lock since subflow task may - // define detached blocks that modify the taskflow at the same time - bool empty; - { - std::lock_guard lock(f._mutex); - empty = f.empty(); - } + //// Need to check the empty under the lock since subflow task may + //// define detached blocks that modify the taskflow at the same time + //bool empty; + //{ + // std::lock_guard lock(f._mutex); + // empty = f.empty(); + //} // No need to create a real topology but returns an dummy future - if(empty || p()) { + if(f.empty() || p()) { c(); std::promise promise; promise.set_value(); @@ -2082,7 +2117,7 @@ void Executor::_corun_graph(Worker& w, Node* p, I first, I last) { // anchor this parent as the blocking point { AnchorGuard anchor(p); - _schedule_graph_with_parent(w, first, last, p, NSTATE::NONE); + _schedule_graph_with_parent(w, first, last, p); _corun_until(w, [p] () -> bool { return p->_join_counter.load(std::memory_order_acquire) == 0; } ); @@ -2135,9 +2170,8 @@ inline void Executor::_set_up_topology(Worker* w, Topology* tpg) { // ---- under taskflow lock ---- auto& g = tpg->_taskflow._graph; - //g._clear_detached(); - auto send = _set_up_graph(g.begin(), g.end(), tpg, nullptr, NSTATE::NONE); + auto send = _set_up_graph(g.begin(), g.end(), tpg, nullptr); tpg->_join_counter.store(send - g.begin(), std::memory_order_relaxed); w ? _schedule(*w, g.begin(), send) : _schedule(g.begin(), send); @@ -2145,7 +2179,7 @@ inline void Executor::_set_up_topology(Worker* w, Topology* tpg) { // Function: _set_up_graph template -I Executor::_set_up_graph(I first, I last, Topology* tpg, Node* parent, nstate_t state) { +I Executor::_set_up_graph(I first, I last, Topology* tpg, Node* parent) { auto send = first; for(; first != last; ++first) { @@ -2153,27 +2187,16 @@ I Executor::_set_up_graph(I first, I last, Topology* tpg, Node* parent, nstate_t auto node = first->get(); node->_topology = tpg; node->_parent = parent; - node->_nstate = state; - node->_estate.store(0, std::memory_order_relaxed); + node->_nstate = NSTATE::NONE; + node->_estate.store(ESTATE::NONE, std::memory_order_relaxed); node->_set_up_join_counter(); node->_exception_ptr = nullptr; // move source to the first partition - if(node->num_dependents() == 0) { + // root, root, root, v1, v2, v3, v4, ... 
+ if(node->num_predecessors() == 0) { std::iter_swap(send++, first); } - - // handle-specific clear - switch(node->_handle.index()) { - - // clear detached nodes - case Node::SUBFLOW: { - std::get_if(&node->_handle)->subgraph.clear(); - } break; - - default: - break; - } } return send; } @@ -2189,9 +2212,6 @@ inline void Executor::_tear_down_topology(Worker& worker, Topology* tpg) { if(!tpg->_exception_ptr && !tpg->cancelled() && !tpg->_pred()) { //assert(tpg->_join_counter == 0); std::lock_guard lock(f._mutex); - //auto& g = tpg->_taskflow._graph; - //tpg->_join_counter.store(tpg->_num_sources, std::memory_order_relaxed); - //_schedule(worker, g.begin(), g.begin() + tpg->_num_sources); _set_up_topology(&worker, tpg); } // case 2: the final run of this topology @@ -2251,34 +2271,13 @@ inline void Executor::_tear_down_topology(Worker& worker, Topology* tpg) { inline void Subflow::join() { if(!joinable()) { - TF_THROW("subflow already joined or detached"); + TF_THROW("subflow already joined"); } - // iterator to the begining of the subflow - auto gbeg = _graph.begin() + _tag; - - // join here since corun graph may throw exception - _tag |= JOINED_BIT; - - _executor._corun_graph(_worker, _parent, gbeg, _graph.end()); -} - -inline void Subflow::detach() { - - if(!joinable()) { - TF_THROW("subflow already joined or detached"); - } - - if(_graph.size() > _tag) { - auto sbeg = _graph.begin() + _tag; - auto send = _executor._set_up_graph( - sbeg, _graph.end(), _parent->_topology, nullptr, NSTATE::DETACHED - ); - _parent->_topology->_join_counter.fetch_add(send - sbeg, std::memory_order_relaxed); - _executor._schedule(_worker, sbeg, send); - } + _executor._corun_graph(_worker, _parent, _graph.begin(), _graph.end()); - _tag |= JOINED_BIT; + // join here since corun graph may throw exception + _parent->_nstate |= NSTATE::JOINED_SUBFLOW; } #endif diff --git a/bundled/taskflow-3.10.0/taskflow/core/flow_builder.hpp b/bundled/taskflow-3.10.0/taskflow/core/flow_builder.hpp index 75b0bfb84b..e8ae6e55c8 100644 --- a/bundled/taskflow-3.10.0/taskflow/core/flow_builder.hpp +++ b/bundled/taskflow-3.10.0/taskflow/core/flow_builder.hpp @@ -436,7 +436,7 @@ class FlowBuilder { tf::IndexRange range(0, 17, 2); // parallelize the sequence [0, 2, 4, 6, 8, 10, 12, 14, 16] - taskflow.for_each_index(range, [](tf::IndexRange range) { + taskflow.for_each_by_index(range, [](tf::IndexRange range) { // iterate each index in the subrange for(int i=range.begin(); i - Task for_each_index(R range, C callable, P part = P()); + Task for_each_by_index(R range, C callable, P part = P()); // ------------------------------------------------------------------------ // transform @@ -1259,7 +1259,7 @@ inline FlowBuilder::FlowBuilder(Graph& graph) : // Function: emplace template , void>*> Task FlowBuilder::emplace(C&& c) { - return Task(_graph._emplace_back("", nullptr, nullptr, 0, + return Task(_graph._emplace_back(NSTATE::NONE, ESTATE::NONE, DefaultTaskParams{}, nullptr, nullptr, 0, std::in_place_type_t{}, std::forward(c) )); } @@ -1267,7 +1267,7 @@ Task FlowBuilder::emplace(C&& c) { // Function: emplace template , void>*> Task FlowBuilder::emplace(C&& c) { - return Task(_graph._emplace_back("", nullptr, nullptr, 0, + return Task(_graph._emplace_back(NSTATE::NONE, ESTATE::NONE, DefaultTaskParams{}, nullptr, nullptr, 0, std::in_place_type_t{}, std::forward(c) )); } @@ -1275,7 +1275,7 @@ Task FlowBuilder::emplace(C&& c) { // Function: emplace template , void>*> Task FlowBuilder::emplace(C&& c) { - return Task(_graph._emplace_back("", 
nullptr, nullptr, 0, + return Task(_graph._emplace_back(NSTATE::NONE, ESTATE::NONE, DefaultTaskParams{}, nullptr, nullptr, 0, std::in_place_type_t{}, std::forward(c) )); } @@ -1283,7 +1283,7 @@ Task FlowBuilder::emplace(C&& c) { // Function: emplace template , void>*> Task FlowBuilder::emplace(C&& c) { - return Task(_graph._emplace_back("", nullptr, nullptr, 0, + return Task(_graph._emplace_back(NSTATE::NONE, ESTATE::NONE, DefaultTaskParams{}, nullptr, nullptr, 0, std::in_place_type_t{}, std::forward(c) )); } @@ -1291,11 +1291,28 @@ Task FlowBuilder::emplace(C&& c) { // Function: emplace template , void>*> Task FlowBuilder::emplace(C&& c) { - return Task(_graph._emplace_back("", nullptr, nullptr, 0, + return Task(_graph._emplace_back(NSTATE::NONE, ESTATE::NONE, DefaultTaskParams{}, nullptr, nullptr, 0, std::in_place_type_t{}, std::forward(c) )); } +// Function: composed_of +template +Task FlowBuilder::composed_of(T& object) { + auto node = _graph._emplace_back(NSTATE::NONE, ESTATE::NONE, DefaultTaskParams{}, nullptr, nullptr, 0, + std::in_place_type_t{}, object + ); + return Task(node); +} + +// Function: placeholder +inline Task FlowBuilder::placeholder() { + auto node = _graph._emplace_back(NSTATE::NONE, ESTATE::NONE, DefaultTaskParams{}, nullptr, nullptr, 0, + std::in_place_type_t{} + ); + return Task(node); +} + // Function: emplace template 1), void>*> auto FlowBuilder::emplace(C&&... cs) { @@ -1309,39 +1326,19 @@ inline void FlowBuilder::erase(Task task) { return; } - task.for_each_dependent([&] (Task dependent) { - auto& S = dependent._node->_successors; - if(auto I = std::find(S.begin(), S.end(), task._node); I != S.end()) { - S.erase(I); - } - }); + // remove task from its successors' predecessor list + for(size_t i=0; i_num_successors; ++i) { + task._node->_edges[i]->_remove_predecessors(task._node); + } - task.for_each_successor([&] (Task dependent) { - auto& D = dependent._node->_dependents; - if(auto I = std::find(D.begin(), D.end(), task._node); I != D.end()) { - D.erase(I); - } - }); + // remove task from its precedessors' successor list + for(size_t i=task._node->_num_successors; i_edges.size(); ++i) { + task._node->_edges[i]->_remove_successors(task._node); + } _graph._erase(task._node); } -// Function: composed_of -template -Task FlowBuilder::composed_of(T& object) { - auto node = _graph._emplace_back("", nullptr, nullptr, 0, - std::in_place_type_t{}, object - ); - return Task(node); -} - -// Function: placeholder -inline Task FlowBuilder::placeholder() { - auto node = _graph._emplace_back("", nullptr, nullptr, 0, - std::in_place_type_t{} - ); - return Task(node); -} // Procedure: _linearize template @@ -1380,8 +1377,7 @@ inline void FlowBuilder::linearize(std::initializer_list keys) { tf::Subflow is spawned from the execution of a task to dynamically manage a child graph that may depend on runtime variables. -You can explicitly join or detach a subflow by calling tf::Subflow::join -or tf::Subflow::detach, respectively. +You can explicitly join a subflow by calling tf::Subflow::join, respectively. By default, the %Taskflow runtime will implicitly join a subflow it is is joinable. The following example creates a taskflow graph that spawns a subflow from @@ -1434,28 +1430,11 @@ class Subflow : public FlowBuilder { */ void join(); - /** - @brief enables the subflow to detach from its parent task - - Performs an immediate action to detach the subflow. Once the subflow is detached, - it is considered finished and you may not modify the subflow anymore. 
- - @code{.cpp} - taskflow.emplace([](tf::Subflow& sf){ - sf.emplace([](){}); - sf.detach(); - }); - @endcode - - Only the worker that spawns this subflow can detach it. - */ - void detach(); - /** @brief queries if the subflow is joinable This member function queries if the subflow is joinable. - When a subflow is joined or detached, it becomes not joinable. + When a subflow is joined, it becomes not joinable. @code{.cpp} taskflow.emplace([](tf::Subflow& sf){ @@ -1477,12 +1456,26 @@ class Subflow : public FlowBuilder { @brief acquires the associated graph */ Graph& graph() { return _graph; } + + /** + @brief specifies whether to keep the subflow after it is joined + + @param flag `true` to retain the subflow after it is joined; `false` to discard it + + By default, the runtime automatically clears a spawned subflow once it is joined. + Setting this flag to `true` allows the application to retain the subflow's structure + for post-execution analysis like visualization. + */ + void retain(bool flag) noexcept; + + /** + @brief queries if the subflow will be retained after it is joined + @return `true` if the subflow will be retained after it is joined; `false` otherwise + */ + bool retain() const; private: - // with only the most significant bit set: 1000...000 - constexpr static size_t JOINED_BIT = (~size_t(0)) ^ ((~size_t(0)) >> 1); - Subflow(Executor&, Worker&, Node*, Graph&); Subflow() = delete; @@ -1492,8 +1485,6 @@ class Subflow : public FlowBuilder { Executor& _executor; Worker& _worker; Node* _parent; - - size_t _tag; }; // Constructor @@ -1501,25 +1492,18 @@ inline Subflow::Subflow(Executor& executor, Worker& worker, Node* parent, Graph& FlowBuilder {graph}, _executor {executor}, _worker {worker}, - _parent {parent}, - _tag {graph.size()} { - - // assert(_parent != nullptr); - // clear undetached nodes in reversed order - for(auto i = graph.rbegin(); i != graph.rend(); ++i) { - if((i->get()->_nstate & NSTATE::DETACHED) == 0) { - --_tag; - } - else { - break; - } - } - graph.resize(_tag); + _parent {parent} { + + // need to reset since there could have iterative control flow + _parent->_nstate &= ~(NSTATE::JOINED_SUBFLOW | NSTATE::RETAIN_SUBFLOW); + + // clear the graph + graph.clear(); } // Function: joinable inline bool Subflow::joinable() const noexcept { - return (_tag & JOINED_BIT) == 0; + return !(_parent->_nstate & NSTATE::JOINED_SUBFLOW); } // Function: executor @@ -1527,6 +1511,25 @@ inline Executor& Subflow::executor() noexcept { return _executor; } +// Function: retain +inline void Subflow::retain(bool flag) noexcept { + // default value is not to retain + if TF_LIKELY(flag == true) { + _parent->_nstate |= NSTATE::RETAIN_SUBFLOW; + } + else { + _parent->_nstate &= ~NSTATE::RETAIN_SUBFLOW; + } + + //_parent->_nstate = (_parent->_nstate & ~NSTATE::RETAIN_SUBFLOW) | + // (-static_cast(flag) & NSTATE::RETAIN_SUBFLOW); +} + +// Function: retain +inline bool Subflow::retain() const { + return _parent->_nstate & NSTATE::RETAIN_SUBFLOW; +} + } // end of namespace tf. 
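As a usage note for the new Subflow semantics introduced in this hunk (implicit join, removal of detach, and the added retain() flag), the sketch below shows how application code might keep a spawned subflow for inspection after it runs. It is written against the public tf::Executor, tf::Taskflow, and tf::Subflow interfaces and only illustrates the documented behavior; it is not code from this patch.

```cpp
#include <taskflow/taskflow.hpp>
#include <iostream>

int main() {
  tf::Executor executor;
  tf::Taskflow taskflow;

  taskflow.emplace([](tf::Subflow& sf){
    tf::Task a = sf.emplace([](){}).name("a");
    tf::Task b = sf.emplace([](){}).name("b");
    a.precede(b);
    sf.retain(true);   // keep the spawned graph after the (implicit) join
    // no sf.join() needed: a joinable subflow is joined implicitly
  }).name("parent");

  executor.run(taskflow).wait();

  // since the subflow was retained rather than cleared, its structure is
  // still available here, e.g. for visualization
  taskflow.dump(std::cout);
  return 0;
}
```

Without the retain(true) call, the runtime clears the spawned graph once the subflow is joined, so nothing of it would appear in the dump.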
--------------------------------------------------- diff --git a/bundled/taskflow-3.10.0/taskflow/core/freelist.hpp b/bundled/taskflow-3.10.0/taskflow/core/freelist.hpp index 658ff946b8..ab9431c3c7 100644 --- a/bundled/taskflow-3.10.0/taskflow/core/freelist.hpp +++ b/bundled/taskflow-3.10.0/taskflow/core/freelist.hpp @@ -10,41 +10,43 @@ namespace tf { template class Freelist { - struct Head { - std::mutex mutex; - UnboundedTaskQueue queue; - }; + friend class Executor; public: - - Freelist(size_t N) : _heads(N) {} - - void push(size_t w, T item) { - std::scoped_lock lock(_heads[w].mutex); - _heads[w].queue.push(item); + struct Bucket { + std::mutex mutex; + UnboundedTaskQueue queue; + }; + + // Here, we don't create just N task queues in the freelist as it will cause + // the work-stealing loop to spand a lot of time on stealing tasks. + // Experimentally speaking, we found floor_log2(N) is the best. + TF_FORCE_INLINE Freelist(size_t N) : _buckets(N < 4 ? 1 : floor_log2(N)) {} + + // Pointers are aligned to 8 bytes. We perform a simple hash to avoid contention caused + // by hashing to the same slot. + TF_FORCE_INLINE void push(T item) { + //auto b = reinterpret_cast(item) % _buckets.size(); + auto b = (reinterpret_cast(item) >> 16) % _buckets.size(); + std::scoped_lock lock(_buckets[b].mutex); + _buckets[b].queue.push(item); } - void push(T item) { - push(reinterpret_cast(item) % _heads.size(), item); + TF_FORCE_INLINE T steal(size_t w) { + return _buckets[w].queue.steal(); } - - T steal(size_t w) { - return _heads[w].queue.steal(); + + TF_FORCE_INLINE T steal_with_hint(size_t w, size_t& num_empty_steals) { + return _buckets[w].queue.steal_with_hint(num_empty_steals); } - - bool empty() const { - for(const auto& q : _heads) { - if(!q.queue.empty()) { - return false; - } - } - return true; + TF_FORCE_INLINE size_t size() const { + return _buckets.size(); } private: - std::vector _heads; + std::vector _buckets; }; diff --git a/bundled/taskflow-3.10.0/taskflow/core/graph.hpp b/bundled/taskflow-3.10.0/taskflow/core/graph.hpp index 2f6ed5d5a3..1f8e67c3d6 100644 --- a/bundled/taskflow-3.10.0/taskflow/core/graph.hpp +++ b/bundled/taskflow-3.10.0/taskflow/core/graph.hpp @@ -12,8 +12,7 @@ #include "../utility/math.hpp" #include "../utility/small_vector.hpp" #include "../utility/serializer.hpp" -#include "../utility/latch.hpp" -#include "../utility/mpmc.hpp" +#include "../utility/lazy_string.hpp" #include "error.hpp" #include "declarations.hpp" #include "semaphore.hpp" @@ -21,6 +20,7 @@ #include "topology.hpp" #include "tsq.hpp" + /** @file graph.hpp @brief graph include file @@ -284,30 +284,16 @@ class Node { Node() = default; - template - Node(const std::string&, Topology*, Node*, size_t, Args&&...); - - template - Node(nstate_t, estate_t, const std::string&, Topology*, Node*, size_t, Args&&...); - - template - Node(const TaskParams&, Topology*, Node*, size_t, Args&&...); - template Node(nstate_t, estate_t, const TaskParams&, Topology*, Node*, size_t, Args&&...); - template - Node(const DefaultTaskParams&, Topology*, Node*, size_t, Args&&...); - template Node(nstate_t, estate_t, const DefaultTaskParams&, Topology*, Node*, size_t, Args&&...); - //~Node(); - size_t num_successors() const; - size_t num_dependents() const; - size_t num_strong_dependents() const; - size_t num_weak_dependents() const; + size_t num_predecessors() const; + size_t num_strong_dependencies() const; + size_t num_weak_dependencies() const; const std::string& name() const; @@ -323,8 +309,8 @@ class Node { Topology* _topology 
{nullptr}; Node* _parent {nullptr}; - SmallVector _successors; - SmallVector _dependents; + size_t _num_successors {0}; + SmallVector _edges; std::atomic _join_counter {0}; @@ -334,9 +320,6 @@ class Node { std::exception_ptr _exception_ptr {nullptr}; - // free list - //Node* _freelist_next{nullptr}; - bool _is_cancelled() const; bool _is_conditioner() const; bool _is_preempted() const; @@ -345,6 +328,8 @@ class Node { void _precede(Node*); void _set_up_join_counter(); void _rethrow_exception(); + void _remove_successors(Node*); + void _remove_predecessors(Node*); }; // ---------------------------------------------------------------------------- @@ -457,59 +442,6 @@ Node::DependentAsync::DependentAsync(C&& c) : work {std::forward(c)} { // Definition for Node // ---------------------------------------------------------------------------- -// Constructor -template -Node::Node( - const std::string& name, - Topology* topology, - Node* parent, - size_t join_counter, - Args&&... args -) : - _name {name}, - _topology {topology}, - _parent {parent}, - _join_counter {join_counter}, - _handle {std::forward(args)...} { -} - -// Constructor -template -Node::Node( - nstate_t nstate, - estate_t estate, - const std::string& name, - Topology* topology, - Node* parent, - size_t join_counter, - Args&&... args -) : - _nstate {nstate}, - _estate {estate}, - _name {name}, - _topology {topology}, - _parent {parent}, - _join_counter {join_counter}, - _handle {std::forward(args)...} { -} - -// Constructor -template -Node::Node( - const TaskParams& params, - Topology* topology, - Node* parent, - size_t join_counter, - Args&&... args -) : - _name {params.name}, - _data {params.data}, - _topology {topology}, - _parent {parent}, - _join_counter {join_counter}, - _handle {std::forward(args)...} { -} - // Constructor template Node::Node( @@ -531,21 +463,6 @@ Node::Node( _handle {std::forward(args)...} { } -// Constructor -template -Node::Node( - const DefaultTaskParams&, - Topology* topology, - Node* parent, - size_t join_counter, - Args&&... 
args -) : - _topology {topology}, - _parent {parent}, - _join_counter {join_counter}, - _handle {std::forward(args)...} { -} - // Constructor template Node::Node( @@ -565,75 +482,64 @@ Node::Node( _handle {std::forward(args)...} { } -// Destructor -//inline Node::~Node() { -// // this is to avoid stack overflow -// if(_handle.index() == SUBFLOW) { -// auto& subgraph = std::get_if(&_handle)->subgraph; -// std::vector nodes; -// nodes.reserve(subgraph.size()); -// -// std::move( -// subgraph._nodes.begin(), subgraph._nodes.end(), std::back_inserter(nodes) -// ); -// subgraph._nodes.clear(); -// -// size_t i = 0; -// -// while(i < nodes.size()) { -// -// if(nodes[i]->_handle.index() == SUBFLOW) { -// auto& sbg = std::get_if(&(nodes[i]->_handle))->subgraph; -// std::move( -// sbg._nodes.begin(), sbg._nodes.end(), std::back_inserter(nodes) -// ); -// sbg._nodes.clear(); -// } -// -// ++i; -// } -// -// //auto& np = Graph::_node_pool(); -// for(i=0; iv +u successor layout: + s1, s2, s3, p1, p2, v (push_back v) + s1, s2, s3, v, p2, p1 (swap adj[num_successors] with adj[n-1]) +v predecessor layout: + s1, p1, p2, u (push_back u) +*/ inline void Node::_precede(Node* v) { - _successors.push_back(v); - v->_dependents.push_back(this); + _edges.push_back(v); + std::swap(_edges[_num_successors++], _edges[_edges.size() - 1]); + v->_edges.push_back(this); +} + +// Function: _remove_successors +inline void Node::_remove_successors(Node* node) { + auto sit = std::remove(_edges.begin(), _edges.begin() + _num_successors, node); + size_t new_num_successors = std::distance(_edges.begin(), sit); + std::move(_edges.begin() + _num_successors, _edges.end(), sit); + _edges.resize(_edges.size() - (_num_successors - new_num_successors)); + _num_successors = new_num_successors; +} + +// Function: _remove_predecessors +inline void Node::_remove_predecessors(Node* node) { + _edges.erase( + std::remove(_edges.begin() + _num_successors, _edges.end(), node), _edges.end() + ); } // Function: num_successors inline size_t Node::num_successors() const { - return _successors.size(); + return _num_successors; } -// Function: dependents -inline size_t Node::num_dependents() const { - return _dependents.size(); +// Function: predecessors +inline size_t Node::num_predecessors() const { + return _edges.size() - _num_successors; } -// Function: num_weak_dependents -inline size_t Node::num_weak_dependents() const { +// Function: num_weak_dependencies +inline size_t Node::num_weak_dependencies() const { size_t n = 0; - for(size_t i=0; i<_dependents.size(); i++) { - if(_dependents[i]->_is_conditioner()) { - n++; - } + for(size_t i=_num_successors; i<_edges.size(); i++) { + n += _edges[i]->_is_conditioner(); } return n; } -// Function: num_strong_dependents -inline size_t Node::num_strong_dependents() const { +// Function: num_strong_dependencies +inline size_t Node::num_strong_dependencies() const { size_t n = 0; - for(size_t i=0; i<_dependents.size(); i++) { - if(!_dependents[i]->_is_conditioner()) { - n++; - } + for(size_t i=_num_successors; i<_edges.size(); i++) { + n += !_edges[i]->_is_conditioner(); } return n; } @@ -665,14 +571,11 @@ inline bool Node::_is_cancelled() const { // Procedure: _set_up_join_counter inline void Node::_set_up_join_counter() { size_t c = 0; - for(auto p : _dependents) { - if(p->_is_conditioner()) { - //_nstate |= NSTATE::CONDITIONED; - _nstate = (_nstate + 1) | NSTATE::CONDITIONED; - } - else { - c++; - } + //for(auto p : _predecessors) { + for(size_t i=_num_successors; i<_edges.size(); i++) { + bool is_cond 
= _edges[i]->_is_conditioner(); + _nstate = (_nstate + is_cond) | (is_cond * NSTATE::CONDITIONED); // weak dependency + c += !is_cond; // strong dependency } _join_counter.store(c, std::memory_order_relaxed); } diff --git a/bundled/taskflow-3.10.0/taskflow/core/nonblocking_notifier.hpp b/bundled/taskflow-3.10.0/taskflow/core/nonblocking_notifier.hpp index bd356c6725..d29fff8be2 100644 --- a/bundled/taskflow-3.10.0/taskflow/core/nonblocking_notifier.hpp +++ b/bundled/taskflow-3.10.0/taskflow/core/nonblocking_notifier.hpp @@ -309,12 +309,35 @@ class NonblockingNotifierV1 { class NonblockingNotifierV2 { friend class Executor; + + // State_ layout: + // - low kWaiterBits is a stack of waiters committed wait + // (indexes in _waiters array are used as stack elements, + // kStackMask means empty stack). + // - next kWaiterBits is count of waiters in prewait state. + // - next kWaiterBits is count of pending signals. + // - remaining bits are ABA counter for the stack. + // (stored in Waiter node and incremented on push). + static const uint64_t kWaiterBits = 14; + static const uint64_t kStackMask = (1ull << kWaiterBits) - 1; + static const uint64_t kWaiterShift = kWaiterBits; + static const uint64_t kWaiterMask = ((1ull << kWaiterBits) - 1) << kWaiterShift; + static const uint64_t kWaiterInc = 1ull << kWaiterShift; + static const uint64_t kSignalShift = 2 * kWaiterBits; + static const uint64_t kSignalMask = ((1ull << kWaiterBits) - 1) << kSignalShift; + static const uint64_t kSignalInc = 1ull << kSignalShift; + static const uint64_t kEpochShift = 3 * kWaiterBits; + static const uint64_t kEpochBits = 64 - kEpochShift; + static const uint64_t kEpochMask = ((1ull << kEpochBits) - 1) << kEpochShift; + static const uint64_t kEpochInc = 1ull << kEpochShift; + + static_assert(kEpochBits >= 20, "not enough bits to prevent ABA problem"); public: struct Waiter { - alignas (2*TF_CACHELINE_SIZE) std::atomic next; - uint64_t epoch; + alignas (2*TF_CACHELINE_SIZE) std::atomic next{kStackMask}; + uint64_t epoch{0}; enum : unsigned { kNotSignaled = 0, kWaiting, @@ -322,11 +345,11 @@ class NonblockingNotifierV2 { }; #if __cplusplus >= TF_CPP20 - std::atomic state {0}; + std::atomic state {kNotSignaled}; #else std::mutex mu; std::condition_variable cv; - unsigned state; + unsigned state {kNotSignaled}; #endif }; @@ -342,14 +365,19 @@ class NonblockingNotifierV2 { // prepare_wait prepares for waiting. // After calling prepare_wait, the thread must re-check the wait predicate // and then call either cancel_wait or commit_wait. + //void prepare_wait(Waiter*) { + // uint64_t state = _state.load(std::memory_order_relaxed); + // for (;;) { + // //_check_state(state); + // uint64_t newstate = state + kWaiterInc; + // //_check_state(newstate); + // if (_state.compare_exchange_weak(state, newstate, std::memory_order_seq_cst)) return; + // } + //} + void prepare_wait(Waiter*) { - uint64_t state = _state.load(std::memory_order_relaxed); - for (;;) { - //_check_state(state); - uint64_t newstate = state + kWaiterInc; - //_check_state(newstate); - if (_state.compare_exchange_weak(state, newstate, std::memory_order_seq_cst)) return; - } + _state.fetch_add(kWaiterInc, std::memory_order_relaxed); + std::atomic_thread_fence(std::memory_order_seq_cst); } // commit_wait commits waiting after prepare_wait. 
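The state-word comment moved to the top of NonblockingNotifierV2 packs a waiter stack, a prewait counter, a pending-signal counter, and an ABA epoch into a single 64-bit word. The standalone snippet below merely re-derives that partitioning from the same constants to make the field boundaries explicit; it is an illustration, not part of the notifier.

```cpp
// Layout check for the 64-bit notifier state word documented above:
//   [ epoch (22 bits) | signals (14) | prewait waiters (14) | waiter stack (14) ]
#include <cassert>
#include <cstdint>

int main() {
  constexpr uint64_t kWaiterBits  = 14;
  constexpr uint64_t kStackMask   = (1ull << kWaiterBits) - 1;
  constexpr uint64_t kWaiterShift = kWaiterBits;
  constexpr uint64_t kWaiterMask  = ((1ull << kWaiterBits) - 1) << kWaiterShift;
  constexpr uint64_t kSignalShift = 2 * kWaiterBits;
  constexpr uint64_t kSignalMask  = ((1ull << kWaiterBits) - 1) << kSignalShift;
  constexpr uint64_t kEpochShift  = 3 * kWaiterBits;
  constexpr uint64_t kEpochBits   = 64 - kEpochShift;

  static_assert(kEpochBits >= 20, "enough epoch bits to avoid ABA issues");

  // the three low fields are disjoint and together cover bits [0, 42)
  assert((kStackMask & kWaiterMask) == 0);
  assert((kWaiterMask & kSignalMask) == 0);
  assert((kStackMask | kWaiterMask | kSignalMask) == (1ull << kEpochShift) - 1);
  return 0;
}
```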
@@ -424,28 +452,7 @@ class NonblockingNotifierV2 { } private: - // State_ layout: - // - low kWaiterBits is a stack of waiters committed wait - // (indexes in _waiters array are used as stack elements, - // kStackMask means empty stack). - // - next kWaiterBits is count of waiters in prewait state. - // - next kWaiterBits is count of pending signals. - // - remaining bits are ABA counter for the stack. - // (stored in Waiter node and incremented on push). - static const uint64_t kWaiterBits = 14; - static const uint64_t kStackMask = (1ull << kWaiterBits) - 1; - static const uint64_t kWaiterShift = kWaiterBits; - static const uint64_t kWaiterMask = ((1ull << kWaiterBits) - 1) << kWaiterShift; - static const uint64_t kWaiterInc = 1ull << kWaiterShift; - static const uint64_t kSignalShift = 2 * kWaiterBits; - static const uint64_t kSignalMask = ((1ull << kWaiterBits) - 1) << kSignalShift; - static const uint64_t kSignalInc = 1ull << kSignalShift; - static const uint64_t kEpochShift = 3 * kWaiterBits; - static const uint64_t kEpochBits = 64 - kEpochShift; - static const uint64_t kEpochMask = ((1ull << kEpochBits) - 1) << kEpochShift; - static const uint64_t kEpochInc = 1ull << kEpochShift; - - static_assert(kEpochBits >= 20, "not enough bits to prevent ABA problem"); + std::atomic _state; std::vector _waiters; diff --git a/bundled/taskflow-3.10.0/taskflow/core/observer.hpp b/bundled/taskflow-3.10.0/taskflow/core/observer.hpp index 3c1873efab..6e1bd03549 100644 --- a/bundled/taskflow-3.10.0/taskflow/core/observer.hpp +++ b/bundled/taskflow-3.10.0/taskflow/core/observer.hpp @@ -537,27 +537,27 @@ inline void TFProfObserver::Summary::dump_tsum(std::ostream& os) const { std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){ if(i.count == 0) return; - count_w = std::max(count_w, std::to_string(i.count).size()); + count_w = (std::max)(count_w, std::to_string(i.count).size()); }); std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){ if(i.count == 0) return; - time_w = std::max(time_w, std::to_string(i.total_span).size()); + time_w = (std::max)(time_w, std::to_string(i.total_span).size()); }); std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){ if(i.count == 0) return; - avg_w = std::max(time_w, std::to_string(i.avg_span()).size()); + avg_w = (std::max)(time_w, std::to_string(i.avg_span()).size()); }); std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){ if(i.count == 0) return; - min_w = std::max(min_w, std::to_string(i.min_span).size()); + min_w = (std::max)(min_w, std::to_string(i.min_span).size()); }); std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){ if(i.count == 0) return; - max_w = std::max(max_w, std::to_string(i.max_span).size()); + max_w = (std::max)(max_w, std::to_string(i.max_span).size()); }); os << std::setw(type_w) << "-Task-" @@ -590,32 +590,32 @@ inline void TFProfObserver::Summary::dump_wsum(std::ostream& os) const { std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){ if(i.count == 0) return; - l_w = std::max(l_w, std::to_string(i.level).size()); + l_w = (std::max)(l_w, std::to_string(i.level).size()); }); std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){ if(i.count == 0) return; - c_w = std::max(c_w, std::to_string(i.count).size()); + c_w = (std::max)(c_w, std::to_string(i.count).size()); }); std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){ if(i.count == 0) return; - d_w = std::max(d_w, std::to_string(i.total_span).size()); + d_w = (std::max)(d_w, std::to_string(i.total_span).size()); }); std::for_each(wsum.begin(), 
wsum.end(), [&](const auto& i){ if(i.count == 0) return; - avg_w = std::max(avg_w, std::to_string(i.avg_span()).size()); + avg_w = (std::max)(avg_w, std::to_string(i.avg_span()).size()); }); std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){ if(i.count == 0) return; - min_w = std::max(min_w, std::to_string(i.min_span).size()); + min_w = (std::max)(min_w, std::to_string(i.min_span).size()); }); std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){ if(i.count == 0) return; - max_w = std::max(max_w, std::to_string(i.max_span).size()); + max_w = (std::max)(max_w, std::to_string(i.max_span).size()); }); os << std::setw(w_w) << "-Worker-" @@ -840,8 +840,8 @@ inline void TFProfObserver::summary(std::ostream& os) const { // update the entire span auto& s = _timeline.segments[w][l][i]; - view_beg = view_beg ? std::min(*view_beg, s.beg) : s.beg; - view_end = view_end ? std::max(*view_end, s.end) : s.end; + view_beg = view_beg ? (std::min)(*view_beg, s.beg) : s.beg; + view_end = view_end ? (std::max)(*view_end, s.end) : s.end; // update the task summary size_t t = duration_cast(s.end - s.beg).count(); @@ -849,19 +849,19 @@ inline void TFProfObserver::summary(std::ostream& os) const { auto& x = summary.tsum[static_cast(s.type)]; x.count += 1; x.total_span += t; - x.min_span = (x.count == 1) ? t : std::min(t, x.min_span); - x.max_span = (x.count == 1) ? t : std::max(t, x.max_span); + x.min_span = (x.count == 1) ? t : (std::min)(t, x.min_span); + x.max_span = (x.count == 1) ? t : (std::max)(t, x.max_span); // update the worker summary ws.total_span += t; - ws.min_span = (i == 0) ? t : std::min(t, ws.min_span); - ws.max_span = (i == 0) ? t : std::max(t, ws.max_span); + ws.min_span = (i == 0) ? t : (std::min)(t, ws.min_span); + ws.max_span = (i == 0) ? t : (std::max)(t, ws.max_span); auto&y = ws.tsum[static_cast(s.type)]; y.count += 1; y.total_span += t; - y.min_span = (y.count == 1) ? t : std::min(t, y.min_span); - y.max_span = (y.count == 1) ? t : std::max(t, y.max_span); + y.min_span = (y.count == 1) ? t : (std::min)(t, y.min_span); + y.max_span = (y.count == 1) ? t : (std::max)(t, y.max_span); // update the delay //if(i) { diff --git a/bundled/taskflow-3.10.0/taskflow/core/runtime.hpp b/bundled/taskflow-3.10.0/taskflow/core/runtime.hpp index 2634657af1..ad9a3fbdcc 100644 --- a/bundled/taskflow-3.10.0/taskflow/core/runtime.hpp +++ b/bundled/taskflow-3.10.0/taskflow/core/runtime.hpp @@ -11,7 +11,7 @@ namespace tf { A runtime object allows users to interact with the scheduling runtime inside a task, such as scheduling an active task, -spawning a subflow, and so on. +spawning an asynchronous task, corunning a graph target, and so on. @code{.cpp} tf::Task A, B, C, D; @@ -39,9 +39,9 @@ class Runtime { friend class PreemptionGuard; friend class Algorithm; - #define TF_RUNTIME_CHECK_CALLER(msg) \ - if(pt::this_worker == nullptr || pt::this_worker->_executor != &_executor) { \ - TF_THROW(msg); \ + #define TF_RUNTIME_CHECK_CALLER(msg) \ + if(pt::this_worker != &_worker) { \ + TF_THROW(msg); \ } public: @@ -102,6 +102,10 @@ class Runtime { going through the normal taskflow graph scheduling process. At this moment, task @c C is active because its parent taskflow is running. When the taskflow finishes, we will see both @c B and @c C in the output. + + @attention + This method can only be called by the parent worker of this runtime, + or the behavior is undefined. */ void schedule(Task task); @@ -115,7 +119,7 @@ class Runtime { function on the given arguments. 
The difference to tf::Executor::async is that the created asynchronous task pertains to the runtime object. - Applications can explicitly issue tf::Runtime::corun_all + Applications can explicitly issue tf::Runtime::corun to wait for all spawned asynchronous tasks to finish. For example: @@ -130,11 +134,11 @@ class Runtime { // spawn 100 asynchronous tasks from the worker of the runtime for(int i=0; i<100; i++) { - rt.async([&](){ counter++; }); + rt.silent_async([&](){ counter++; }); } // wait for the 100 asynchronous tasks to finish - rt.corun_all(); + rt.corun(); assert(counter == 102); }); @endcode @@ -158,7 +162,7 @@ class Runtime { } // wait for the 200 asynchronous tasks to finish - rt.corun_all(); + rt.corun(); assert(counter == 200); }); @endcode @@ -203,7 +207,7 @@ class Runtime { for(int i=0; i<100; i++) { rt.silent_async([&](){ counter++; }); } - rt.corun_all(); + rt.corun(); assert(counter == 100); }); @endcode @@ -225,7 +229,7 @@ class Runtime { @code{.cpp} taskflow.emplace([&](tf::Runtime& rt){ rt.silent_async("my task", [](){}); - rt.corun_all(); + rt.corun(); }); @endcode */ @@ -254,17 +258,16 @@ class Runtime { and returns when all tasks in the target completes. @attention - The method is not thread-safe as it modifies the anchor state of the node for exception handling. + This method can only be called by the parent worker of this runtime, + or the behavior is undefined. */ template void corun(T&& target); /** - @brief corun all asynchronous tasks spawned by this runtime with other workers + @brief corun all tasks spawned by this runtime with other workers - Coruns all asynchronous tasks (tf::Runtime::async, - tf::Runtime::silent_async) with other workers until all those - asynchronous tasks finish. + Coruns all tasks spawned by this runtime with other workers until all these tasks finish. @code{.cpp} std::atomic counter{0}; @@ -273,25 +276,35 @@ class Runtime { for(int i=0; i<100; i++) { rt.silent_async([&](){ counter++; }); } - rt.corun_all(); + rt.corun(); assert(counter == 100); // spawn another 100 async tasks and wait for(int i=0; i<100; i++) { rt.silent_async([&](){ counter++; }); } - rt.corun_all(); + rt.corun(); assert(counter == 200); }); @endcode @attention - The method is not thread-safe as it modifies the anchor state of the node for exception handling. + This method can only be called by the parent worker of this runtime, + or the behavior is undefined. */ - inline void corun_all(); + void corun(); - protected: - + /** + @brief equivalent to tf::Runtime::corun - just an alias for legacy purpose + */ + void corun_all(); + + /** + @brief This method verifies if the task has been cancelled. 
+ */ + bool is_cancelled(); + +protected: /** @private */ @@ -338,8 +351,6 @@ inline Worker& Runtime::worker() { // Procedure: schedule inline void Runtime::schedule(Task task) { - TF_RUNTIME_CHECK_CALLER("schedule must be called by a worker of runtime's executor"); - auto node = task._node; // need to keep the invariant: when scheduling a task, the task must have // zero dependency (join counter is 0) @@ -355,16 +366,12 @@ inline void Runtime::schedule(Task task) { // Procedure: corun template void Runtime::corun(T&& target) { - static_assert(has_graph_v, "target must define a member function 'Graph& graph()'"); - - TF_RUNTIME_CHECK_CALLER("corun must be called by a worker of runtime's executor"); _executor._corun_graph(*pt::this_worker, _parent, target.graph().begin(), target.graph().end()); } -// Function: corun_all -inline void Runtime::corun_all() { - TF_RUNTIME_CHECK_CALLER("corun_all must be called by a worker of runtime's executor"); +// Function: corun +inline void Runtime::corun() { { AnchorGuard anchor(_parent); _executor._corun_until(_worker, [this] () -> bool { @@ -374,6 +381,13 @@ inline void Runtime::corun_all() { _parent->_rethrow_exception(); } +// Function: corun_all +inline void Runtime::corun_all() { + corun(); +} + +inline bool Runtime::is_cancelled() { return _parent->_is_cancelled(); } + // ------------------------------------ // Runtime::silent_async series // ------------------------------------ @@ -433,6 +447,7 @@ class PreemptionGuard { } ~PreemptionGuard() { + // If I am the last to join, then there is not need to preempt the runtime if(_runtime._parent->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1) { _runtime._preempted = false; _runtime._parent->_nstate &= ~NSTATE::PREEMPTED; @@ -506,6 +521,9 @@ inline bool Executor::_invoke_runtime_task_impl( _observer_epilogue(worker, node); // here, we cannot check the state from node->_nstate due to data race + // Ex: if preempted, another task may finish real quck and insert this parent task + // again into the scheduling queue. When running this parent task, it will jump to + // else branch below and modify tne nstate, thus incuring data race. if(rt._preempted) { return true; } @@ -522,75 +540,7 @@ inline bool Executor::_invoke_runtime_task_impl( } -// ---------------------------------------------------------------------------- -// Executor Members that Depend on Runtime -// ---------------------------------------------------------------------------- - -template -auto Executor::_async(P&& params, F&& f, Topology* tpg, Node* parent) { - - // async task with runtime: [] (tf::Runtime&) { ... } - if constexpr (is_runtime_task_v) { - std::promise p; - auto fu{p.get_future()}; - - _schedule_async_task(animate( - NSTATE::NONE, ESTATE::ANCHORED, std::forward
<P>
(params), tpg, parent, 0, - std::in_place_type_t{}, - [p=MoC{std::move(p)}, f=std::forward(f)](Runtime& rt, bool reentered) mutable { - if(!reentered) { - f(rt); - } - else { - auto& eptr = rt._parent->_exception_ptr; - eptr ? p.object.set_exception(eptr) : p.object.set_value(); - } - } - )); - return fu; - } - // async task with closure: [] () { ... } - else if constexpr (std::is_invocable_v){ - using R = std::invoke_result_t; - std::packaged_task p(std::forward(f)); - auto fu{p.get_future()}; - _schedule_async_task(animate( - std::forward
<P>
(params), tpg, parent, 0, - std::in_place_type_t{}, - [p=make_moc(std::move(p))]() mutable { p.object(); } - )); - return fu; - } - else { - static_assert(dependent_false_v, - "invalid async target - must be one of the following types:\n\ - (1) [] (tf::Runtime&) -> void {}\n\ - (2) [] () -> auto { ... return ... }\n" - ); - } - -} - -// Function: _silent_async -template -void Executor::_silent_async(P&& params, F&& f, Topology* tpg, Node* parent) { - // silent task - if constexpr (is_runtime_task_v || std::is_invocable_v) { - _schedule_async_task(animate( - std::forward
<P>
(params), tpg, parent, 0, - std::in_place_type_t{}, std::forward(f) - )); - } - // invalid silent async target - else { - static_assert(dependent_false_v, - "invalid silent_async target - must be one of the following types:\n\ - (1) [] (tf::Runtime&) -> void {}\n\ - (2) [] () -> auto { ... return ... }\n" - ); - } -} } // end of namespace tf ----------------------------------------------------- diff --git a/bundled/taskflow-3.10.0/taskflow/core/task.hpp b/bundled/taskflow-3.10.0/taskflow/core/task.hpp index 46a15b95df..da0c158044 100644 --- a/bundled/taskflow-3.10.0/taskflow/core/task.hpp +++ b/bundled/taskflow-3.10.0/taskflow/core/task.hpp @@ -85,51 +85,102 @@ inline const char* to_string(TaskType type) { } // ---------------------------------------------------------------------------- -// Task Traits +// Static Task Trait // ---------------------------------------------------------------------------- /** -@brief determines if a callable is a dynamic task +@private +*/ +template +struct is_static_task : std::false_type {}; -A dynamic task is a callable object constructible from std::function. +/** +@private */ template -constexpr bool is_subflow_task_v = std::is_invocable_r_v; +struct is_static_task>> + : std::is_same, void> {}; /** -@brief determines if a callable is a condition task +@brief determines if a callable is a static task -A condition task is a callable object constructible from std::function. +A static task is a callable object constructible from std::function. */ template -constexpr bool is_condition_task_v = std::is_invocable_r_v; +constexpr bool is_static_task_v = is_static_task::value; + +// ---------------------------------------------------------------------------- +// Subflow Task Trait +// ---------------------------------------------------------------------------- /** -@brief determines if a callable is a multi-condition task +@private +*/ +template +struct is_subflow_task : std::false_type {}; -A multi-condition task is a callable object constructible from -std::function()>. +/** +@private */ template -constexpr bool is_multi_condition_task_v = std::is_invocable_r_v, C>; +struct is_subflow_task>> + : std::is_same, void> {}; /** -@brief determines if a callable is a static task +@brief determines if a callable is a subflow task -A static task is a callable object constructible from std::function. +A subflow task is a callable object constructible from std::function. +*/ +template +constexpr bool is_subflow_task_v = is_subflow_task::value; + +// ---------------------------------------------------------------------------- +// Runtime Task Trait +// ---------------------------------------------------------------------------- + +/** +@private +*/ +template +struct is_runtime_task : std::false_type {}; + +/** +@private */ template -constexpr bool is_static_task_v = std::is_invocable_r_v && - !is_condition_task_v && - !is_multi_condition_task_v; +struct is_runtime_task>> + : std::is_same, void> {}; /** @brief determines if a callable is a runtime task -A runtime task is a callable object constructible from std::function. +A runtime task is a callable object constructible from std::function. 
+*/ +template +constexpr bool is_runtime_task_v = is_runtime_task::value; + + +// ---------------------------------------------------------------------------- +// Condition Task Trait +// ---------------------------------------------------------------------------- + +/** +@brief determines if a callable is a condition task + +A condition task is a callable object constructible from std::function. +*/ +template +constexpr bool is_condition_task_v = std::is_invocable_r_v; + +/** +@brief determines if a callable is a multi-condition task + +A multi-condition task is a callable object constructible from +std::function()>. */ template -constexpr bool is_runtime_task_v = std::is_invocable_r_v; +constexpr bool is_multi_condition_task_v = std::is_invocable_r_v, C>; + // ---------------------------------------------------------------------------- // Task @@ -200,17 +251,17 @@ class Task { /** @brief queries the number of predecessors of the task */ - size_t num_dependents() const; + size_t num_predecessors() const; /** @brief queries the number of strong dependents of the task */ - size_t num_strong_dependents() const; + size_t num_strong_dependencies() const; /** @brief queries the number of weak dependents of the task */ - size_t num_weak_dependents() const; + size_t num_weak_dependencies() const; /** @brief assigns a name to the task @@ -351,7 +402,7 @@ class Task { @brief applies an visitor callable to each dependents of the task */ template - void for_each_dependent(V&& visitor) const; + void for_each_predecessor(V&& visitor) const; /** @brief obtains a hash value of the underlying node @@ -503,19 +554,19 @@ inline const std::string& Task::name() const { return _node->_name; } -// Function: num_dependents -inline size_t Task::num_dependents() const { - return _node->num_dependents(); +// Function: num_predecessors +inline size_t Task::num_predecessors() const { + return _node->num_predecessors(); } -// Function: num_strong_dependents -inline size_t Task::num_strong_dependents() const { - return _node->num_strong_dependents(); +// Function: num_strong_dependencies +inline size_t Task::num_strong_dependencies() const { + return _node->num_strong_dependencies(); } -// Function: num_weak_dependents -inline size_t Task::num_weak_dependents() const { - return _node->num_weak_dependents(); +// Function: num_weak_dependencies +inline size_t Task::num_weak_dependencies() const { + return _node->num_weak_dependencies(); } // Function: num_successors @@ -552,16 +603,16 @@ inline TaskType Task::type() const { // Function: for_each_successor template void Task::for_each_successor(V&& visitor) const { - for(size_t i=0; i<_node->_successors.size(); ++i) { - visitor(Task(_node->_successors[i])); + for(size_t i=0; i<_node->_num_successors; ++i) { + visitor(Task(_node->_edges[i])); } } -// Function: for_each_dependent +// Function: for_each_predecessor template -void Task::for_each_dependent(V&& visitor) const { - for(size_t i=0; i<_node->_dependents.size(); ++i) { - visitor(Task(_node->_dependents[i])); +void Task::for_each_predecessor(V&& visitor) const { + for(size_t i=_node->_num_successors; i<_node->_edges.size(); ++i) { + visitor(Task(_node->_edges[i])); } } @@ -654,17 +705,17 @@ class TaskView { /** @brief queries the number of predecessors of the task */ - size_t num_dependents() const; + size_t num_predecessors() const; /** @brief queries the number of strong dependents of the task */ - size_t num_strong_dependents() const; + size_t num_strong_dependencies() const; /** @brief queries the number of weak 
dependents of the task */ - size_t num_weak_dependents() const; + size_t num_weak_dependencies() const; /** @brief applies an visitor callable to each successor of the task @@ -676,7 +727,7 @@ class TaskView { @brief applies an visitor callable to each dependents of the task */ template - void for_each_dependent(V&& visitor) const; + void for_each_predecessor(V&& visitor) const; /** @brief queries the task type @@ -705,19 +756,19 @@ inline const std::string& TaskView::name() const { return _node._name; } -// Function: num_dependents -inline size_t TaskView::num_dependents() const { - return _node.num_dependents(); +// Function: num_predecessors +inline size_t TaskView::num_predecessors() const { + return _node.num_predecessors(); } -// Function: num_strong_dependents -inline size_t TaskView::num_strong_dependents() const { - return _node.num_strong_dependents(); +// Function: num_strong_dependencies +inline size_t TaskView::num_strong_dependencies() const { + return _node.num_strong_dependencies(); } -// Function: num_weak_dependents -inline size_t TaskView::num_weak_dependents() const { - return _node.num_weak_dependents(); +// Function: num_weak_dependencies +inline size_t TaskView::num_weak_dependencies() const { + return _node.num_weak_dependencies(); } // Function: num_successors @@ -749,17 +800,23 @@ inline size_t TaskView::hash_value() const { // Function: for_each_successor template void TaskView::for_each_successor(V&& visitor) const { - for(size_t i=0; i<_node._successors.size(); ++i) { - visitor(TaskView(*_node._successors[i])); + for(size_t i=0; i<_node._num_successors; ++i) { + visitor(TaskView(*_node._edges[i])); } + //for(size_t i=0; i<_node._successors.size(); ++i) { + // visitor(TaskView(*_node._successors[i])); + //} } -// Function: for_each_dependent +// Function: for_each_predecessor template -void TaskView::for_each_dependent(V&& visitor) const { - for(size_t i=0; i<_node._dependents.size(); ++i) { - visitor(TaskView(*_node._dependents[i])); +void TaskView::for_each_predecessor(V&& visitor) const { + for(size_t i=_node._num_successors; i<_node._edges.size(); ++i) { + visitor(TaskView(*_node._edges[i])); } + //for(size_t i=0; i<_node._predecessors.size(); ++i) { + // visitor(TaskView(*_node._predecessors[i])); + //} } } // end of namespace tf. 
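This part of the patch renames the dependent-oriented queries of tf::Task and tf::TaskView to predecessor-oriented ones (num_predecessors, num_strong_dependencies, num_weak_dependencies, for_each_predecessor). The sketch below shows the renamed query API from the application side; it uses only the public interface declared above, and the task names and printed strings are illustrative.

```cpp
#include <taskflow/taskflow.hpp>
#include <cassert>
#include <iostream>

int main() {
  tf::Taskflow taskflow;

  auto [a, b, c] = taskflow.emplace(
    [](){ /* work of a */ },
    [](){ /* work of b */ },
    [](){ /* work of c */ }
  );
  a.name("a"); b.name("b"); c.name("c");
  a.precede(b, c);                     // a -> b, a -> c

  assert(a.num_successors()   == 2);
  assert(b.num_predecessors() == 1);   // formerly num_dependents()

  b.for_each_predecessor([](tf::Task p){   // formerly for_each_dependent()
    std::cout << "predecessor of b: " << p.name() << '\n';
  });
  a.for_each_successor([](tf::Task s){
    std::cout << "successor of a: " << s.name() << '\n';
  });
  return 0;
}
```

The old num_dependents / for_each_dependent spellings are removed by this patch, so user code must be updated accordingly.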
---------------------------------------------------- diff --git a/bundled/taskflow-3.10.0/taskflow/core/taskflow.hpp b/bundled/taskflow-3.10.0/taskflow/core/taskflow.hpp index 09d4ffdc04..30d131a9d1 100644 --- a/bundled/taskflow-3.10.0/taskflow/core/taskflow.hpp +++ b/bundled/taskflow-3.10.0/taskflow/core/taskflow.hpp @@ -264,13 +264,13 @@ class Taskflow : public FlowBuilder { a.precede(b, c, d); assert(a.num_successors() == 3); - assert(b.num_dependents() == 1); - assert(c.num_dependents() == 1); - assert(d.num_dependents() == 1); + assert(b.num_predecessors() == 1); + assert(c.num_predecessors() == 1); + assert(d.num_predecessors() == 1); taskflow.remove_dependency(a, b); assert(a.num_successors() == 2); - assert(b.num_dependents() == 0); + assert(b.num_predecessors() == 0); @endcode */ inline void remove_dependency(Task from, Task to); @@ -376,17 +376,11 @@ void Taskflow::for_each_task(V&& visitor) const { // Procedure: remove_dependency inline void Taskflow::remove_dependency(Task from, Task to) { - from._node->_successors.erase(std::remove_if( - from._node->_successors.begin(), from._node->_successors.end(), [&](Node* i){ - return i == to._node; - } - ), from._node->_successors.end()); - - to._node->_dependents.erase(std::remove_if( - to._node->_dependents.begin(), to._node->_dependents.end(), [&](Node* i){ - return i == from._node; - } - ), to._node->_dependents.end()); + // remove "to" from the succcessor list of "from" + from._node->_remove_successors(to._node); + + // remove "from" from the predecessor list of "to" + to._node->_remove_predecessors(from._node); } // Procedure: dump @@ -462,19 +456,19 @@ inline void Taskflow::_dump( os << "];\n"; - for(size_t s=0; s_successors.size(); ++s) { + for(size_t s=0; s_num_successors; ++s) { if(node->_is_conditioner()) { // case edge is dashed - os << 'p' << node << " -> p" << node->_successors[s] + os << 'p' << node << " -> p" << node->_edges[s] << " [style=dashed label=\"" << s << "\"];\n"; } else { - os << 'p' << node << " -> p" << node->_successors[s] << ";\n"; + os << 'p' << node << " -> p" << node->_edges[s] << ";\n"; } } // subflow join node if(node->_parent && node->_parent->_handle.index() == Node::SUBFLOW && - node->_successors.size() == 0 + node->_num_successors == 0 ) { os << 'p' << node << " -> p" << node->_parent << " [style=dashed color=blue];\n"; } @@ -530,8 +524,9 @@ inline void Taskflow::_dump( os << " [m" << dumper.visited[module] << "]\"];\n"; - for(const auto s : n->_successors) { - os << 'p' << n << "->" << 'p' << s << ";\n"; + //for(const auto s : n->_successors) { + for(size_t i=0; i_num_successors; ++i) { + os << 'p' << n << "->" << 'p' << n->_edges[i] << ";\n"; } } } diff --git a/bundled/taskflow-3.10.0/taskflow/core/tsq.hpp b/bundled/taskflow-3.10.0/taskflow/core/tsq.hpp index 10e9e12eb2..220be052ee 100644 --- a/bundled/taskflow-3.10.0/taskflow/core/tsq.hpp +++ b/bundled/taskflow-3.10.0/taskflow/core/tsq.hpp @@ -155,6 +155,19 @@ class UnboundedTaskQueue { */ T steal(); + /** + @brief attempts to steal a task with a hint mechanism + + @param num_empty_steals a reference to a counter tracking consecutive empty steal attempts + + This function tries to steal a task from the queue. If the steal attempt + is successful, the stolen task is returned. + Additionally, if the queue is empty, the provided counter `num_empty_steals` is incremented; + otherwise, `num_empty_steals` is reset to zero. 
+ + */ + T steal_with_hint(size_t& num_empty_steals); + private: Array* resize_array(Array* a, int64_t b, int64_t t); @@ -181,16 +194,16 @@ UnboundedTaskQueue::~UnboundedTaskQueue() { // Function: empty template bool UnboundedTaskQueue::empty() const noexcept { - int64_t b = _bottom.load(std::memory_order_relaxed); int64_t t = _top.load(std::memory_order_relaxed); + int64_t b = _bottom.load(std::memory_order_relaxed); return (b <= t); } // Function: size template size_t UnboundedTaskQueue::size() const noexcept { - int64_t b = _bottom.load(std::memory_order_relaxed); int64_t t = _top.load(std::memory_order_relaxed); + int64_t b = _bottom.load(std::memory_order_relaxed); return static_cast(b >= t ? b - t : 0); } @@ -202,7 +215,7 @@ void UnboundedTaskQueue::push(T o) { int64_t t = _top.load(std::memory_order_acquire); Array* a = _array.load(std::memory_order_relaxed); - // queue is full + // queue is full with one additional item (b-t+1) if TF_UNLIKELY(a->capacity() - 1 < (b - t)) { a = resize_array(a, b, t); } @@ -268,6 +281,32 @@ T UnboundedTaskQueue::steal() { return item; } +// Function: steal +template +T UnboundedTaskQueue::steal_with_hint(size_t& num_empty_steals) { + + int64_t t = _top.load(std::memory_order_acquire); + std::atomic_thread_fence(std::memory_order_seq_cst); + int64_t b = _bottom.load(std::memory_order_acquire); + + T item {nullptr}; + + if(t < b) { + num_empty_steals = 0; + Array* a = _array.load(std::memory_order_consume); + item = a->pop(t); + if(!_top.compare_exchange_strong(t, t+1, + std::memory_order_seq_cst, + std::memory_order_relaxed)) { + return nullptr; + } + } + else { + ++num_empty_steals; + } + return item; +} + // Function: capacity template int64_t UnboundedTaskQueue::capacity() const noexcept { @@ -278,13 +317,21 @@ template typename UnboundedTaskQueue::Array* UnboundedTaskQueue::resize_array(Array* a, int64_t b, int64_t t) { + //Array* tmp = a->resize(b, t); + //_garbage.push_back(a); + //std::swap(a, tmp); + //_array.store(a, std::memory_order_release); + //// Note: the original paper using relaxed causes t-san to complain + ////_array.store(a, std::memory_order_relaxed); + //return a; + + Array* tmp = a->resize(b, t); _garbage.push_back(a); - std::swap(a, tmp); - _array.store(a, std::memory_order_release); + _array.store(tmp, std::memory_order_release); // Note: the original paper using relaxed causes t-san to complain //_array.store(a, std::memory_order_relaxed); - return a; + return tmp; } // ---------------------------------------------------------------------------- @@ -366,7 +413,7 @@ class BoundedTaskQueue { @tparam O data type @tparam C callable type @param item the item to perfect-forward to the queue - @param on_full callable to invoke when the queue is faull (insertion fails) + @param on_full callable to invoke when the queue is full (insertion fails) Only the owner thread can insert an item to the queue. @@ -378,7 +425,7 @@ class BoundedTaskQueue { @brief pops out an item from the queue Only the owner thread can pop out an item from the queue. - The return can be a @std_nullopt if this operation failed (empty queue). + The return can be a `nullptr` if this operation failed (empty queue). */ T pop(); @@ -386,24 +433,36 @@ class BoundedTaskQueue { @brief steals an item from the queue Any threads can try to steal an item from the queue. - The return can be a @std_nullopt if this operation failed (not necessary empty). + The return can be a `nullptr` if this operation failed (not necessary empty). 
*/ T steal(); + + /** + @brief attempts to steal a task with a hint mechanism + + @param num_empty_steals a reference to a counter tracking consecutive empty steal attempts + + This function tries to steal a task from the queue. If the steal attempt + is successful, the stolen task is returned. + Additionally, if the queue is empty, the provided counter `num_empty_steals` is incremented; + otherwise, `num_empty_steals` is reset to zero. + */ + T steal_with_hint(size_t& num_empty_steals); }; // Function: empty template bool BoundedTaskQueue::empty() const noexcept { - int64_t b = _bottom.load(std::memory_order_relaxed); int64_t t = _top.load(std::memory_order_relaxed); + int64_t b = _bottom.load(std::memory_order_relaxed); return b <= t; } // Function: size template size_t BoundedTaskQueue::size() const noexcept { - int64_t b = _bottom.load(std::memory_order_relaxed); int64_t t = _top.load(std::memory_order_relaxed); + int64_t b = _bottom.load(std::memory_order_relaxed); return static_cast(b >= t ? b - t : 0); } @@ -415,8 +474,8 @@ bool BoundedTaskQueue::try_push(O&& o) { int64_t b = _bottom.load(std::memory_order_relaxed); int64_t t = _top.load(std::memory_order_acquire); - // queue is full - if TF_UNLIKELY((b - t) >= BufferSize - 1) { + // queue is full with one additional item (b-t+1) + if TF_UNLIKELY((b - t) > BufferSize - 1) { return false; } @@ -438,8 +497,8 @@ void BoundedTaskQueue::push(O&& o, C&& on_full) { int64_t b = _bottom.load(std::memory_order_relaxed); int64_t t = _top.load(std::memory_order_acquire); - // queue is full - if TF_UNLIKELY((b - t) >= BufferSize - 1) { + // queue is full with one additional item (b-t+1) + if TF_UNLIKELY((b - t) > BufferSize - 1) { on_full(); return; } @@ -503,10 +562,34 @@ T BoundedTaskQueue::steal() { return item; } +// Function: steal +template +T BoundedTaskQueue::steal_with_hint(size_t& num_empty_steals) { + int64_t t = _top.load(std::memory_order_acquire); + std::atomic_thread_fence(std::memory_order_seq_cst); + int64_t b = _bottom.load(std::memory_order_acquire); + + T item {nullptr}; + + if(t < b) { + num_empty_steals = 0; + item = _buffer[t & BufferMask].load(std::memory_order_relaxed); + if(!_top.compare_exchange_strong(t, t+1, + std::memory_order_seq_cst, + std::memory_order_relaxed)) { + return nullptr; + } + } + else { + ++num_empty_steals; + } + return item; +} + // Function: capacity template constexpr size_t BoundedTaskQueue::capacity() const { - return static_cast(BufferSize - 1); + return static_cast(BufferSize); } @@ -725,3 +808,4 @@ constexpr size_t BoundedTaskQueue::capacity() const { } // end of namespace tf ----------------------------------------------------- + diff --git a/bundled/taskflow-3.10.0/taskflow/core/worker.hpp b/bundled/taskflow-3.10.0/taskflow/core/worker.hpp index 7b13a64fe9..174a50e6f7 100644 --- a/bundled/taskflow-3.10.0/taskflow/core/worker.hpp +++ b/bundled/taskflow-3.10.0/taskflow/core/worker.hpp @@ -17,14 +17,22 @@ namespace tf { // Default Notifier // ---------------------------------------------------------------------------- + /** @private */ #ifdef TF_ENABLE_ATOMIC_NOTIFIER - using DefaultNotifier = AtomicNotifierV2; -#else - //using DefaultNotifier = AtomicNotifierV2; + using DefaultNotifier = AtomicNotifier; +#elif TF_ENABLE_NONBLOCKING_NOTIFIER_V1 + using DefaultNotifier = NonblockingNotifierV1; +#elif TF_ENABLE_NONBLOCKING_NOTIFIER_V2 using DefaultNotifier = NonblockingNotifierV2; +#else + #if __cplusplus >= TF_CPP20 + using DefaultNotifier = AtomicNotifier; + #else + using DefaultNotifier = 
NonblockingNotifierV2; + #endif #endif // ---------------------------------------------------------------------------- @@ -80,15 +88,29 @@ class Worker { std::thread& thread() { return _thread; } private: + + #if __cplusplus >= TF_CPP20 + std::atomic_flag _done = ATOMIC_FLAG_INIT; + #else + std::atomic _done {false}; + #endif size_t _id; size_t _vtm; Executor* _executor {nullptr}; DefaultNotifier::Waiter* _waiter; - std::default_random_engine _rdgen; - std::uniform_int_distribution _rdvtm; std::thread _thread; + + std::default_random_engine _rdgen; + //std::uniform_int_distribution _udist; + BoundedTaskQueue _wsq; + + //TF_FORCE_INLINE size_t _rdvtm() { + // auto r = _udist(_rdgen); + // return r + (r >= _id); + //} + }; diff --git a/bundled/taskflow-3.10.0/taskflow/cuda/algorithm/for_each.hpp b/bundled/taskflow-3.10.0/taskflow/cuda/algorithm/for_each.hpp index 38a6f85977..551cca178d 100644 --- a/bundled/taskflow-3.10.0/taskflow/cuda/algorithm/for_each.hpp +++ b/bundled/taskflow-3.10.0/taskflow/cuda/algorithm/for_each.hpp @@ -14,12 +14,12 @@ namespace detail { /** @private */ -template +template __global__ void cuda_for_each_kernel(I first, unsigned count, C c) { - auto tid = threadIdx.x; - auto bid = blockIdx.x; - auto tile = cuda_get_tile(bid, nt*vt, count); - cuda_strided_iterate( + auto tid = threadIdx.x; + auto bid = blockIdx.x; + auto tile = cuda_get_tile(bid, E::nv, count); + cuda_strided_iterate( [=](auto, auto j) { c(*(first + tile.begin + j)); }, @@ -28,12 +28,12 @@ __global__ void cuda_for_each_kernel(I first, unsigned count, C c) { } /** @private */ -template +template __global__ void cuda_for_each_index_kernel(I first, I inc, unsigned count, C c) { auto tid = threadIdx.x; auto bid = blockIdx.x; - auto tile = cuda_get_tile(bid, nt*vt, count); - cuda_strided_iterate( + auto tile = cuda_get_tile(bid, E::nv, count); + cuda_strided_iterate( [=]__device__(auto, auto j) { c(first + inc*(tile.begin+j)); }, @@ -43,268 +43,62 @@ __global__ void cuda_for_each_index_kernel(I first, I inc, unsigned count, C c) } // end of namespace detail ------------------------------------------------- -// ---------------------------------------------------------------------------- -// cuda standard algorithms: single_task/for_each/for_each_index -// ---------------------------------------------------------------------------- - -/** -@brief runs a callable asynchronously using one kernel thread - -@tparam P execution policy type -@tparam C closure type - -@param p execution policy -@param c closure to run by one kernel thread - -The function launches a single kernel thread to run the given callable -through the stream in the execution policy object. -*/ -template -void cuda_single_task(P&& p, C c) { - cuda_kernel<<<1, 1, 0, p.stream()>>>( - [=]__device__(auto, auto) mutable { c(); } - ); -} - -/** -@brief performs asynchronous parallel iterations over a range of items - -@tparam P execution policy type -@tparam I input iterator type -@tparam C unary operator type - -@param p execution policy object -@param first iterator to the beginning of the range -@param last iterator to the end of the range -@param c unary operator to apply to each dereferenced iterator - -This function is equivalent to a parallel execution of the following loop -on a GPU: - -@code{.cpp} -for(auto itr = first; itr != last; itr++) { - c(*itr); -} -@endcode -*/ -template -void cuda_for_each(P&& p, I first, I last, C c) { - - using E = std::decay_t
<P>
; - - unsigned count = std::distance(first, last); - - if(count == 0) { - return; - } - - detail::cuda_for_each_kernel<<>>( - first, count, c - ); -} - -/** -@brief performs asynchronous parallel iterations over - an index-based range of items - -@tparam P execution policy type -@tparam I input index type -@tparam C unary operator type - -@param p execution policy object -@param first index to the beginning of the range -@param last index to the end of the range -@param inc step size between successive iterations -@param c unary operator to apply to each index - -This function is equivalent to a parallel execution of -the following loop on a GPU: - -@code{.cpp} -// step is positive [first, last) -for(auto i=first; ilast; i+=step) { - c(i); -} -@endcode -*/ -template -void cuda_for_each_index(P&& p, I first, I last, I inc, C c) { - - using E = std::decay_t
<P>
; - - unsigned count = distance(first, last, inc); - - if(count == 0) { - return; - } - - detail::cuda_for_each_index_kernel<<>>( - first, inc, count, c - ); -} - -// ---------------------------------------------------------------------------- -// single_task -// ---------------------------------------------------------------------------- - -/** @private */ -template -__global__ void cuda_single_task(C callable) { - callable(); -} - -// Function: single_task -template -cudaTask cudaFlow::single_task(C c) { - return kernel(1, 1, 0, cuda_single_task, c); -} - -// Function: single_task -template -void cudaFlow::single_task(cudaTask task, C c) { - return kernel(task, 1, 1, 0, cuda_single_task, c); -} - -// Function: single_task -template -cudaTask cudaFlowCapturer::single_task(C callable) { - return on([=] (cudaStream_t stream) mutable { - cuda_single_task(cudaDefaultExecutionPolicy(stream), callable); - }); -} - -// Function: single_task -template -void cudaFlowCapturer::single_task(cudaTask task, C callable) { - on(task, [=] (cudaStream_t stream) mutable { - cuda_single_task(cudaDefaultExecutionPolicy(stream), callable); - }); -} - // ---------------------------------------------------------------------------- // cudaFlow: for_each, for_each_index // ---------------------------------------------------------------------------- // Function: for_each -template -cudaTask cudaFlow::for_each(I first, I last, C c) { +template +template +cudaTask cudaGraphBase::for_each(I first, I last, C c) { - using E = cudaDefaultExecutionPolicy; - unsigned count = std::distance(first, last); - // TODO: - //if(count == 0) { - // return; - //} - return kernel( E::num_blocks(count), E::nt, 0, - detail::cuda_for_each_kernel, first, count, c + detail::cuda_for_each_kernel, first, count, c ); } // Function: for_each -template -void cudaFlow::for_each(cudaTask task, I first, I last, C c) { - - using E = cudaDefaultExecutionPolicy; +template +template +void cudaGraphExecBase::for_each(cudaTask task, I first, I last, C c) { unsigned count = std::distance(first, last); - // TODO: - //if(count == 0) { - // return; - //} - kernel(task, E::num_blocks(count), E::nt, 0, - detail::cuda_for_each_kernel, first, count, c + detail::cuda_for_each_kernel, first, count, c ); } // Function: for_each_index -template -cudaTask cudaFlow::for_each_index(I first, I last, I inc, C c) { - - using E = cudaDefaultExecutionPolicy; +template +template +cudaTask cudaGraphBase::for_each_index(I first, I last, I inc, C c) { unsigned count = distance(first, last, inc); - // TODO: - //if(count == 0) { - // return; - //} - return kernel( E::num_blocks(count), E::nt, 0, - detail::cuda_for_each_index_kernel, first, inc, count, c + detail::cuda_for_each_index_kernel, first, inc, count, c ); } // Function: for_each_index -template -void cudaFlow::for_each_index(cudaTask task, I first, I last, I inc, C c) { +template +template +void cudaGraphExecBase::for_each_index(cudaTask task, I first, I last, I inc, C c) { - using E = cudaDefaultExecutionPolicy; - unsigned count = distance(first, last, inc); - - // TODO: - //if(count == 0) { - // return; - //} return kernel(task, E::num_blocks(count), E::nt, 0, - detail::cuda_for_each_index_kernel, first, inc, count, c + detail::cuda_for_each_index_kernel, first, inc, count, c ); } -// ---------------------------------------------------------------------------- -// cudaFlowCapturer: for_each, for_each_index -// ---------------------------------------------------------------------------- - -// Function: for_each 
-template -cudaTask cudaFlowCapturer::for_each(I first, I last, C c) { - return on([=](cudaStream_t stream) mutable { - cuda_for_each(cudaDefaultExecutionPolicy(stream), first, last, c); - }); -} - -// Function: for_each_index -template -cudaTask cudaFlowCapturer::for_each_index(I beg, I end, I inc, C c) { - return on([=] (cudaStream_t stream) mutable { - cuda_for_each_index(cudaDefaultExecutionPolicy(stream), beg, end, inc, c); - }); -} - -// Function: for_each -template -void cudaFlowCapturer::for_each(cudaTask task, I first, I last, C c) { - on(task, [=](cudaStream_t stream) mutable { - cuda_for_each(cudaDefaultExecutionPolicy(stream), first, last, c); - }); -} - -// Function: for_each_index -template -void cudaFlowCapturer::for_each_index( - cudaTask task, I beg, I end, I inc, C c -) { - on(task, [=] (cudaStream_t stream) mutable { - cuda_for_each_index(cudaDefaultExecutionPolicy(stream), beg, end, inc, c); - }); -} - - } // end of namespace tf ----------------------------------------------------- diff --git a/bundled/taskflow-3.10.0/taskflow/cuda/algorithm/reduce.hpp b/bundled/taskflow-3.10.0/taskflow/cuda/algorithm/reduce.hpp index d6ba33244d..5a5de0a800 100644 --- a/bundled/taskflow-3.10.0/taskflow/cuda/algorithm/reduce.hpp +++ b/bundled/taskflow-3.10.0/taskflow/cuda/algorithm/reduce.hpp @@ -17,9 +17,9 @@ namespace tf::detail { template struct cudaBlockReduce { - static const unsigned group_size = std::min(nt, CUDA_WARP_SIZE); - static const unsigned num_passes = log2(group_size); - static const unsigned num_items = nt / group_size; + static constexpr unsigned group_size = (std::min)(nt, CUDA_WARP_SIZE); + static constexpr unsigned num_passes = static_floor_log2(); + static constexpr unsigned num_items = nt / group_size; static_assert( nt && (0 == nt % CUDA_WARP_SIZE), diff --git a/bundled/taskflow-3.10.0/taskflow/cuda/algorithm/scan.hpp b/bundled/taskflow-3.10.0/taskflow/cuda/algorithm/scan.hpp index bce0d63417..223d683cf8 100644 --- a/bundled/taskflow-3.10.0/taskflow/cuda/algorithm/scan.hpp +++ b/bundled/taskflow-3.10.0/taskflow/cuda/algorithm/scan.hpp @@ -42,9 +42,9 @@ struct cudaScanResult { template struct cudaBlockScan { - const static unsigned num_warps = nt / CUDA_WARP_SIZE; - const static unsigned num_passes = log2(nt); - const static unsigned capacity = nt + num_warps; + static constexpr unsigned num_warps = nt / CUDA_WARP_SIZE; + static constexpr unsigned num_passes = static_floor_log2(); + static constexpr unsigned capacity = nt + num_warps; /** @private */ union storage_t { diff --git a/bundled/taskflow-3.10.0/taskflow/cuda/algorithm/single_task.hpp b/bundled/taskflow-3.10.0/taskflow/cuda/algorithm/single_task.hpp new file mode 100644 index 0000000000..4177ff38ef --- /dev/null +++ b/bundled/taskflow-3.10.0/taskflow/cuda/algorithm/single_task.hpp @@ -0,0 +1,36 @@ +#pragma once + +/** +@file taskflow/cuda/algorithm/single_task.hpp +@brief cuda single-task algorithms include file +*/ + +namespace tf { + +/** @private */ +template +__global__ void cuda_single_task(C callable) { + callable(); +} + +// Function: single_task +template +template +cudaTask cudaGraphBase::single_task(C c) { + return kernel(1, 1, 0, cuda_single_task, c); +} + +// Function: single_task +template +template +void cudaGraphExecBase::single_task(cudaTask task, C c) { + return kernel(task, 1, 1, 0, cuda_single_task, c); +} + +} // end of namespace tf ----------------------------------------------------- + + + + + + diff --git a/bundled/taskflow-3.10.0/taskflow/cuda/algorithm/sort.hpp 
b/bundled/taskflow-3.10.0/taskflow/cuda/algorithm/sort.hpp index 0e563f414f..97695f8778 100644 --- a/bundled/taskflow-3.10.0/taskflow/cuda/algorithm/sort.hpp +++ b/bundled/taskflow-3.10.0/taskflow/cuda/algorithm/sort.hpp @@ -150,7 +150,7 @@ template struct cudaBlockSort { static constexpr bool has_values = !std::is_same::value; - static constexpr unsigned num_passes = log2(nt); + static constexpr unsigned num_passes = static_floor_log2(); /** @private */ union Storage { diff --git a/bundled/taskflow-3.10.0/taskflow/cuda/algorithm/transform.hpp b/bundled/taskflow-3.10.0/taskflow/cuda/algorithm/transform.hpp index b1146bdd7b..e8fc386e45 100644 --- a/bundled/taskflow-3.10.0/taskflow/cuda/algorithm/transform.hpp +++ b/bundled/taskflow-3.10.0/taskflow/cuda/algorithm/transform.hpp @@ -18,12 +18,12 @@ namespace detail { /** @private */ -template +template __global__ void cuda_transform_kernel(I first, unsigned count, O output, C op) { auto tid = threadIdx.x; auto bid = blockIdx.x; - auto tile = cuda_get_tile(bid, nt*vt, count); - cuda_strided_iterate( + auto tile = cuda_get_tile(bid, E::nv, count); + cuda_strided_iterate( [=]__device__(auto, auto j) { auto offset = j + tile.begin; *(output + offset) = op(*(first+offset)); @@ -36,14 +36,14 @@ __global__ void cuda_transform_kernel(I first, unsigned count, O output, C op) { /** @private */ -template +template __global__ void cuda_transform_kernel( I1 first1, I2 first2, unsigned count, O output, C op ) { auto tid = threadIdx.x; auto bid = blockIdx.x; - auto tile = cuda_get_tile(bid, nt*vt, count); - cuda_strided_iterate( + auto tile = cuda_get_tile(bid, E::nv, count); + cuda_strided_iterate( [=]__device__(auto, auto j) { auto offset = j + tile.begin; *(output + offset) = op(*(first1+offset), *(first2+offset)); @@ -55,224 +55,68 @@ __global__ void cuda_transform_kernel( } // end of namespace detail ------------------------------------------------- -// ---------------------------------------------------------------------------- -// CUDA standard algorithms: transform -// ---------------------------------------------------------------------------- - -/** -@brief performs asynchronous parallel transforms over a range of items - -@tparam P execution policy type -@tparam I input iterator type -@tparam O output iterator type -@tparam C unary operator type - -@param p execution policy -@param first iterator to the beginning of the range -@param last iterator to the end of the range -@param output iterator to the beginning of the output range -@param op unary operator to apply to transform each item - -This method is equivalent to the parallel execution of the following loop on a GPU: - -@code{.cpp} -while (first != last) { - *output++ = op(*first++); -} -@endcode - -*/ -template -void cuda_transform(P&& p, I first, I last, O output, C op) { - - using E = std::decay_t
<P>
; - - unsigned count = std::distance(first, last); - - if(count == 0) { - return; - } - - detail::cuda_transform_kernel - <<>> ( - first, count, output, op - ); -} - -/** -@brief performs asynchronous parallel transforms over two ranges of items - -@tparam P execution policy type -@tparam I1 first input iterator type -@tparam I2 second input iterator type -@tparam O output iterator type -@tparam C binary operator type - -@param p execution policy -@param first1 iterator to the beginning of the first range -@param last1 iterator to the end of the first range -@param first2 iterator to the beginning of the second range -@param output iterator to the beginning of the output range -@param op binary operator to apply to transform each pair of items - -This method is equivalent to the parallel execution of the following loop on a GPU: - -@code{.cpp} -while (first1 != last1) { - *output++ = op(*first1++, *first2++); -} -@endcode -*/ -template -void cuda_transform( - P&& p, I1 first1, I1 last1, I2 first2, O output, C op -) { - - using E = std::decay_t
<P>
; - - unsigned count = std::distance(first1, last1); - - if(count == 0) { - return; - } - - detail::cuda_transform_kernel - <<>> ( - first1, first2, count, output, op - ); -} - // ---------------------------------------------------------------------------- // cudaFlow // ---------------------------------------------------------------------------- // Function: transform -template -cudaTask cudaFlow::transform(I first, I last, O output, C c) { +template +template +cudaTask cudaGraphBase::transform(I first, I last, O output, C c) { - using E = cudaDefaultExecutionPolicy; - unsigned count = std::distance(first, last); - // TODO: - //if(count == 0) { - // return; - //} - return kernel( E::num_blocks(count), E::nt, 0, - detail::cuda_transform_kernel, + detail::cuda_transform_kernel, first, count, output, c ); } // Function: transform -template -cudaTask cudaFlow::transform(I1 first1, I1 last1, I2 first2, O output, C c) { +template +template +cudaTask cudaGraphBase::transform(I1 first1, I1 last1, I2 first2, O output, C c) { - using E = cudaDefaultExecutionPolicy; - unsigned count = std::distance(first1, last1); - // TODO: - //if(count == 0) { - // return; - //} - return kernel( E::num_blocks(count), E::nt, 0, - detail::cuda_transform_kernel, + detail::cuda_transform_kernel, first1, first2, count, output, c ); } + // Function: update transform -template -void cudaFlow::transform(cudaTask task, I first, I last, O output, C c) { +template +template +void cudaGraphExecBase::transform(cudaTask task, I first, I last, O output, C c) { - using E = cudaDefaultExecutionPolicy; - unsigned count = std::distance(first, last); - // TODO: - //if(count == 0) { - // return; - //} - kernel(task, E::num_blocks(count), E::nt, 0, - detail::cuda_transform_kernel, + detail::cuda_transform_kernel, first, count, output, c ); } // Function: update transform -template -void cudaFlow::transform( +template +template +void cudaGraphExecBase::transform( cudaTask task, I1 first1, I1 last1, I2 first2, O output, C c ) { - using E = cudaDefaultExecutionPolicy; - unsigned count = std::distance(first1, last1); - - // TODO: - //if(count == 0) { - // return; - //} kernel(task, E::num_blocks(count), E::nt, 0, - detail::cuda_transform_kernel, + detail::cuda_transform_kernel, first1, first2, count, output, c ); } -// ---------------------------------------------------------------------------- -// cudaFlowCapturer -// ---------------------------------------------------------------------------- - -// Function: transform -template -cudaTask cudaFlowCapturer::transform(I first, I last, O output, C op) { - return on([=](cudaStream_t stream) mutable { - cudaDefaultExecutionPolicy p(stream); - cuda_transform(p, first, last, output, op); - }); -} - -// Function: transform -template -cudaTask cudaFlowCapturer::transform( - I1 first1, I1 last1, I2 first2, O output, C op -) { - return on([=](cudaStream_t stream) mutable { - cudaDefaultExecutionPolicy p(stream); - cuda_transform(p, first1, last1, first2, output, op); - }); -} - -// Function: transform -template -void cudaFlowCapturer::transform( - cudaTask task, I first, I last, O output, C op -) { - on(task, [=] (cudaStream_t stream) mutable { - cudaDefaultExecutionPolicy p(stream); - cuda_transform(p, first, last, output, op); - }); -} - -// Function: transform -template -void cudaFlowCapturer::transform( - cudaTask task, I1 first1, I1 last1, I2 first2, O output, C op -) { - on(task, [=] (cudaStream_t stream) mutable { - cudaDefaultExecutionPolicy p(stream); - cuda_transform(p, first1, last1, 
first2, output, op); - }); -} - } // end of namespace tf ----------------------------------------------------- diff --git a/bundled/taskflow-3.10.0/taskflow/cuda/cuda_capturer.hpp b/bundled/taskflow-3.10.0/taskflow/cuda/cuda_capturer.hpp index c73061cd6c..f0a431b8c2 100644 --- a/bundled/taskflow-3.10.0/taskflow/cuda/cuda_capturer.hpp +++ b/bundled/taskflow-3.10.0/taskflow/cuda/cuda_capturer.hpp @@ -1,6 +1,5 @@ #pragma once -#include "cuda_task.hpp" #include "cuda_optimizer.hpp" /** @@ -474,7 +473,7 @@ class cudaFlowCapturer { a native CUDA graph. */ template - OPT& make_optimizer(ArgsT&&... args); + void make_optimizer(ArgsT&&... args); /** @brief captures the cudaFlow and turns it into a CUDA Graph @@ -505,17 +504,15 @@ class cudaFlowCapturer { cudaGraph_t native_graph(); /** - @brief acquires a reference to the underlying CUDA graph executable + @brief instantiates an executable graph from this cudaflow capturer */ - cudaGraphExec_t native_executable(); + cudaGraphExec instantiate(); private: cudaFlowGraph _cfg; Optimizer _optimizer; - - cudaGraphExec _exe {nullptr}; }; // Function: empty @@ -530,7 +527,6 @@ inline size_t cudaFlowCapturer::num_tasks() const { // Procedure: clear inline void cudaFlowCapturer::clear() { - _exe.clear(); _cfg.clear(); } @@ -560,10 +556,6 @@ inline cudaTask cudaFlowCapturer::noop() { return on([](cudaStream_t){}); } -// Function: noop -inline void cudaFlowCapturer::noop(cudaTask task) { - on(task, [](cudaStream_t){}); -} // Function: memcpy inline cudaTask cudaFlowCapturer::memcpy( @@ -607,6 +599,12 @@ cudaTask cudaFlowCapturer::kernel( }); } +// Function: make_optimizer +template +void cudaFlowCapturer::make_optimizer(ArgsT&&... args) { + return _optimizer.emplace(std::forward(args)...); +} + // Function: capture inline cudaGraph_t cudaFlowCapturer::capture() { return std::visit( @@ -614,111 +612,121 @@ inline cudaGraph_t cudaFlowCapturer::capture() { ); } -// Procedure: run -inline void cudaFlowCapturer::run(cudaStream_t stream) { - - // If the topology got changed, we need to destroy the executable - // and create a new one - if(_cfg._state & cudaFlowGraph::CHANGED) { - _cfg._native_handle.reset(capture()); - _exe.instantiate(_cfg._native_handle); - } - // if the graph is just updated (i.e., topology does not change), - // we can skip part of the optimization and just update the executable - // with the new captured graph - else if(_cfg._state & cudaFlowGraph::UPDATED) { - // TODO: skip part of the optimization (e.g., levelization) - _cfg._native_handle.reset(capture()); - if(_exe.update(_cfg._native_handle) != cudaGraphExecUpdateSuccess) { - _exe.instantiate(_cfg._native_handle); - } - } +// Function: instantiate +inline cudaGraphExec cudaFlowCapturer::instantiate() { + + _cfg._native_handle.reset(capture()); - // run the executable (should exist) - _exe.launch(stream); + cudaGraphExec_t exec; + TF_CHECK_CUDA( + cudaGraphInstantiate(&exec, _cfg._native_handle, nullptr, nullptr, 0), + "failed to create an executable graph" + ); - _cfg._state = cudaFlowGraph::OFFLOADED; + return cudaGraphExec(exec); } +//// Procedure: run +//inline void cudaFlowCapturer::run(cudaStream_t stream) { +// +// // If the topology got changed, we need to destroy the executable +// // and create a new one +// if(_cfg._state & cudaFlowGraph::CHANGED) { +// _cfg._native_handle.reset(capture()); +// _exe.instantiate(_cfg._native_handle); +// } +// // if the graph is just updated (i.e., topology does not change), +// // we can skip part of the optimization and just update the 
executable +// // with the new captured graph +// else if(_cfg._state & cudaFlowGraph::UPDATED) { +// // TODO: skip part of the optimization (e.g., levelization) +// _cfg._native_handle.reset(capture()); +// if(_exe.update(_cfg._native_handle) != cudaGraphExecUpdateSuccess) { +// _exe.instantiate(_cfg._native_handle); +// } +// } +// +// // run the executable (should exist) +// _exe.run(stream); +// +// _cfg._state = cudaFlowGraph::OFFLOADED; +//} + // Function: native_graph inline cudaGraph_t cudaFlowCapturer::native_graph() { return _cfg._native_handle; } -// Function: native_executable -inline cudaGraphExec_t cudaFlowCapturer::native_executable() { - return _exe; -} - -// Function: on -template , void>* -> -void cudaFlowCapturer::on(cudaTask task, C&& callable) { - - if(task.type() != cudaTaskType::CAPTURE) { - TF_THROW("invalid cudaTask type (must be CAPTURE)"); - } - - _cfg._state |= cudaFlowGraph::UPDATED; - - std::get_if(&task._node->_handle)->work = - std::forward(callable); -} - -// Function: memcpy -inline void cudaFlowCapturer::memcpy( - cudaTask task, void* dst, const void* src, size_t count -) { - on(task, [dst, src, count](cudaStream_t stream) mutable { - TF_CHECK_CUDA( - cudaMemcpyAsync(dst, src, count, cudaMemcpyDefault, stream), - "failed to capture memcpy" - ); - }); -} +//// Function: on +//template , void>* +//> +//void cudaFlowCapturer::on(cudaTask task, C&& callable) { +// +// if(task.type() != cudaTaskType::CAPTURE) { +// TF_THROW("invalid cudaTask type (must be CAPTURE)"); +// } +// +// _cfg._state |= cudaFlowGraph::UPDATED; +// +// std::get_if(&task._node->_handle)->work = +// std::forward(callable); +//} +// +//// Function: noop +//inline void cudaFlowCapturer::noop(cudaTask task) { +// on(task, [](cudaStream_t){}); +//} +//// +//// Function: memcpy +//inline void cudaFlowCapturer::memcpy( +// cudaTask task, void* dst, const void* src, size_t count +//) { +// on(task, [dst, src, count](cudaStream_t stream) mutable { +// TF_CHECK_CUDA( +// cudaMemcpyAsync(dst, src, count, cudaMemcpyDefault, stream), +// "failed to capture memcpy" +// ); +// }); +//} +// +//// Function: copy +//template , void>* +//> +//void cudaFlowCapturer::copy( +// cudaTask task, T* tgt, const T* src, size_t num +//) { +// on(task, [tgt, src, num] (cudaStream_t stream) mutable { +// TF_CHECK_CUDA( +// cudaMemcpyAsync(tgt, src, sizeof(T)*num, cudaMemcpyDefault, stream), +// "failed to capture copy" +// ); +// }); +//} +// +//// Function: memset +//inline void cudaFlowCapturer::memset( +// cudaTask task, void* ptr, int v, size_t n +//) { +// on(task, [ptr, v, n] (cudaStream_t stream) mutable { +// TF_CHECK_CUDA( +// cudaMemsetAsync(ptr, v, n, stream), "failed to capture memset" +// ); +// }); +//} +// +//// Function: kernel +//template +//void cudaFlowCapturer::kernel( +// cudaTask task, dim3 g, dim3 b, size_t s, F f, ArgsT&&... args +//) { +// on(task, [g, b, s, f, args...] 
(cudaStream_t stream) mutable { +// f<<>>(args...); +// }); +//} +// -// Function: copy -template , void>* -> -void cudaFlowCapturer::copy( - cudaTask task, T* tgt, const T* src, size_t num -) { - on(task, [tgt, src, num] (cudaStream_t stream) mutable { - TF_CHECK_CUDA( - cudaMemcpyAsync(tgt, src, sizeof(T)*num, cudaMemcpyDefault, stream), - "failed to capture copy" - ); - }); -} - -// Function: memset -inline void cudaFlowCapturer::memset( - cudaTask task, void* ptr, int v, size_t n -) { - on(task, [ptr, v, n] (cudaStream_t stream) mutable { - TF_CHECK_CUDA( - cudaMemsetAsync(ptr, v, n, stream), "failed to capture memset" - ); - }); -} - -// Function: kernel -template -void cudaFlowCapturer::kernel( - cudaTask task, dim3 g, dim3 b, size_t s, F f, ArgsT&&... args -) { - on(task, [g, b, s, f, args...] (cudaStream_t stream) mutable { - f<<>>(args...); - }); -} - -// Function: make_optimizer -template -OPT& cudaFlowCapturer::make_optimizer(ArgsT&&... args) { - return _optimizer.emplace(std::forward(args)...); -} } // end of namespace tf ----------------------------------------------------- diff --git a/bundled/taskflow-3.10.0/taskflow/cuda/cuda_execution_policy.hpp b/bundled/taskflow-3.10.0/taskflow/cuda/cuda_execution_policy.hpp index ae90d98aa5..c33eaa1d5d 100644 --- a/bundled/taskflow-3.10.0/taskflow/cuda/cuda_execution_policy.hpp +++ b/bundled/taskflow-3.10.0/taskflow/cuda/cuda_execution_policy.hpp @@ -42,25 +42,10 @@ class cudaExecutionPolicy { const static unsigned nv = NT*VT; /** - @brief constructs an execution policy object with default stream + @brief constructs an execution policy object */ cudaExecutionPolicy() = default; - /** - @brief constructs an execution policy object with the given stream - */ - explicit cudaExecutionPolicy(cudaStream_t s) : _stream{s} {} - - /** - @brief queries the associated stream - */ - cudaStream_t stream() noexcept { return _stream; }; - - /** - @brief assigns a stream - */ - void stream(cudaStream_t stream) noexcept { _stream = stream; } - /** @brief queries the number of blocks to accommodate N elements */ @@ -138,10 +123,6 @@ class cudaExecutionPolicy { tf::cuda_merge and tf::cuda_merge_by_key. 
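+
+  A usage sketch (the byte count is queried statically and the scratch buffer
+  is then allocated with plain `cudaMalloc`; error checking is omitted):
+
+  @code{.cpp}
+  unsigned bytes = tf::cudaDefaultExecutionPolicy::merge_bufsz(1024, 2048);
+  void* buffer = nullptr;
+  cudaMalloc(&buffer, bytes);
+  // ... pass `buffer` to tf::cuda_merge or tf::cuda_merge_by_key ...
+  cudaFree(buffer);
+  @endcode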
*/ inline static unsigned merge_bufsz(unsigned a_count, unsigned b_count); - - private: - - cudaStream_t _stream {0}; }; /** diff --git a/bundled/taskflow-3.10.0/taskflow/cuda/cuda_graph.hpp b/bundled/taskflow-3.10.0/taskflow/cuda/cuda_graph.hpp index 3cfee37d8a..b96a809855 100644 --- a/bundled/taskflow-3.10.0/taskflow/cuda/cuda_graph.hpp +++ b/bundled/taskflow-3.10.0/taskflow/cuda/cuda_graph.hpp @@ -1,5 +1,7 @@ #pragma once +#include + #include "cuda_memory.hpp" #include "cuda_stream.hpp" #include "cuda_meta.hpp" @@ -158,6 +160,8 @@ inline size_t cuda_graph_get_num_edges(cudaGraph_t graph) { return num_edges; } + + /** @brief acquires the nodes in a native CUDA graph */ @@ -223,580 +227,851 @@ inline cudaGraphNodeType cuda_get_graph_node_type(cudaGraphNode_t node) { return type; } +// ---------------------------------------------------------------------------- +// cudaTask Types +// ---------------------------------------------------------------------------- + /** -@brief convert the type of a native CUDA graph node to a readable string +@brief convert a cuda_task type to a human-readable string */ -inline const char* cuda_graph_node_type_to_string(cudaGraphNodeType type) { - switch(type) { - case cudaGraphNodeTypeKernel : return "kernel"; - case cudaGraphNodeTypeMemcpy : return "memcpy"; - case cudaGraphNodeTypeMemset : return "memset"; - case cudaGraphNodeTypeHost : return "host"; - case cudaGraphNodeTypeGraph : return "graph"; - case cudaGraphNodeTypeEmpty : return "empty"; - case cudaGraphNodeTypeWaitEvent : return "event_wait"; - case cudaGraphNodeTypeEventRecord : return "event_record"; - default : return "undefined"; +constexpr const char* to_string(cudaGraphNodeType type) { + switch (type) { + case cudaGraphNodeTypeKernel: return "Kernel"; + case cudaGraphNodeTypeMemcpy: return "Memcpy"; + case cudaGraphNodeTypeMemset: return "Memset"; + case cudaGraphNodeTypeHost: return "Host"; + case cudaGraphNodeTypeGraph: return "Graph"; + case cudaGraphNodeTypeEmpty: return "Empty"; + case cudaGraphNodeTypeWaitEvent: return "WaitEvent"; + case cudaGraphNodeTypeEventRecord: return "EventRecord"; + case cudaGraphNodeTypeExtSemaphoreSignal: return "ExtSemaphoreSignal"; + case cudaGraphNodeTypeExtSemaphoreWait: return "ExtSemaphoreWait"; + case cudaGraphNodeTypeMemAlloc: return "MemAlloc"; + case cudaGraphNodeTypeMemFree: return "MemFree"; + case cudaGraphNodeTypeConditional: return "Conditional"; + default: return "undefined"; } } +// ---------------------------------------------------------------------------- +// cudaTask +// ---------------------------------------------------------------------------- + /** -@brief dumps a native CUDA graph and all associated child graphs to a DOT format +@class cudaTask -@tparam T output stream target -@param os target output stream -@param graph native CUDA graph +@brief class to create a task handle of a CUDA Graph node */ -template -void cuda_dump_graph(T& os, cudaGraph_t g) { - - os << "digraph cudaGraph {\n"; - - std::stack> stack; - stack.push(std::make_tuple(g, nullptr, 1)); - - int pl = 0; - - while(stack.empty() == false) { - - auto [graph, parent, l] = stack.top(); - stack.pop(); - - for(int i=0; i " << 'p' << to << ";\n"; - } - - for(auto& node : nodes) { - auto type = cuda_get_graph_node_type(node); - if(type == cudaGraphNodeTypeGraph) { - - cudaGraph_t child_graph; - TF_CHECK_CUDA(cudaGraphChildGraphNodeGetGraph(node, &child_graph), ""); - stack.push(std::make_tuple(child_graph, node, l+1)); - - os << 'p' << node << "[" - << "shape=folder, 
style=filled, fontcolor=white, fillcolor=purple, " - << "label=\"cudaGraph-L" << l+1 - << "\"];\n"; - } - else { - os << 'p' << node << "[label=\"" - << cuda_graph_node_type_to_string(type) - << "\"];\n"; - } - } - - // precede to parent - if(parent != nullptr) { - std::unordered_set successors; - for(const auto& p : edges) { - successors.insert(p.first); - } - for(auto node : nodes) { - if(successors.find(node) == successors.end()) { - os << 'p' << node << " -> " << 'p' << parent << ";\n"; - } - } - } - - // set the previous level - pl = l; - } +class cudaTask { - for(int i=0; i<=pl; i++) { - os << "}\n"; - } + template + friend class cudaGraphBase; + + template + friend class cudaGraphExecBase; + + friend class cudaFlow; + friend class cudaFlowCapturer; + friend class cudaFlowCapturerBase; + + friend std::ostream& operator << (std::ostream&, const cudaTask&); + + public: + + /** + @brief constructs an empty cudaTask + */ + cudaTask() = default; + + /** + @brief copy-constructs a cudaTask + */ + cudaTask(const cudaTask&) = default; + + /** + @brief copy-assigns a cudaTask + */ + cudaTask& operator = (const cudaTask&) = default; + + /** + @brief adds precedence links from this to other tasks + + @tparam Ts parameter pack + + @param tasks one or multiple tasks + + @return @c *this + */ + template + cudaTask& precede(Ts&&... tasks); + + /** + @brief adds precedence links from other tasks to this + + @tparam Ts parameter pack + + @param tasks one or multiple tasks + + @return @c *this + */ + template + cudaTask& succeed(Ts&&... tasks); + + /** + @brief queries the number of successors + */ + size_t num_successors() const; + + /** + @brief queries the number of dependents + */ + size_t num_predecessors() const; + + /** + @brief queries the type of this task + */ + auto type() const; + + /** + @brief dumps the task through an output stream + + @param os an output stream target + */ + void dump(std::ostream& os) const; + + private: + + cudaTask(cudaGraph_t, cudaGraphNode_t); + + cudaGraph_t _native_graph {nullptr}; + cudaGraphNode_t _native_node {nullptr}; +}; + +// Constructor +inline cudaTask::cudaTask(cudaGraph_t native_graph, cudaGraphNode_t native_node) : + _native_graph {native_graph}, _native_node {native_node} { +} + +// Function: precede +template +cudaTask& cudaTask::precede(Ts&&... tasks) { + ( + cudaGraphAddDependencies( + _native_graph, &_native_node, &(tasks._native_node), 1 + ), ... + ); + return *this; +} + +// Function: succeed +template +cudaTask& cudaTask::succeed(Ts&&... 
tasks) { + (tasks.precede(*this), ...); + return *this; +} + +// Function: num_predecessors +inline size_t cudaTask::num_predecessors() const { + size_t num_predecessors {0}; + cudaGraphNodeGetDependencies(_native_node, nullptr, &num_predecessors); + return num_predecessors; +} + +// Function: num_successors +inline size_t cudaTask::num_successors() const { + size_t num_successors {0}; + cudaGraphNodeGetDependentNodes(_native_node, nullptr, &num_successors); + return num_successors; +} + +// Function: type +inline auto cudaTask::type() const { + cudaGraphNodeType type; + cudaGraphNodeGetType(_native_node, &type); + return type; +} + +// Function: dump +inline void cudaTask::dump(std::ostream& os) const { + os << "cudaTask [type=" << to_string(type()) << ']'; +} + +/** +@brief overload of ostream inserter operator for cudaTask +*/ +inline std::ostream& operator << (std::ostream& os, const cudaTask& ct) { + ct.dump(os); + return os; } // ---------------------------------------------------------------------------- // cudaGraph // ---------------------------------------------------------------------------- - + /** -@private -*/ + * @struct cudaGraphCreator + * @brief a functor for creating a CUDA graph + * + * This structure provides an overloaded function call operator to create a + * new CUDA graph using `cudaGraphCreate`. + * + */ struct cudaGraphCreator { - cudaGraph_t operator () () const { + + /** + * @brief creates a new CUDA graph + * + * Calls `cudaGraphCreate` to generate a CUDA native graph and returns it. + * If the graph creation fails, an error is reported. + * + * @return A newly created `cudaGraph_t` instance. + * @throws If CUDA graph creation fails, an error is logged. + */ + cudaGraph_t operator () () const { cudaGraph_t g; TF_CHECK_CUDA(cudaGraphCreate(&g, 0), "failed to create a CUDA native graph"); - return g; + return g; + } + + /** + @brief return the given CUDA graph + */ + cudaGraph_t operator () (cudaGraph_t graph) const { + return graph; } + }; /** -@private -*/ + * @struct cudaGraphDeleter + * @brief a functor for deleting a CUDA graph + * + * This structure provides an overloaded function call operator to safely + * destroy a CUDA graph using `cudaGraphDestroy`. + * + */ struct cudaGraphDeleter { + + /** + * @brief deletes a CUDA graph + * + * Calls `cudaGraphDestroy` to release the CUDA graph resource if it is valid. + * + * @param g the CUDA graph to be destroyed + */ void operator () (cudaGraph_t g) const { - if(g) { - cudaGraphDestroy(g); - } + cudaGraphDestroy(g); } }; + /** -@class cudaGraph +@class cudaGraphBase -@brief class to create an RAII-styled wrapper over a CUDA executable graph +@brief class to create a CUDA graph managed by C++ smart pointer -A cudaGraph object is an RAII-styled wrapper over -a native CUDA graph (@c cudaGraph_t). -A cudaGraph object is move-only. +@tparam Creator functor to create the stream (used in constructor) +@tparam Deleter functor to delete the stream (used in destructor) + +This class wraps a `cudaGraph_t` handle with std::unique_ptr to ensure proper +resource management and automatic cleanup. 
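+
+A minimal usage sketch (the device pointer `d_data`, the host buffer `h_data`,
+and the element count `N` are assumed to be set up elsewhere):
+
+@code{.cpp}
+tf::cudaGraph graph;
+tf::cudaTask zero = graph.memset(d_data, 0, N*sizeof(int));
+tf::cudaTask d2h  = graph.memcpy(h_data, d_data, N*sizeof(int));
+zero.precede(d2h);
+
+// instantiate an executable graph and run it through a stream
+tf::cudaGraphExec exec(graph);
+tf::cudaStream stream;
+stream.run(exec);
+stream.synchronize();
+@endcode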
*/ -class cudaGraph : - public cudaObject { +template +class cudaGraphBase : public std::unique_ptr, cudaGraphDeleter> { + + static_assert(std::is_pointer_v, "cudaGraph_t is not a pointer type"); public: + + /** + @brief base std::unique_ptr type + */ + using base_type = std::unique_ptr, Deleter>; /** - @brief constructs an RAII-styled object from the given CUDA exec + @brief constructs a `cudaGraph` object by passing the given arguments to the executable CUDA graph creator - Constructs a cudaGraph object from the given CUDA graph @c native. + Constructs a `cudaGraph` object by passing the given arguments to the executable CUDA graph creator + + @param args arguments to pass to the executable CUDA graph creator */ - explicit cudaGraph(cudaGraph_t native) : cudaObject(native) { } + template + explicit cudaGraphBase(ArgsT&& ... args) : base_type( + Creator{}(std::forward(args)...), Deleter() + ) { + } /** - @brief constructs a cudaGraph object with a new CUDA graph + @brief constructs a `cudaGraph` from the given rhs using move semantics */ - cudaGraph() = default; -}; + cudaGraphBase(cudaGraphBase&&) = default; -// ---------------------------------------------------------------------------- -// cudaGraphExec -// ---------------------------------------------------------------------------- + /** + @brief assign the rhs to `*this` using move semantics + */ + cudaGraphBase& operator = (cudaGraphBase&&) = default; + + /** + @brief queries the number of nodes in a native CUDA graph + */ + size_t num_nodes() const; -/** -@private -*/ -struct cudaGraphExecCreator { - cudaGraphExec_t operator () () const { return nullptr; } -}; + /** + @brief queries the number of edges in a native CUDA graph + */ + size_t num_edges() const; -/** -@private -*/ -struct cudaGraphExecDeleter { - void operator () (cudaGraphExec_t executable) const { - if(executable) { - cudaGraphExecDestroy(executable); - } - } -}; + /** + @brief queries if the graph is empty + */ + bool empty() const; -/** -@class cudaGraphExec + /** + @brief dumps the CUDA graph to a DOT format through the given output stream + + @param os target output stream + */ + void dump(std::ostream& os); -@brief class to create an RAII-styled wrapper over a CUDA executable graph + // ------------------------------------------------------------------------ + // Graph building routines + // ------------------------------------------------------------------------ -A cudaGraphExec object is an RAII-styled wrapper over -a native CUDA executable graph (@c cudaGraphExec_t). -A cudaGraphExec object is move-only. -*/ -class cudaGraphExec : - public cudaObject { + /** + @brief creates a no-operation task - public: + @return a tf::cudaTask handle + + An empty node performs no operation during execution, + but can be used for transitive ordering. + For example, a phased execution graph with 2 groups of @c n nodes + with a barrier between them can be represented using an empty node + and @c 2*n dependency edges, + rather than no empty node and @c n^2 dependency edges. + */ + cudaTask noop(); /** - @brief constructs an RAII-styled object from the given CUDA exec + @brief creates a host task that runs a callable on the host + + @tparam C callable type + + @param callable a callable object with neither arguments nor return + (i.e., constructible from @c std::function) + @param user_data a pointer to the user data + + @return a tf::cudaTask handle - Constructs a cudaGraphExec object which owns @c exec. 
+ A host task can only execute CPU-specific functions and cannot do any CUDA calls + (e.g., @c cudaMalloc). */ - explicit cudaGraphExec(cudaGraphExec_t exec) : cudaObject(exec) { } - + template + cudaTask host(C&& callable, void* user_data); + /** - @brief default constructor + @brief creates a kernel task + + @tparam F kernel function type + @tparam ArgsT kernel function parameters type + + @param g configured grid + @param b configured block + @param s configured shared memory size in bytes + @param f kernel function + @param args arguments to forward to the kernel function by copy + + @return a tf::cudaTask handle */ - cudaGraphExec() = default; - + template + cudaTask kernel(dim3 g, dim3 b, size_t s, F f, ArgsT... args); + /** - @brief instantiates the executable from the given CUDA graph + @brief creates a memset task that fills untyped data with a byte value + + @param dst pointer to the destination device memory area + @param v value to set for each byte of specified memory + @param count size in bytes to set + + @return a tf::cudaTask handle + + A memset task fills the first @c count bytes of device memory area + pointed by @c dst with the byte value @c v. */ - void instantiate(cudaGraph_t graph) { - cudaGraphExecDeleter {} (object); - TF_CHECK_CUDA( - cudaGraphInstantiate(&object, graph, nullptr, nullptr, 0), - "failed to create an executable graph" - ); - } - + cudaTask memset(void* dst, int v, size_t count); + /** - @brief updates the executable from the given CUDA graph + @brief creates a memcpy task that copies untyped data in bytes + + @param tgt pointer to the target memory block + @param src pointer to the source memory block + @param bytes bytes to copy + + @return a tf::cudaTask handle + + A memcpy task transfers @c bytes of data from a source location + to a target location. Direction can be arbitrary among CPUs and GPUs. */ - cudaGraphExecUpdateResult update(cudaGraph_t graph) { - cudaGraphNode_t error_node; - cudaGraphExecUpdateResult error_result; - cudaGraphExecUpdate(object, graph, &error_node, &error_result); - return error_result; - } - + cudaTask memcpy(void* tgt, const void* src, size_t bytes); + /** - @brief launches the executable graph via the given stream - */ - void launch(cudaStream_t stream) { - TF_CHECK_CUDA( - cudaGraphLaunch(object, stream), "failed to launch a CUDA executable graph" - ); - } -}; + @brief creates a memset task that sets a typed memory block to zero -// ---------------------------------------------------------------------------- -// cudaFlowGraph class -// ---------------------------------------------------------------------------- + @tparam T element type (size of @c T must be either 1, 2, or 4) + @param dst pointer to the destination device memory area + @param count number of elements -// class: cudaFlowGraph -class cudaFlowGraph { + @return a tf::cudaTask handle - friend class cudaFlowNode; - friend class cudaTask; - friend class cudaFlowCapturer; - friend class cudaFlow; - friend class cudaFlowOptimizerBase; - friend class cudaFlowSequentialOptimizer; - friend class cudaFlowLinearOptimizer; - friend class cudaFlowRoundRobinOptimizer; - friend class Taskflow; - friend class Executor; + A zero task zeroes the first @c count elements of type @c T + in a device memory area pointed by @c dst. 
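+
+  For example (a sketch; @c d_vec is assumed to point to a device array of
+  @c N floats and @c graph to be an instance of this class):
+
+  @code{.cpp}
+  tf::cudaTask task = graph.zero(d_vec, N);
+  @endcode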
+ */ + template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr + > + cudaTask zero(T* dst, size_t count); - constexpr static int OFFLOADED = 0x01; - constexpr static int CHANGED = 0x02; - constexpr static int UPDATED = 0x04; + /** + @brief creates a memset task that fills a typed memory block with a value - public: + @tparam T element type (size of @c T must be either 1, 2, or 4) - cudaFlowGraph() = default; - ~cudaFlowGraph() = default; + @param dst pointer to the destination device memory area + @param value value to fill for each element of type @c T + @param count number of elements - cudaFlowGraph(const cudaFlowGraph&) = delete; - cudaFlowGraph(cudaFlowGraph&&) = default; + @return a tf::cudaTask handle - cudaFlowGraph& operator = (const cudaFlowGraph&) = delete; - cudaFlowGraph& operator = (cudaFlowGraph&&) = default; + A fill task fills the first @c count elements of type @c T with @c value + in a device memory area pointed by @c dst. + The value to fill is interpreted in type @c T rather than byte. + */ + template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr + > + cudaTask fill(T* dst, T value, size_t count); - template - cudaFlowNode* emplace_back(ArgsT&&...); + /** + @brief creates a memcopy task that copies typed data - bool empty() const; + @tparam T element type (non-void) - void clear(); - void dump(std::ostream&, const void*, const std::string&) const ; + @param tgt pointer to the target memory block + @param src pointer to the source memory block + @param num number of elements to copy - private: + @return a tf::cudaTask handle - int _state{CHANGED}; - cudaGraph _native_handle {nullptr}; - std::vector> _nodes; -}; + A copy task transfers num*sizeof(T) bytes of data from a source location + to a target location. Direction can be arbitrary among CPUs and GPUs. 
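+
+  For example (a sketch; @c d_buf is assumed to be a device array of @c N
+  integers, @c h_buf a host array of the same size, and @c graph an instance
+  of this class):
+
+  @code{.cpp}
+  tf::cudaTask d2h = graph.copy(h_buf, d_buf, N);  // copy N ints from device to host
+  @endcode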
+ */ + template , void>* = nullptr + > + cudaTask copy(T* tgt, const T* src, size_t num); + + // ------------------------------------------------------------------------ + // generic algorithms + // ------------------------------------------------------------------------ -// ---------------------------------------------------------------------------- -// cudaFlowNode class -// ---------------------------------------------------------------------------- + /** + @brief runs a callable with only a single kernel thread -/** -@private -@class: cudaFlowNode -*/ -class cudaFlowNode { + @tparam C callable type - friend class cudaFlowGraph; - friend class cudaTask; - friend class cudaFlow; - friend class cudaFlowCapturer; - friend class cudaFlowOptimizerBase; - friend class cudaFlowSequentialOptimizer; - friend class cudaFlowLinearOptimizer; - friend class cudaFlowRoundRobinOptimizer; - friend class Taskflow; - friend class Executor; + @param c callable to run by a single kernel thread - // Empty handle - struct Empty { - }; + @return a tf::cudaTask handle + */ + template + cudaTask single_task(C c); + + /** + @brief applies a callable to each dereferenced element of the data array - // Host handle - struct Host { + @tparam I iterator type + @tparam C callable type + @tparam E execution poligy (default tf::cudaDefaultExecutionPolicy) - template - Host(C&&); + @param first iterator to the beginning (inclusive) + @param last iterator to the end (exclusive) + @param callable a callable object to apply to the dereferenced iterator - std::function func; + @return a tf::cudaTask handle - static void callback(void*); - }; + This method is equivalent to the parallel execution of the following loop on a GPU: - // Memset handle - struct Memset { - }; + @code{.cpp} + for(auto itr = first; itr != last; itr++) { + callable(*itr); + } + @endcode + */ + template + cudaTask for_each(I first, I last, C callable); + + /** + @brief applies a callable to each index in the range with the step size - // Memcpy handle - struct Memcpy { - }; + @tparam I index type + @tparam C callable type + @tparam E execution poligy (default tf::cudaDefaultExecutionPolicy) - // Kernel handle - struct Kernel { + @param first beginning index + @param last last index + @param step step size + @param callable the callable to apply to each element in the data array - template - Kernel(F&& f); + @return a tf::cudaTask handle - void* func {nullptr}; - }; + This method is equivalent to the parallel execution of the following loop on a GPU: - // Subflow handle - struct Subflow { - cudaFlowGraph cfg; - }; + @code{.cpp} + // step is positive [first, last) + for(auto i=first; ilast; i+=step) { + callable(i); + } + @endcode + */ + template + cudaTask for_each_index(I first, I last, I step, C callable); + + /** + @brief applies a callable to a source range and stores the result in a target range - template - Capture(C&&); + @tparam I input iterator type + @tparam O output iterator type + @tparam C unary operator type + @tparam E execution poligy (default tf::cudaDefaultExecutionPolicy) - std::function work; + @param first iterator to the beginning of the input range + @param last iterator to the end of the input range + @param output iterator to the beginning of the output range + @param op the operator to apply to transform each element in the range - cudaEvent_t event; - size_t level; - size_t lid; - size_t idx; - }; + @return a tf::cudaTask handle - using handle_t = std::variant< - Empty, - Host, - Memset, - Memcpy, - Kernel, - Subflow, - Capture 
- >; + This method is equivalent to the parallel execution of the following loop on a GPU: - public: + @code{.cpp} + while (first != last) { + *output++ = callable(*first++); + } + @endcode + */ + template + cudaTask transform(I first, I last, O output, C op); + + /** + @brief creates a task to perform parallel transforms over two ranges of items + + @tparam I1 first input iterator type + @tparam I2 second input iterator type + @tparam O output iterator type + @tparam C unary operator type + @tparam E execution poligy (default tf::cudaDefaultExecutionPolicy) + + @param first1 iterator to the beginning of the input range + @param last1 iterator to the end of the input range + @param first2 iterato + @param output iterator to the beginning of the output range + @param op binary operator to apply to transform each pair of items in the + two input ranges - // variant index - constexpr static auto EMPTY = get_index_v; - constexpr static auto HOST = get_index_v; - constexpr static auto MEMSET = get_index_v; - constexpr static auto MEMCPY = get_index_v; - constexpr static auto KERNEL = get_index_v; - constexpr static auto SUBFLOW = get_index_v; - constexpr static auto CAPTURE = get_index_v; + @return cudaTask handle - cudaFlowNode() = delete; + This method is equivalent to the parallel execution of the following loop on a GPU: - template - cudaFlowNode(cudaFlowGraph&, ArgsT&&...); + @code{.cpp} + while (first1 != last1) { + *output++ = op(*first1++, *first2++); + } + @endcode + */ + template + cudaTask transform(I1 first1, I1 last1, I2 first2, O output, C op); private: - cudaFlowGraph& _cfg; + cudaGraphBase(const cudaGraphBase&) = delete; + cudaGraphBase& operator = (const cudaGraphBase&) = delete; +}; - std::string _name; +// query the number of nodes +template +size_t cudaGraphBase::num_nodes() const { + size_t n; + TF_CHECK_CUDA( + cudaGraphGetNodes(this->get(), nullptr, &n), + "failed to get native graph nodes" + ); + return n; +} - handle_t _handle; +// query the emptiness +template +bool cudaGraphBase::empty() const { + return num_nodes() == 0; +} - cudaGraphNode_t _native_handle {nullptr}; +// query the number of edges +template +size_t cudaGraphBase::num_edges() const { + size_t num_edges; + TF_CHECK_CUDA( + cudaGraphGetEdges(this->get(), nullptr, nullptr, &num_edges), + "failed to get native graph edges" + ); + return num_edges; +} - SmallVector _successors; - SmallVector _dependents; +//// dump the graph +//inline void cudaGraph::dump(std::ostream& os) { +// +// // acquire the native handle +// auto g = this->get(); +// +// os << "digraph cudaGraph {\n"; +// +// std::stack> stack; +// stack.push(std::make_tuple(g, nullptr, 1)); +// +// int pl = 0; +// +// while(stack.empty() == false) { +// +// auto [graph, parent, l] = stack.top(); +// stack.pop(); +// +// for(int i=0; i " << 'p' << to << ";\n"; +// } +// +// for(auto& node : nodes) { +// auto type = cuda_get_graph_node_type(node); +// if(type == cudaGraphNodeTypeGraph) { +// +// cudaGraph_t child_graph; +// TF_CHECK_CUDA(cudaGraphChildGraphNodeGetGraph(node, &child_graph), ""); +// stack.push(std::make_tuple(child_graph, node, l+1)); +// +// os << 'p' << node << "[" +// << "shape=folder, style=filled, fontcolor=white, fillcolor=purple, " +// << "label=\"cudaGraph-L" << l+1 +// << "\"];\n"; +// } +// else { +// os << 'p' << node << "[label=\"" +// << to_string(type) +// << "\"];\n"; +// } +// } +// +// // precede to parent +// if(parent != nullptr) { +// std::unordered_set successors; +// for(const auto& p : edges) { +// 
successors.insert(p.first); +// } +// for(auto node : nodes) { +// if(successors.find(node) == successors.end()) { +// os << 'p' << node << " -> " << 'p' << parent << ";\n"; +// } +// } +// } +// +// // set the previous level +// pl = l; +// } +// +// for(int i=0; i<=pl; i++) { +// os << "}\n"; +// } +//} + +// dump the graph +template +void cudaGraphBase::dump(std::ostream& os) { + + // Generate a unique temporary filename in the system's temp directory using filesystem + auto temp_path = std::filesystem::temp_directory_path() / "graph_"; + std::random_device rd; + std::uniform_int_distribution dist(100000, 999999); // Generates a random number + temp_path += std::to_string(dist(rd)) + ".dot"; + + // Call the original function with the temporary file + TF_CHECK_CUDA(cudaGraphDebugDotPrint(this->get(), temp_path.string().c_str(), 0), ""); + + // Read the file and write to the output stream + std::ifstream file(temp_path); + if (file) { + os << file.rdbuf(); // Copy file contents to the stream + file.close(); + std::filesystem::remove(temp_path); // Clean up the temporary file + } else { + TF_THROW("failed to open ", temp_path, " for dumping the CUDA graph"); + } +} - void _precede(cudaFlowNode*); -}; +// Function: noop +template +cudaTask cudaGraphBase::noop() { -// ---------------------------------------------------------------------------- -// cudaFlowNode definitions -// ---------------------------------------------------------------------------- + cudaGraphNode_t node; -// Host handle constructor + TF_CHECK_CUDA( + cudaGraphAddEmptyNode(&node, this->get(), nullptr, 0), + "failed to create a no-operation (empty) node" + ); + + return cudaTask(this->get(), node); +} + +// Function: host +template template -cudaFlowNode::Host::Host(C&& c) : func {std::forward(c)} { +cudaTask cudaGraphBase::host(C&& callable, void* user_data) { + + cudaGraphNode_t node; + cudaHostNodeParams p {callable, user_data}; + + TF_CHECK_CUDA( + cudaGraphAddHostNode(&node, this->get(), nullptr, 0, &p), + "failed to create a host node" + ); + + return cudaTask(this->get(), node); } -// Host callback -inline void cudaFlowNode::Host::callback(void* data) { - static_cast(data)->func(); -}; +// Function: kernel +template +template +cudaTask cudaGraphBase::kernel( + dim3 g, dim3 b, size_t s, F f, ArgsT... args +) { -// Kernel handle constructor -template -cudaFlowNode::Kernel::Kernel(F&& f) : - func {std::forward(f)} { + cudaGraphNode_t node; + cudaKernelNodeParams p; + + void* arguments[sizeof...(ArgsT)] = { (void*)(&args)... }; + + p.func = (void*)f; + p.gridDim = g; + p.blockDim = b; + p.sharedMemBytes = s; + p.kernelParams = arguments; + p.extra = nullptr; + + TF_CHECK_CUDA( + cudaGraphAddKernelNode(&node, this->get(), nullptr, 0, &p), + "failed to create a kernel task" + ); + + return cudaTask(this->get(), node); } -// Capture handle constructor -template -cudaFlowNode::Capture::Capture(C&& c) : - work {std::forward(c)} { +// Function: zero +template +template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* +> +cudaTask cudaGraphBase::zero(T* dst, size_t count) { + + cudaGraphNode_t node; + auto p = cuda_get_zero_parms(dst, count); + + TF_CHECK_CUDA( + cudaGraphAddMemsetNode(&node, this->get(), nullptr, 0, &p), + "failed to create a memset (zero) task" + ); + + return cudaTask(this->get(), node); } -// Constructor -template -cudaFlowNode::cudaFlowNode(cudaFlowGraph& graph, ArgsT&&... 
args) : - _cfg {graph}, - _handle {std::forward(args)...} { +// Function: fill +template +template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* +> +cudaTask cudaGraphBase::fill(T* dst, T value, size_t count) { + + cudaGraphNode_t node; + auto p = cuda_get_fill_parms(dst, value, count); + TF_CHECK_CUDA( + cudaGraphAddMemsetNode(&node, this->get(), nullptr, 0, &p), + "failed to create a memset (fill) task" + ); + + return cudaTask(this->get(), node); } -// Procedure: _precede -inline void cudaFlowNode::_precede(cudaFlowNode* v) { +// Function: copy +template +template < + typename T, + std::enable_if_t, void>* +> +cudaTask cudaGraphBase::copy(T* tgt, const T* src, size_t num) { - _cfg._state |= cudaFlowGraph::CHANGED; + cudaGraphNode_t node; + auto p = cuda_get_copy_parms(tgt, src, num); - _successors.push_back(v); - v->_dependents.push_back(this); + TF_CHECK_CUDA( + cudaGraphAddMemcpyNode(&node, this->get(), nullptr, 0, &p), + "failed to create a memcpy (copy) task" + ); - // capture node doesn't have the native graph yet - if(_handle.index() != cudaFlowNode::CAPTURE) { - TF_CHECK_CUDA( - cudaGraphAddDependencies( - _cfg._native_handle, &_native_handle, &v->_native_handle, 1 - ), - "failed to add a preceding link ", this, "->", v - ); - } + return cudaTask(this->get(), node); } -// ---------------------------------------------------------------------------- -// cudaGraph definitions -// ---------------------------------------------------------------------------- +// Function: memset +template +cudaTask cudaGraphBase::memset(void* dst, int ch, size_t count) { -// Function: empty -inline bool cudaFlowGraph::empty() const { - return _nodes.empty(); -} - -// Procedure: clear -inline void cudaFlowGraph::clear() { - _state |= cudaFlowGraph::CHANGED; - _nodes.clear(); - _native_handle.clear(); -} - -// Function: emplace_back -template -cudaFlowNode* cudaFlowGraph::emplace_back(ArgsT&&... 
args) { - - _state |= cudaFlowGraph::CHANGED; - - auto node = std::make_unique(std::forward(args)...); - _nodes.emplace_back(std::move(node)); - return _nodes.back().get(); - - // TODO: use object pool to save memory - //auto node = new cudaFlowNode(std::forward(args)...); - //_nodes.push_back(node); - //return node; -} - -// Procedure: dump the graph to a DOT format -inline void cudaFlowGraph::dump( - std::ostream& os, const void* root, const std::string& root_name -) const { - - // recursive dump with stack - std::stack> stack; - stack.push(std::make_tuple(this, nullptr, 1)); - - int pl = 0; - - while(!stack.empty()) { - - auto [graph, parent, l] = stack.top(); - stack.pop(); - - for(int i=0; i_name.empty()) os << 'p' << parent; - else os << parent->_name; - os << "\";\n" << "color=\"purple\"\n"; - } - - for(auto& node : graph->_nodes) { - - auto v = node.get(); - - os << 'p' << v << "[label=\""; - if(v->_name.empty()) { - os << 'p' << v << "\""; - } - else { - os << v->_name << "\""; - } - - switch(v->_handle.index()) { - case cudaFlowNode::KERNEL: - os << " style=\"filled\"" - << " color=\"white\" fillcolor=\"black\"" - << " fontcolor=\"white\"" - << " shape=\"box3d\""; - break; - - case cudaFlowNode::SUBFLOW: - stack.push(std::make_tuple( - &(std::get_if(&v->_handle)->cfg), v, l+1) - ); - os << " style=\"filled\"" - << " color=\"black\" fillcolor=\"purple\"" - << " fontcolor=\"white\"" - << " shape=\"folder\""; - break; - - default: - break; - } - - os << "];\n"; - - for(const auto s : v->_successors) { - os << 'p' << v << " -> " << 'p' << s << ";\n"; - } - - if(v->_successors.size() == 0) { - if(parent == nullptr) { - if(root) { - os << 'p' << v << " -> p" << root << ";\n"; - } - } - else { - os << 'p' << v << " -> p" << parent << ";\n"; - } - } - } - - // set the previous level - pl = l; - } + cudaGraphNode_t node; + auto p = cuda_get_memset_parms(dst, ch, count); - for(int i=0; iget(), nullptr, 0, &p), + "failed to create a memset task" + ); + return cudaTask(this->get(), node); } +// Function: memcpy +template +cudaTask cudaGraphBase::memcpy(void* tgt, const void* src, size_t bytes) { + + cudaGraphNode_t node; + auto p = cuda_get_memcpy_parms(tgt, src, bytes); + + TF_CHECK_CUDA( + cudaGraphAddMemcpyNode(&node, this->get(), nullptr, 0, &p), + "failed to create a memcpy task" + ); + + return cudaTask(this->get(), node); +} + + + + } // end of namespace tf ----------------------------------------------------- diff --git a/bundled/taskflow-3.10.0/taskflow/cuda/cuda_graph_exec.hpp b/bundled/taskflow-3.10.0/taskflow/cuda/cuda_graph_exec.hpp new file mode 100644 index 0000000000..e829fe8b2b --- /dev/null +++ b/bundled/taskflow-3.10.0/taskflow/cuda/cuda_graph_exec.hpp @@ -0,0 +1,378 @@ +#pragma once + +#include "cuda_graph.hpp" + + +namespace tf { + +// ---------------------------------------------------------------------------- +// cudaGraphExec +// ---------------------------------------------------------------------------- + +/** +@struct cudaGraphExecCreator +@brief a functor for creating an executable CUDA graph + +This structure provides an overloaded function call operator to create a +new executable CUDA graph using `cudaGraphCreate`. 
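To make the relationship between the two new headers concrete, here is a minimal, hedged sketch of how the node factories of `tf::cudaGraph` shown above feed into this creator. The kernel, pointers, and sizes (`my_kernel`, `d_data`, `h_data`, `n`) are illustrative placeholders rather than names from the patch; constructing a `tf::cudaGraphExec` from the graph goes through the `cudaGraph_t` overload below, which calls `cudaGraphInstantiate`.

```cpp
// Illustrative sketch (names not from this patch): compose a small graph with
// the cudaGraphBase node factories, then instantiate and run it once.
__global__ void my_kernel(float* data, size_t n) {
  size_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] += 1.0f;
}

void sketch(float* d_data, float* h_data, size_t n) {
  tf::cudaGraph cg;

  tf::cudaTask zero = cg.zero(d_data, n);                    // memset node
  tf::cudaTask kern = cg.kernel(                             // kernel node
    static_cast<unsigned>((n + 255) / 256), 256, 0, my_kernel, d_data, n
  );
  tf::cudaTask d2h  = cg.copy(h_data, d_data, n);            // memcpy node
  zero.precede(kern);
  kern.precede(d2h);

  // cudaGraphExecCreator's cudaGraph_t overload invokes cudaGraphInstantiate
  tf::cudaGraphExec exec(cg);
  tf::cudaStream stream;
  stream.run(exec).synchronize();
}
```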
+*/ +struct cudaGraphExecCreator { + /** + @brief returns a null executable CUDA graph + */ + cudaGraphExec_t operator () () const { + return nullptr; + } + + /** + @brief returns the given executable graph + */ + cudaGraphExec_t operator () (cudaGraphExec_t exec) const { + return exec; + } + + /** + @brief returns a newly instantiated executable graph from the given CUDA graph + */ + cudaGraphExec_t operator () (cudaGraph_t graph) const { + cudaGraphExec_t exec; + TF_CHECK_CUDA( + cudaGraphInstantiate(&exec, graph, nullptr, nullptr, 0), + "failed to create an executable graph" + ); + return exec; + } + + /** + @brief returns a newly instantiated executable graph from the given CUDA graph + */ + template + cudaGraphExec_t operator () (const cudaGraphBase& graph) const { + return this->operator()(graph.get()); + } +}; + +/** +@struct cudaGraphExecDeleter +@brief a functor for deleting an executable CUDA graph + +This structure provides an overloaded function call operator to safely +destroy a CUDA graph using `cudaGraphDestroy`. +*/ +struct cudaGraphExecDeleter { + /** + * @brief deletes an executable CUDA graph + * + * Calls `cudaGraphDestroy` to release the CUDA graph resource if it is valid. + * + * @param executable the executable CUDA graph to be destroyed + */ + void operator () (cudaGraphExec_t executable) const { + cudaGraphExecDestroy(executable); + } +}; + +/** +@class cudaGraphExecBase + +@brief class to create an executable CUDA graph managed by C++ smart pointer + +@tparam Creator functor to create the stream (used in constructor) +@tparam Deleter functor to delete the stream (used in destructor) + +This class wraps a `cudaGraphExec_t` handle with `std::unique_ptr` to ensure proper +resource management and automatic cleanup. +*/ +template +class cudaGraphExecBase : public std::unique_ptr, Deleter> { + + static_assert(std::is_pointer_v, "cudaGraphExec_t is not a pointer type"); + + public: + + /** + @brief base std::unique_ptr type + */ + using base_type = std::unique_ptr, Deleter>; + + /** + @brief constructs a `cudaGraphExec` object by passing the given arguments to the executable CUDA graph creator + + Constructs a `cudaGraphExec` object by passing the given arguments to the executable CUDA graph creator + + @param args arguments to pass to the executable CUDA graph creator + */ + template + explicit cudaGraphExecBase(ArgsT&& ... args) : base_type( + Creator{}(std::forward(args)...), Deleter() + ) {} + + /** + @brief constructs a `cudaGraphExec` from the given rhs using move semantics + */ + cudaGraphExecBase(cudaGraphExecBase&&) = default; + + /** + @brief assign the rhs to `*this` using move semantics + */ + cudaGraphExecBase& operator = (cudaGraphExecBase&&) = default; + + // ---------------------------------------------------------------------------------------------- + // Update Methods + // ---------------------------------------------------------------------------------------------- + + /** + @brief updates parameters of a host task + + This method updates the parameter of the given host task (similar to tf::cudaFlow::host). + */ + template + void host(cudaTask task, C&& callable, void* user_data); + + /** + @brief updates parameters of a kernel task + + The method is similar to tf::cudaFlow::kernel but operates on a task + of type tf::cudaTaskType::KERNEL. + The kernel function name must NOT change. + */ + template + void kernel( + cudaTask task, dim3 g, dim3 b, size_t shm, F f, ArgsT... 
args + ); + + /** + @brief updates parameters of a memset task + + The method is similar to tf::cudaFlow::memset but operates on a task + of type tf::cudaTaskType::MEMSET. + The source/destination memory may have different address values but + must be allocated from the same contexts as the original + source/destination memory. + */ + void memset(cudaTask task, void* dst, int ch, size_t count); + + /** + @brief updates parameters of a memcpy task + + The method is similar to tf::cudaFlow::memcpy but operates on a task + of type tf::cudaTaskType::MEMCPY. + The source/destination memory may have different address values but + must be allocated from the same contexts as the original + source/destination memory. + */ + void memcpy(cudaTask task, void* tgt, const void* src, size_t bytes); + + /** + @brief updates parameters of a memset task to a zero task + + The method is similar to tf::cudaFlow::zero but operates on + a task of type tf::cudaTaskType::MEMSET. + + The source/destination memory may have different address values but + must be allocated from the same contexts as the original + source/destination memory. + */ + template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr + > + void zero(cudaTask task, T* dst, size_t count); + + /** + @brief updates parameters of a memset task to a fill task + + The method is similar to tf::cudaFlow::fill but operates on a task + of type tf::cudaTaskType::MEMSET. + + The source/destination memory may have different address values but + must be allocated from the same contexts as the original + source/destination memory. + */ + template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr + > + void fill(cudaTask task, T* dst, T value, size_t count); + + /** + @brief updates parameters of a memcpy task to a copy task + + The method is similar to tf::cudaFlow::copy but operates on a task + of type tf::cudaTaskType::MEMCPY. + The source/destination memory may have different address values but + must be allocated from the same contexts as the original + source/destination memory. + */ + template , void>* = nullptr + > + void copy(cudaTask task, T* tgt, const T* src, size_t num); + + //--------------------------------------------------------------------------- + // Algorithm Primitives + //--------------------------------------------------------------------------- + + /** + @brief updates a single-threaded kernel task + + This method is similar to cudaFlow::single_task but operates + on an existing task. 
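The update methods declared here target the instantiate-once, relaunch-many pattern. A hedged sketch follows, reusing the illustrative `my_kernel` from the earlier example and assuming `kern` is the `tf::cudaTask` that was returned when the kernel node was added to the graph.

```cpp
// Illustrative sketch: patch node parameters in place between launches
// instead of rebuilding the graph.
void relaunch(tf::cudaGraph& cg, tf::cudaTask kern,
              float* d_data, size_t n, size_t rounds) {
  tf::cudaGraphExec exec(cg);   // instantiate once
  tf::cudaStream stream;

  for (size_t r = 0; r < rounds; ++r) {
    // update the kernel node's launch parameters and arguments; the kernel
    // function itself must not change (cudaGraphExecKernelNodeSetParams rule)
    exec.kernel(kern, static_cast<unsigned>((n + 255) / 256), 256, 0,
                my_kernel, d_data, n);
    stream.run(exec).synchronize();
  }
}
```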
+ */ + template + void single_task(cudaTask task, C c); + + /** + @brief updates parameters of a `for_each` kernel task created from the CUDA graph of `*this` + */ + template + void for_each(cudaTask task, I first, I last, C callable); + + /** + @brief updates parameters of a `for_each_index` kernel task created from the CUDA graph of `*this` + */ + template + void for_each_index(cudaTask task, I first, I last, I step, C callable); + + /** + @brief updates parameters of a `transform` kernel task created from the CUDA graph of `*this` + */ + template + void transform(cudaTask task, I first, I last, O output, C c); + + /** + @brief updates parameters of a `transform` kernel task created from the CUDA graph of `*this` + */ + template + void transform(cudaTask task, I1 first1, I1 last1, I2 first2, O output, C c); + + + private: + + cudaGraphExecBase(const cudaGraphExecBase&) = delete; + + cudaGraphExecBase& operator = (const cudaGraphExecBase&) = delete; +}; + +// ------------------------------------------------------------------------------------------------ +// update methods +// ------------------------------------------------------------------------------------------------ + +// Function: host +template +template +void cudaGraphExecBase::host(cudaTask task, C&& func, void* user_data) { + cudaHostNodeParams p {func, user_data}; + TF_CHECK_CUDA( + cudaGraphExecHostNodeSetParams(this->get(), task._native_node, &p), + "failed to update kernel parameters on ", task + ); +} + +// Function: update kernel parameters +template +template +void cudaGraphExecBase::kernel( + cudaTask task, dim3 g, dim3 b, size_t s, F f, ArgsT... args +) { + cudaKernelNodeParams p; + + void* arguments[sizeof...(ArgsT)] = { (void*)(&args)... }; + p.func = (void*)f; + p.gridDim = g; + p.blockDim = b; + p.sharedMemBytes = s; + p.kernelParams = arguments; + p.extra = nullptr; + + TF_CHECK_CUDA( + cudaGraphExecKernelNodeSetParams(this->get(), task._native_node, &p), + "failed to update kernel parameters on ", task + ); +} + +// Function: update copy parameters +template +template , void>*> +void cudaGraphExecBase::copy(cudaTask task, T* tgt, const T* src, size_t num) { + auto p = cuda_get_copy_parms(tgt, src, num); + TF_CHECK_CUDA( + cudaGraphExecMemcpyNodeSetParams(this->get(), task._native_node, &p), + "failed to update memcpy parameters on ", task + ); +} + +// Function: update memcpy parameters +template +void cudaGraphExecBase::memcpy( + cudaTask task, void* tgt, const void* src, size_t bytes +) { + auto p = cuda_get_memcpy_parms(tgt, src, bytes); + + TF_CHECK_CUDA( + cudaGraphExecMemcpyNodeSetParams(this->get(), task._native_node, &p), + "failed to update memcpy parameters on ", task + ); +} + +// Procedure: memset +template +void cudaGraphExecBase::memset(cudaTask task, void* dst, int ch, size_t count) { + auto p = cuda_get_memset_parms(dst, ch, count); + TF_CHECK_CUDA( + cudaGraphExecMemsetNodeSetParams(this->get(), task._native_node, &p), + "failed to update memset parameters on ", task + ); +} + +// Procedure: fill +template +template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* +> +void cudaGraphExecBase::fill(cudaTask task, T* dst, T value, size_t count) { + auto p = cuda_get_fill_parms(dst, value, count); + TF_CHECK_CUDA( + cudaGraphExecMemsetNodeSetParams(this->get(), task._native_node, &p), + "failed to update memset parameters on ", task + ); +} + +// Procedure: zero +template +template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* +> +void cudaGraphExecBase::zero(cudaTask task, T* 
dst, size_t count) { + auto p = cuda_get_zero_parms(dst, count); + TF_CHECK_CUDA( + cudaGraphExecMemsetNodeSetParams(this->get(), task._native_node, &p), + "failed to update memset parameters on ", task + ); +} + +//------------------------------------------------------------------------------------------------- +// forward declaration +//------------------------------------------------------------------------------------------------- + +/** +@private +*/ +template +cudaStreamBase& cudaStreamBase::run(cudaGraphExec_t exec) { + TF_CHECK_CUDA( + cudaGraphLaunch(exec, this->get()), "failed to launch a CUDA executable graph" + ); + return *this; +} + +/** +@private +*/ +template +template +cudaStreamBase& cudaStreamBase::run(const cudaGraphExecBase& exec) { + return run(exec.get()); +} + + + +} // end of namespace tf ------------------------------------------------------------------------- diff --git a/bundled/taskflow-3.10.0/taskflow/cuda/cuda_stream.hpp b/bundled/taskflow-3.10.0/taskflow/cuda/cuda_stream.hpp index 1e312605be..e20eab09d6 100644 --- a/bundled/taskflow-3.10.0/taskflow/cuda/cuda_stream.hpp +++ b/bundled/taskflow-3.10.0/taskflow/cuda/cuda_stream.hpp @@ -1,6 +1,6 @@ #pragma once -#include "cuda_object.hpp" +#include "cuda_error.hpp" /** @file cuda_stream.hpp @@ -10,217 +10,328 @@ namespace tf { +// ---------------------------------------------------------------------------- +// cudaEventBase +// ---------------------------------------------------------------------------- + +/** +@struct cudaEventCreator + +@brief functor to create a `cudaEvent_t` object +*/ +struct cudaEventCreator { + + /** + @brief creates a new `cudaEvent_t` object using `cudaEventCreate` + */ + cudaEvent_t operator () () const { + cudaEvent_t event; + TF_CHECK_CUDA(cudaEventCreate(&event), "failed to create a CUDA event"); + return event; + } + + /** + @brief creates a new `cudaEvent_t` object using `cudaEventCreate` with the given `flag` + */ + cudaEvent_t operator () (unsigned int flag) const { + cudaEvent_t event; + TF_CHECK_CUDA( + cudaEventCreateWithFlags(&event, flag), + "failed to create a CUDA event with flag=", flag + ); + return event; + } + + /** + @brief returns the given `cudaEvent_t` object + */ + cudaEvent_t operator () (cudaEvent_t event) const { + return event; + } +}; + +/** +@struct cudaEventDeleter + +@brief functor to delete a `cudaEvent_t` object +*/ +struct cudaEventDeleter { + + /** + @brief deletes the given `cudaEvent_t` object using `cudaEventDestroy` + */ + void operator () (cudaEvent_t event) const { + cudaEventDestroy(event); + } +}; + +/** +@class cudaEventBase + +@brief class to create a smart pointer wrapper for managing `cudaEvent_t` + +@tparam Creator functor to create the stream (used in constructor) +@tparam Deleter functor to delete the stream (used in destructor) + +The `cudaEventBase` class encapsulates a `cudaEvent_t` using `std::unique_ptr`, ensuring that +CUDA events are properly created and destroyed with a unique ownership. +*/ +template +class cudaEventBase : public std::unique_ptr, Deleter> { + + static_assert(std::is_pointer_v, "cudaEvent_t is not a pointer type"); + + public: + + /** + @brief base type for the underlying unique pointer + + This alias provides a shorthand for the underlying `std::unique_ptr` type that manages + CUDA event resources with an associated deleter. 
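A short, hedged usage sketch of the event wrapper together with the `record()` and `wait()` methods of the stream wrapper defined later in this file; the stream names are illustrative.

```cpp
// Illustrative sketch: cross-stream ordering through the event wrapper.
void order_streams() {
  tf::cudaStream producer, consumer;
  tf::cudaEvent  ev(cudaEventDisableTiming);   // cudaEventCreator{}(flag)

  // ... enqueue asynchronous work on 'producer' here ...
  producer.record(ev.get());   // cudaEventRecord on the producer stream
  consumer.wait(ev.get());     // cudaStreamWaitEvent: consumer waits for it
  // ... enqueue dependent work on 'consumer' here ...
  consumer.synchronize();
}
```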
+ */ + using base_type = std::unique_ptr, Deleter>; + + /** + @brief constructs a `cudaEvent` object by passing the given arguments to the event creator + + Constructs a `cudaEvent` object by passing the given arguments to the event creator + + @param args arguments to pass to the event creator + */ + template + explicit cudaEventBase(ArgsT&& ... args) : base_type( + Creator{}(std::forward(args)...), Deleter() + ) { + } + + /** + @brief constructs a `cudaEvent` from the given rhs using move semantics + */ + cudaEventBase(cudaEventBase&&) = default; + + /** + @brief assign the rhs to `*this` using move semantics + */ + cudaEventBase& operator = (cudaEventBase&&) = default; + + private: + + cudaEventBase(const cudaEventBase&) = delete; + cudaEventBase& operator = (const cudaEventBase&) = delete; +}; + +/** +@brief default smart pointer type to manage a `cudaEvent_t` object with unique ownership +*/ +using cudaEvent = cudaEventBase; // ---------------------------------------------------------------------------- // cudaStream // ---------------------------------------------------------------------------- /** -@private +@struct cudaStreamCreator + +@brief functor to create a `cudaStream_t` object */ struct cudaStreamCreator { + + /** + @brief constructs a new `cudaStream_t` object using `cudaStreamCreate` + */ cudaStream_t operator () () const { cudaStream_t stream; TF_CHECK_CUDA(cudaStreamCreate(&stream), "failed to create a CUDA stream"); return stream; } + + /** + @brief returns the given `cudaStream_t` object + */ + cudaStream_t operator () (cudaStream_t stream) const { + return stream; + } }; /** -@private +@struct cudaStreamDeleter + +@brief functor to delete a `cudaStream_t` object */ struct cudaStreamDeleter { + + /** + @brief deletes the given `cudaStream_t` object + */ void operator () (cudaStream_t stream) const { - if(stream) { - cudaStreamDestroy(stream); - } + cudaStreamDestroy(stream); } }; /** -@class cudaStream +@class cudaStreamBase -@brief class to create an RAII-styled wrapper over a native CUDA stream +@brief class to create a smart pointer wrapper for managing `cudaStream_t` -A cudaStream object is an RAII-styled wrapper over a native CUDA stream -(@c cudaStream_t). -A cudaStream object is move-only. +@tparam Creator functor to create the stream (used in constructor) +@tparam Deleter functor to delete the stream (used in destructor) + +The `cudaStream` class encapsulates a `cudaStream_t` using `std::unique_ptr`, ensuring that +CUDA events are properly created and destroyed with a unique ownership. */ -class cudaStream : +template +class cudaStreamBase : public std::unique_ptr, Deleter> { - public cudaObject { + static_assert(std::is_pointer_v, "cudaStream_t is not a pointer type"); public: - - /** - @brief constructs an RAII-styled object from the given CUDA stream - - Constructs a cudaStream object which owns @c stream. - */ - explicit cudaStream(cudaStream_t stream) : cudaObject(stream) { - } - - /** - @brief default constructor - */ - cudaStream() = default; - - /** - @brief synchronizes the associated stream - - Equivalently calling @c cudaStreamSynchronize to block - until this stream has completed all operations. 
- */ - void synchronize() const { - TF_CHECK_CUDA( - cudaStreamSynchronize(object), "failed to synchronize a CUDA stream" - ); - } - - /** - @brief begins graph capturing on the stream - - When a stream is in capture mode, all operations pushed into the stream - will not be executed, but will instead be captured into a graph, - which will be returned via cudaStream::end_capture. - - A thread's mode can be one of the following: - + @c cudaStreamCaptureModeGlobal: This is the default mode. - If the local thread has an ongoing capture sequence that was not initiated - with @c cudaStreamCaptureModeRelaxed at @c cuStreamBeginCapture, - or if any other thread has a concurrent capture sequence initiated with - @c cudaStreamCaptureModeGlobal, this thread is prohibited from potentially - unsafe API calls. - - + @c cudaStreamCaptureModeThreadLocal: If the local thread has an ongoing capture - sequence not initiated with @c cudaStreamCaptureModeRelaxed, - it is prohibited from potentially unsafe API calls. - Concurrent capture sequences in other threads are ignored. - - + @c cudaStreamCaptureModeRelaxed: The local thread is not prohibited - from potentially unsafe API calls. Note that the thread is still prohibited - from API calls which necessarily conflict with stream capture, for example, - attempting @c cudaEventQuery on an event that was last recorded - inside a capture sequence. - */ - void begin_capture(cudaStreamCaptureMode m = cudaStreamCaptureModeGlobal) const { - TF_CHECK_CUDA( - cudaStreamBeginCapture(object, m), - "failed to begin capture on stream ", object, " with thread mode ", m - ); - } - - /** - @brief ends graph capturing on the stream - - Equivalently calling @c cudaStreamEndCapture to - end capture on stream and returning the captured graph. - Capture must have been initiated on stream via a call to cudaStream::begin_capture. - If capture was invalidated, due to a violation of the rules of stream capture, - then a NULL graph will be returned. - */ - cudaGraph_t end_capture() const { - cudaGraph_t native_g; - TF_CHECK_CUDA( - cudaStreamEndCapture(object, &native_g), - "failed to end capture on stream ", object - ); - return native_g; - } - - /** - @brief records an event on the stream - - Equivalently calling @c cudaEventRecord to record an event on this stream, - both of which must be on the same CUDA context. - */ - void record(cudaEvent_t event) const { - TF_CHECK_CUDA( - cudaEventRecord(event, object), - "failed to record event ", event, " on stream ", object - ); - } - - /** - @brief waits on an event - - Equivalently calling @c cudaStreamWaitEvent to make all future work - submitted to stream wait for all work captured in event. - */ - void wait(cudaEvent_t event) const { - TF_CHECK_CUDA( - cudaStreamWaitEvent(object, event, 0), - "failed to wait for event ", event, " on stream ", object - ); - } -}; - -// ---------------------------------------------------------------------------- -// cudaEvent -// ---------------------------------------------------------------------------- -/** -@private -*/ -struct cudaEventCreator { + /** + @brief base type for the underlying unique pointer + + This alias provides a shorthand for the underlying `std::unique_ptr` type that manages + CUDA stream resources with an associated deleter. 
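A hedged sketch of the ownership model: the default constructor goes through `cudaStreamCreate`, while the pass-through overload of `cudaStreamCreator` shown above lets the wrapper adopt an existing raw handle, which `cudaStreamDeleter` then destroys at scope exit.

```cpp
// Illustrative sketch: creating a new stream versus adopting a raw handle.
void adopt_stream() {
  tf::cudaStream owned;          // created via cudaStreamCreate in the creator

  cudaStream_t raw;
  cudaStreamCreate(&raw);
  tf::cudaStream adopted(raw);   // pass-through creator takes ownership;
                                 // cudaStreamDestroy runs in the deleter
  owned.synchronize();
  adopted.synchronize();
}
```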
+ */ + using base_type = std::unique_ptr, Deleter>; + + /** + @brief constructs a `cudaStream` object by passing the given arguments to the stream creator + + Constructs a `cudaStream` object by passing the given arguments to the stream creator + + @param args arguments to pass to the stream creator + */ + template + explicit cudaStreamBase(ArgsT&& ... args) : base_type( + Creator{}(std::forward(args)...), Deleter() + ) { + } + + /** + @brief constructs a `cudaStream` from the given rhs using move semantics + */ + cudaStreamBase(cudaStreamBase&&) = default; + + /** + @brief assign the rhs to `*this` using move semantics + */ + cudaStreamBase& operator = (cudaStreamBase&&) = default; + + /** + @brief synchronizes the associated stream - cudaEvent_t operator () () const { - cudaEvent_t event; - TF_CHECK_CUDA(cudaEventCreate(&event), "failed to create a CUDA event"); - return event; + Equivalently calling @c cudaStreamSynchronize to block + until this stream has completed all operations. + */ + cudaStreamBase& synchronize() { + TF_CHECK_CUDA( + cudaStreamSynchronize(this->get()), "failed to synchronize a CUDA stream" + ); + return *this; } - cudaEvent_t operator () (unsigned int flag) const { - cudaEvent_t event; + /** + @brief begins graph capturing on the stream + + When a stream is in capture mode, all operations pushed into the stream + will not be executed, but will instead be captured into a graph, + which will be returned via cudaStream::end_capture. + + A thread's mode can be one of the following: + + @c cudaStreamCaptureModeGlobal: This is the default mode. + If the local thread has an ongoing capture sequence that was not initiated + with @c cudaStreamCaptureModeRelaxed at @c cuStreamBeginCapture, + or if any other thread has a concurrent capture sequence initiated with + @c cudaStreamCaptureModeGlobal, this thread is prohibited from potentially + unsafe API calls. + + + @c cudaStreamCaptureModeThreadLocal: If the local thread has an ongoing capture + sequence not initiated with @c cudaStreamCaptureModeRelaxed, + it is prohibited from potentially unsafe API calls. + Concurrent capture sequences in other threads are ignored. + + + @c cudaStreamCaptureModeRelaxed: The local thread is not prohibited + from potentially unsafe API calls. Note that the thread is still prohibited + from API calls which necessarily conflict with stream capture, for example, + attempting @c cudaEventQuery on an event that was last recorded + inside a capture sequence. + */ + void begin_capture(cudaStreamCaptureMode m = cudaStreamCaptureModeGlobal) const { TF_CHECK_CUDA( - cudaEventCreateWithFlags(&event, flag), - "failed to create a CUDA event with flag=", flag + cudaStreamBeginCapture(this->get(), m), + "failed to begin capture on stream ", this->get(), " with thread mode ", m ); - return event; } -}; -/** -@private -*/ -struct cudaEventDeleter { - void operator () (cudaEvent_t event) const { - if (event != nullptr) { - cudaEventDestroy(event); - } + /** + @brief ends graph capturing on the stream + + Equivalently calling @c cudaStreamEndCapture to + end capture on stream and returning the captured graph. + Capture must have been initiated on stream via a call to cudaStream::begin_capture. + If capture was invalidated, due to a violation of the rules of stream capture, + then a NULL graph will be returned. 
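A hedged sketch of the capture workflow described here, reusing the illustrative `my_kernel` from the first example; the launch configuration is arbitrary.

```cpp
// Illustrative sketch: record work into a CUDA graph via stream capture,
// then instantiate and replay it.
void capture_and_replay(float* d_data, size_t n) {
  tf::cudaStream stream;

  stream.begin_capture(cudaStreamCaptureModeThreadLocal);
  my_kernel<<<(n + 255) / 256, 256, 0, stream.get()>>>(d_data, n);
  cudaGraph_t captured = stream.end_capture();   // NULL if capture was invalidated

  // cudaGraphExecCreator's cudaGraph_t overload instantiates the executable
  tf::cudaGraphExec exec(captured);
  cudaGraphDestroy(captured);                    // raw graph no longer needed

  stream.run(exec).synchronize();
}
```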
+ */ + cudaGraph_t end_capture() const { + cudaGraph_t native_g; + TF_CHECK_CUDA( + cudaStreamEndCapture(this->get(), &native_g), + "failed to end capture on stream ", this->get() + ); + return native_g; } -}; + + /** + @brief records an event on the stream -/** -@class cudaEvent + Equivalently calling @c cudaEventRecord to record an event on this stream, + both of which must be on the same CUDA context. + */ + void record(cudaEvent_t event) const { + TF_CHECK_CUDA( + cudaEventRecord(event, this->get()), + "failed to record event ", event, " on stream ", this->get() + ); + } -@brief class to create an RAII-styled wrapper over a native CUDA event + /** + @brief waits on an event -A cudaEvent object is an RAII-styled wrapper over a native CUDA event -(@c cudaEvent_t). -A cudaEvent object is move-only. -*/ -class cudaEvent : - public cudaObject { + Equivalently calling @c cudaStreamWaitEvent to make all future work + submitted to stream wait for all work captured in event. + */ + void wait(cudaEvent_t event) const { + TF_CHECK_CUDA( + cudaStreamWaitEvent(this->get(), event, 0), + "failed to wait for event ", event, " on stream ", this->get() + ); + } - public: + /** + @brief runs the given executable CUDA graph - /** - @brief constructs an RAII-styled CUDA event object from the given CUDA event - */ - explicit cudaEvent(cudaEvent_t event) : cudaObject(event) { } + @param exec the given `cudaGraphExec` + */ + template + cudaStreamBase& run(const cudaGraphExecBase& exec); - /** - @brief constructs an RAII-styled CUDA event object - */ - cudaEvent() = default; - - /** - @brief constructs an RAII-styled CUDA event object with the given flag - */ - explicit cudaEvent(unsigned int flag) : cudaObject(cudaEventCreator{}(flag)) { } + /** + @brief runs the given executable CUDA graph + + @param exec the given `cudaGraphExec_t` + */ + cudaStreamBase& run(cudaGraphExec_t exec); + + private: + + cudaStreamBase(const cudaStreamBase&) = delete; + cudaStreamBase& operator = (const cudaStreamBase&) = delete; }; +/** +@brief default smart pointer type to manage a `cudaStream_t` object with unique ownership +*/ +using cudaStream = cudaStreamBase; } // end of namespace tf ----------------------------------------------------- diff --git a/bundled/taskflow-3.10.0/taskflow/cuda/cudaflow.hpp b/bundled/taskflow-3.10.0/taskflow/cuda/cudaflow.hpp index 61d5c84dc3..770de9c15a 100644 --- a/bundled/taskflow-3.10.0/taskflow/cuda/cudaflow.hpp +++ b/bundled/taskflow-3.10.0/taskflow/cuda/cudaflow.hpp @@ -1,8 +1,9 @@ #pragma once #include "../taskflow.hpp" -#include "cuda_task.hpp" -#include "cuda_capturer.hpp" +#include "cuda_graph.hpp" +#include "cuda_graph_exec.hpp" +#include "algorithm/single_task.hpp" /** @file taskflow/cuda/cudaflow.hpp @@ -11,1013 +12,15 @@ namespace tf { -// ---------------------------------------------------------------------------- -// class definition: cudaFlow -// ---------------------------------------------------------------------------- - /** -@class cudaFlow - -@brief class to create a %cudaFlow task dependency graph - -A %cudaFlow is a high-level interface over CUDA Graph to perform GPU operations -using the task dependency graph model. -The class provides a set of methods for creating and launch different tasks -on one or multiple CUDA devices, -for instance, kernel tasks, data transfer tasks, and memory operation tasks. -The following example creates a %cudaFlow of two kernel tasks, @c task1 and -@c task2, where @c task1 runs before @c task2. 
- -@code{.cpp} -tf::Taskflow taskflow; -tf::Executor executor; - -taskflow.emplace([&](tf::cudaFlow& cf){ - // create two kernel tasks - tf::cudaTask task1 = cf.kernel(grid1, block1, shm_size1, kernel1, args1); - tf::cudaTask task2 = cf.kernel(grid2, block2, shm_size2, kernel2, args2); - - // kernel1 runs before kernel2 - task1.precede(task2); -}); - -executor.run(taskflow).wait(); -@endcode - -A %cudaFlow is a task (tf::Task) created from tf::Taskflow -and will be run by @em one worker thread in the executor. -That is, the callable that describes a %cudaFlow -will be executed sequentially. -Inside a %cudaFlow task, different GPU tasks (tf::cudaTask) may run -in parallel scheduled by the CUDA runtime. - -Please refer to @ref GPUTaskingcudaFlow for details. +@brief default smart pointer type to manage a `cudaGraph_t` object with unique ownership */ -class cudaFlow { - - public: - - /** - @brief constructs a %cudaFlow - */ - cudaFlow(); - - /** - @brief destroys the %cudaFlow and its associated native CUDA graph - and executable graph - */ - ~cudaFlow() = default; - - /** - @brief default move constructor - */ - cudaFlow(cudaFlow&&) = default; - - /** - @brief default move assignment operator - */ - cudaFlow& operator = (cudaFlow&&) = default; - - /** - @brief queries the emptiness of the graph - */ - bool empty() const; - - /** - @brief queries the number of tasks - */ - size_t num_tasks() const; - - /** - @brief clears the %cudaFlow object - */ - void clear(); - - /** - @brief dumps the %cudaFlow graph into a DOT format through an - output stream - */ - void dump(std::ostream& os) const; - - /** - @brief dumps the native CUDA graph into a DOT format through an - output stream - - The native CUDA graph may be different from the upper-level %cudaFlow - graph when flow capture is involved. - */ - void dump_native_graph(std::ostream& os) const; - - // ------------------------------------------------------------------------ - // Graph building routines - // ------------------------------------------------------------------------ - - /** - @brief creates a no-operation task - - @return a tf::cudaTask handle - - An empty node performs no operation during execution, - but can be used for transitive ordering. - For example, a phased execution graph with 2 groups of @c n nodes - with a barrier between them can be represented using an empty node - and @c 2*n dependency edges, - rather than no empty node and @c n^2 dependency edges. - */ - cudaTask noop(); - - /** - @brief creates a host task that runs a callable on the host - - @tparam C callable type - - @param callable a callable object with neither arguments nor return - (i.e., constructible from @c std::function) - - @return a tf::cudaTask handle - - A host task can only execute CPU-specific functions and cannot do any CUDA calls - (e.g., @c cudaMalloc). - */ - template - cudaTask host(C&& callable); - - /** - @brief updates parameters of a host task - - The method is similar to tf::cudaFlow::host but operates on a task - of type tf::cudaTaskType::HOST. - */ - template - void host(cudaTask task, C&& callable); - - /** - @brief creates a kernel task - - @tparam F kernel function type - @tparam ArgsT kernel function parameters type - - @param g configured grid - @param b configured block - @param s configured shared memory size in bytes - @param f kernel function - @param args arguments to forward to the kernel function by copy - - @return a tf::cudaTask handle - */ - template - cudaTask kernel(dim3 g, dim3 b, size_t s, F f, ArgsT... 
args); - - /** - @brief updates parameters of a kernel task - - The method is similar to tf::cudaFlow::kernel but operates on a task - of type tf::cudaTaskType::KERNEL. - The kernel function name must NOT change. - */ - template - void kernel( - cudaTask task, dim3 g, dim3 b, size_t shm, F f, ArgsT... args - ); - - /** - @brief creates a memset task that fills untyped data with a byte value - - @param dst pointer to the destination device memory area - @param v value to set for each byte of specified memory - @param count size in bytes to set - - @return a tf::cudaTask handle - - A memset task fills the first @c count bytes of device memory area - pointed by @c dst with the byte value @c v. - */ - cudaTask memset(void* dst, int v, size_t count); - - /** - @brief updates parameters of a memset task - - The method is similar to tf::cudaFlow::memset but operates on a task - of type tf::cudaTaskType::MEMSET. - The source/destination memory may have different address values but - must be allocated from the same contexts as the original - source/destination memory. - */ - void memset(cudaTask task, void* dst, int ch, size_t count); - - /** - @brief creates a memcpy task that copies untyped data in bytes - - @param tgt pointer to the target memory block - @param src pointer to the source memory block - @param bytes bytes to copy - - @return a tf::cudaTask handle - - A memcpy task transfers @c bytes of data from a source location - to a target location. Direction can be arbitrary among CPUs and GPUs. - */ - cudaTask memcpy(void* tgt, const void* src, size_t bytes); - - /** - @brief updates parameters of a memcpy task - - The method is similar to tf::cudaFlow::memcpy but operates on a task - of type tf::cudaTaskType::MEMCPY. - The source/destination memory may have different address values but - must be allocated from the same contexts as the original - source/destination memory. - */ - void memcpy(cudaTask task, void* tgt, const void* src, size_t bytes); - - /** - @brief creates a memset task that sets a typed memory block to zero - - @tparam T element type (size of @c T must be either 1, 2, or 4) - @param dst pointer to the destination device memory area - @param count number of elements - - @return a tf::cudaTask handle - - A zero task zeroes the first @c count elements of type @c T - in a device memory area pointed by @c dst. - */ - template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr - > - cudaTask zero(T* dst, size_t count); - - /** - @brief updates parameters of a memset task to a zero task - - The method is similar to tf::cudaFlow::zero but operates on - a task of type tf::cudaTaskType::MEMSET. - - The source/destination memory may have different address values but - must be allocated from the same contexts as the original - source/destination memory. - */ - template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr - > - void zero(cudaTask task, T* dst, size_t count); - - /** - @brief creates a memset task that fills a typed memory block with a value - - @tparam T element type (size of @c T must be either 1, 2, or 4) - - @param dst pointer to the destination device memory area - @param value value to fill for each element of type @c T - @param count number of elements - - @return a tf::cudaTask handle - - A fill task fills the first @c count elements of type @c T with @c value - in a device memory area pointed by @c dst. - The value to fill is interpreted in type @c T rather than byte. 
- */ - template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr - > - cudaTask fill(T* dst, T value, size_t count); - - /** - @brief updates parameters of a memset task to a fill task - - The method is similar to tf::cudaFlow::fill but operates on a task - of type tf::cudaTaskType::MEMSET. - - The source/destination memory may have different address values but - must be allocated from the same contexts as the original - source/destination memory. - */ - template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr - > - void fill(cudaTask task, T* dst, T value, size_t count); - - /** - @brief creates a memcopy task that copies typed data - - @tparam T element type (non-void) - - @param tgt pointer to the target memory block - @param src pointer to the source memory block - @param num number of elements to copy - - @return a tf::cudaTask handle - - A copy task transfers num*sizeof(T) bytes of data from a source location - to a target location. Direction can be arbitrary among CPUs and GPUs. - */ - template , void>* = nullptr - > - cudaTask copy(T* tgt, const T* src, size_t num); - - /** - @brief updates parameters of a memcpy task to a copy task - - The method is similar to tf::cudaFlow::copy but operates on a task - of type tf::cudaTaskType::MEMCPY. - The source/destination memory may have different address values but - must be allocated from the same contexts as the original - source/destination memory. - */ - template , void>* = nullptr - > - void copy(cudaTask task, T* tgt, const T* src, size_t num); - - // ------------------------------------------------------------------------ - // run method - // ------------------------------------------------------------------------ - /** - @brief offloads the %cudaFlow onto a GPU asynchronously via a stream - - @param stream stream for performing this operation - - Offloads the present %cudaFlow onto a GPU asynchronously via - the given stream. - - An offloaded %cudaFlow forces the underlying graph to be instantiated. - After the instantiation, you should not modify the graph topology - but update node parameters. - */ - void run(cudaStream_t stream); - - /** - @brief acquires a reference to the underlying CUDA graph - */ - cudaGraph_t native_graph(); - - /** - @brief acquires a reference to the underlying CUDA graph executable - */ - cudaGraphExec_t native_executable(); - - // ------------------------------------------------------------------------ - // generic algorithms - // ------------------------------------------------------------------------ - - /** - @brief runs a callable with only a single kernel thread - - @tparam C callable type - - @param c callable to run by a single kernel thread - - @return a tf::cudaTask handle - */ - template - cudaTask single_task(C c); - - /** - @brief updates a single-threaded kernel task - - This method is similar to cudaFlow::single_task but operates - on an existing task. 
- */ - template - void single_task(cudaTask task, C c); - - /** - @brief applies a callable to each dereferenced element of the data array - - @tparam I iterator type - @tparam C callable type - - @param first iterator to the beginning (inclusive) - @param last iterator to the end (exclusive) - @param callable a callable object to apply to the dereferenced iterator - - @return a tf::cudaTask handle - - This method is equivalent to the parallel execution of the following loop on a GPU: - - @code{.cpp} - for(auto itr = first; itr != last; itr++) { - callable(*itr); - } - @endcode - */ - template - cudaTask for_each(I first, I last, C callable); - - /** - @brief updates parameters of a kernel task created from - tf::cudaFlow::for_each - - The type of the iterators and the callable must be the same as - the task created from tf::cudaFlow::for_each. - */ - template - void for_each(cudaTask task, I first, I last, C callable); - - /** - @brief applies a callable to each index in the range with the step size - - @tparam I index type - @tparam C callable type - - @param first beginning index - @param last last index - @param step step size - @param callable the callable to apply to each element in the data array - - @return a tf::cudaTask handle - - This method is equivalent to the parallel execution of the following loop on a GPU: - - @code{.cpp} - // step is positive [first, last) - for(auto i=first; ilast; i+=step) { - callable(i); - } - @endcode - */ - template - cudaTask for_each_index(I first, I last, I step, C callable); - - /** - @brief updates parameters of a kernel task created from - tf::cudaFlow::for_each_index - - The type of the iterators and the callable must be the same as - the task created from tf::cudaFlow::for_each_index. - */ - template - void for_each_index( - cudaTask task, I first, I last, I step, C callable - ); - - /** - @brief applies a callable to a source range and stores the result in a target range - - @tparam I input iterator type - @tparam O output iterator type - @tparam C unary operator type - - @param first iterator to the beginning of the input range - @param last iterator to the end of the input range - @param output iterator to the beginning of the output range - @param op the operator to apply to transform each element in the range - - @return a tf::cudaTask handle - - This method is equivalent to the parallel execution of the following loop on a GPU: - - @code{.cpp} - while (first != last) { - *output++ = callable(*first++); - } - @endcode - */ - template - cudaTask transform(I first, I last, O output, C op); - - /** - @brief updates parameters of a kernel task created from - tf::cudaFlow::transform - - The type of the iterators and the callable must be the same as - the task created from tf::cudaFlow::for_each. 
- */ - template - void transform(cudaTask task, I first, I last, O output, C c); - - /** - @brief creates a task to perform parallel transforms over two ranges of items - - @tparam I1 first input iterator type - @tparam I2 second input iterator type - @tparam O output iterator type - @tparam C unary operator type - - @param first1 iterator to the beginning of the input range - @param last1 iterator to the end of the input range - @param first2 iterato - @param output iterator to the beginning of the output range - @param op binary operator to apply to transform each pair of items in the - two input ranges - - @return cudaTask handle - - This method is equivalent to the parallel execution of the following loop on a GPU: - - @code{.cpp} - while (first1 != last1) { - *output++ = op(*first1++, *first2++); - } - @endcode - */ - template - cudaTask transform(I1 first1, I1 last1, I2 first2, O output, C op); - - /** - @brief updates parameters of a kernel task created from - tf::cudaFlow::transform +using cudaGraph = cudaGraphBase; - The type of the iterators and the callable must be the same as - the task created from tf::cudaFlow::for_each. - */ - template - void transform( - cudaTask task, I1 first1, I1 last1, I2 first2, O output, C c - ); - - // ------------------------------------------------------------------------ - // subflow - // ------------------------------------------------------------------------ - - /** - @brief constructs a subflow graph through tf::cudaFlowCapturer - - @tparam C callable type constructible from - @c std::function - - @param callable the callable to construct a capture flow - - @return a tf::cudaTask handle - - A captured subflow forms a sub-graph to the %cudaFlow and can be used to - capture custom (or third-party) kernels that cannot be directly constructed - from the %cudaFlow. - - Example usage: - - @code{.cpp} - taskflow.emplace([&](tf::cudaFlow& cf){ - - tf::cudaTask my_kernel = cf.kernel(my_arguments); - - // create a flow capturer to capture custom kernels - tf::cudaTask my_subflow = cf.capture([&](tf::cudaFlowCapturer& capturer){ - capturer.on([&](cudaStream_t stream){ - invoke_custom_kernel_with_stream(stream, custom_arguments); - }); - }); - - my_kernel.precede(my_subflow); - }); - @endcode - */ - template - cudaTask capture(C&& callable); - - /** - @brief updates the captured child graph - - The method is similar to tf::cudaFlow::capture but operates on a task - of type tf::cudaTaskType::SUBFLOW. - The new captured graph must be topologically identical to the original - captured graph. 
- */ - template - void capture(cudaTask task, C callable); - - private: - - cudaFlowGraph _cfg; - cudaGraphExec _exe {nullptr}; -}; - -// Construct a standalone cudaFlow -inline cudaFlow::cudaFlow() { - _cfg._native_handle.create(); -} - -// Procedure: clear -inline void cudaFlow::clear() { - _exe.clear(); - _cfg.clear(); - _cfg._native_handle.create(); -} - -// Function: empty -inline bool cudaFlow::empty() const { - return _cfg._nodes.empty(); -} - -// Function: num_tasks -inline size_t cudaFlow::num_tasks() const { - return _cfg._nodes.size(); -} - -// Procedure: dump -inline void cudaFlow::dump(std::ostream& os) const { - _cfg.dump(os, nullptr, ""); -} - -// Procedure: dump -inline void cudaFlow::dump_native_graph(std::ostream& os) const { - cuda_dump_graph(os, _cfg._native_handle); -} - -// ---------------------------------------------------------------------------- -// Graph building methods -// ---------------------------------------------------------------------------- - -// Function: noop -inline cudaTask cudaFlow::noop() { - - auto node = _cfg.emplace_back( - _cfg, std::in_place_type_t{} - ); - - TF_CHECK_CUDA( - cudaGraphAddEmptyNode( - &node->_native_handle, _cfg._native_handle, nullptr, 0 - ), - "failed to create a no-operation (empty) node" - ); - - return cudaTask(node); -} - -// Function: host -template -cudaTask cudaFlow::host(C&& c) { - - auto node = _cfg.emplace_back( - _cfg, std::in_place_type_t{}, std::forward(c) - ); - - auto h = std::get_if(&node->_handle); - - cudaHostNodeParams p; - p.fn = cudaFlowNode::Host::callback; - p.userData = h; - - TF_CHECK_CUDA( - cudaGraphAddHostNode( - &node->_native_handle, _cfg._native_handle, nullptr, 0, &p - ), - "failed to create a host node" - ); - - return cudaTask(node); -} - -// Function: kernel -template -cudaTask cudaFlow::kernel( - dim3 g, dim3 b, size_t s, F f, ArgsT... args -) { - - auto node = _cfg.emplace_back( - _cfg, std::in_place_type_t{}, (void*)f - ); - - cudaKernelNodeParams p; - void* arguments[sizeof...(ArgsT)] = { (void*)(&args)... 
}; - p.func = (void*)f; - p.gridDim = g; - p.blockDim = b; - p.sharedMemBytes = s; - p.kernelParams = arguments; - p.extra = nullptr; - - TF_CHECK_CUDA( - cudaGraphAddKernelNode( - &node->_native_handle, _cfg._native_handle, nullptr, 0, &p - ), - "failed to create a kernel task" - ); - - return cudaTask(node); -} - -// Function: zero -template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* -> -cudaTask cudaFlow::zero(T* dst, size_t count) { - - auto node = _cfg.emplace_back( - _cfg, std::in_place_type_t{} - ); - - auto p = cuda_get_zero_parms(dst, count); - - TF_CHECK_CUDA( - cudaGraphAddMemsetNode( - &node->_native_handle, _cfg._native_handle, nullptr, 0, &p - ), - "failed to create a memset (zero) task" - ); - - return cudaTask(node); -} - -// Function: fill -template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* -> -cudaTask cudaFlow::fill(T* dst, T value, size_t count) { - - auto node = _cfg.emplace_back( - _cfg, std::in_place_type_t{} - ); - - auto p = cuda_get_fill_parms(dst, value, count); - - TF_CHECK_CUDA( - cudaGraphAddMemsetNode( - &node->_native_handle, _cfg._native_handle, nullptr, 0, &p - ), - "failed to create a memset (fill) task" - ); - - return cudaTask(node); -} - -// Function: copy -template < - typename T, - std::enable_if_t, void>* -> -cudaTask cudaFlow::copy(T* tgt, const T* src, size_t num) { - - auto node = _cfg.emplace_back( - _cfg, std::in_place_type_t{} - ); - - auto p = cuda_get_copy_parms(tgt, src, num); - - TF_CHECK_CUDA( - cudaGraphAddMemcpyNode( - &node->_native_handle, _cfg._native_handle, nullptr, 0, &p - ), - "failed to create a memcpy (copy) task" - ); - - return cudaTask(node); -} - -// Function: memset -inline cudaTask cudaFlow::memset(void* dst, int ch, size_t count) { - - auto node = _cfg.emplace_back( - _cfg, std::in_place_type_t{} - ); - - auto p = cuda_get_memset_parms(dst, ch, count); - - TF_CHECK_CUDA( - cudaGraphAddMemsetNode( - &node->_native_handle, _cfg._native_handle, nullptr, 0, &p - ), - "failed to create a memset task" - ); - - return cudaTask(node); -} - -// Function: memcpy -inline cudaTask cudaFlow::memcpy(void* tgt, const void* src, size_t bytes) { - - auto node = _cfg.emplace_back( - _cfg, std::in_place_type_t{} - ); - - auto p = cuda_get_memcpy_parms(tgt, src, bytes); - - TF_CHECK_CUDA( - cudaGraphAddMemcpyNode( - &node->_native_handle, _cfg._native_handle, nullptr, 0, &p - ), - "failed to create a memcpy task" - ); - - return cudaTask(node); -} - -// ------------------------------------------------------------------------ -// update methods -// ------------------------------------------------------------------------ - -// Function: host -template -void cudaFlow::host(cudaTask task, C&& c) { - - if(task.type() != cudaTaskType::HOST) { - TF_THROW(task, " is not a host task"); - } - - auto h = std::get_if(&task._node->_handle); - - h->func = std::forward(c); -} - -// Function: update kernel parameters -template -void cudaFlow::kernel( - cudaTask task, dim3 g, dim3 b, size_t s, F f, ArgsT... args -) { - - if(task.type() != cudaTaskType::KERNEL) { - TF_THROW(task, " is not a kernel task"); - } - - cudaKernelNodeParams p; - - void* arguments[sizeof...(ArgsT)] = { (void*)(&args)... 
}; - p.func = (void*)f; - p.gridDim = g; - p.blockDim = b; - p.sharedMemBytes = s; - p.kernelParams = arguments; - p.extra = nullptr; - - TF_CHECK_CUDA( - cudaGraphExecKernelNodeSetParams(_exe, task._node->_native_handle, &p), - "failed to update kernel parameters on ", task - ); -} - -// Function: update copy parameters -template , void>*> -void cudaFlow::copy(cudaTask task, T* tgt, const T* src, size_t num) { - - if(task.type() != cudaTaskType::MEMCPY) { - TF_THROW(task, " is not a memcpy task"); - } - - auto p = cuda_get_copy_parms(tgt, src, num); - - TF_CHECK_CUDA( - cudaGraphExecMemcpyNodeSetParams(_exe, task._node->_native_handle, &p), - "failed to update memcpy parameters on ", task - ); -} - -// Function: update memcpy parameters -inline void cudaFlow::memcpy( - cudaTask task, void* tgt, const void* src, size_t bytes -) { - - if(task.type() != cudaTaskType::MEMCPY) { - TF_THROW(task, " is not a memcpy task"); - } - - auto p = cuda_get_memcpy_parms(tgt, src, bytes); - - TF_CHECK_CUDA( - cudaGraphExecMemcpyNodeSetParams(_exe, task._node->_native_handle, &p), - "failed to update memcpy parameters on ", task - ); -} - -// Procedure: memset -inline void cudaFlow::memset(cudaTask task, void* dst, int ch, size_t count) { - - if(task.type() != cudaTaskType::MEMSET) { - TF_THROW(task, " is not a memset task"); - } - - auto p = cuda_get_memset_parms(dst, ch, count); - - TF_CHECK_CUDA( - cudaGraphExecMemsetNodeSetParams(_exe, task._node->_native_handle, &p), - "failed to update memset parameters on ", task - ); -} - -// Procedure: fill -template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* -> -void cudaFlow::fill(cudaTask task, T* dst, T value, size_t count) { - - if(task.type() != cudaTaskType::MEMSET) { - TF_THROW(task, " is not a memset task"); - } - - auto p = cuda_get_fill_parms(dst, value, count); - - TF_CHECK_CUDA( - cudaGraphExecMemsetNodeSetParams(_exe, task._node->_native_handle, &p), - "failed to update memset parameters on ", task - ); -} - -// Procedure: zero -template && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* -> -void cudaFlow::zero(cudaTask task, T* dst, size_t count) { - - if(task.type() != cudaTaskType::MEMSET) { - TF_THROW(task, " is not a memset task"); - } - - auto p = cuda_get_zero_parms(dst, count); - - TF_CHECK_CUDA( - cudaGraphExecMemsetNodeSetParams(_exe, task._node->_native_handle, &p), - "failed to update memset parameters on ", task - ); -} - -// Function: capture -template -void cudaFlow::capture(cudaTask task, C c) { - - if(task.type() != cudaTaskType::SUBFLOW) { - TF_THROW(task, " is not a subflow task"); - } - - // insert a subflow node - // construct a captured flow from the callable - auto node_handle = std::get_if(&task._node->_handle); - //node_handle->graph.clear(); - - cudaFlowCapturer capturer; - c(capturer); - - // obtain the optimized captured graph - capturer._cfg._native_handle.reset(capturer.capture()); - node_handle->cfg = std::move(capturer._cfg); - - TF_CHECK_CUDA( - cudaGraphExecChildGraphNodeSetParams( - _exe, - task._node->_native_handle, - node_handle->cfg._native_handle - ), - "failed to update a captured child graph" - ); -} - -// ---------------------------------------------------------------------------- -// captured flow -// ---------------------------------------------------------------------------- - -// Function: capture -template -cudaTask cudaFlow::capture(C&& c) { - - // insert a subflow node - auto node = _cfg.emplace_back( - _cfg, std::in_place_type_t{} - ); - - // construct a captured flow from the 
callable - auto node_handle = std::get_if(&node->_handle); - - // perform capturing - cudaFlowCapturer capturer; - c(capturer); - - // obtain the optimized captured graph - capturer._cfg._native_handle.reset(capturer.capture()); - - // move capturer's cudaFlow graph into node - node_handle->cfg = std::move(capturer._cfg); - - TF_CHECK_CUDA( - cudaGraphAddChildGraphNode( - &node->_native_handle, - _cfg._native_handle, - nullptr, - 0, - node_handle->cfg._native_handle - ), - "failed to add a cudaFlow capturer task" - ); - - return cudaTask(node); -} - -// ---------------------------------------------------------------------------- -// run method -// ---------------------------------------------------------------------------- - -// Procedure: run -inline void cudaFlow::run(cudaStream_t stream) { - if(!_exe) { - _exe.instantiate(_cfg._native_handle); - } - _exe.launch(stream); - _cfg._state = cudaFlowGraph::OFFLOADED; -} - -// Function: native_cfg -inline cudaGraph_t cudaFlow::native_graph() { - return _cfg._native_handle; -} - -// Function: native_executable -inline cudaGraphExec_t cudaFlow::native_executable() { - return _exe; -} +/** +@brief default smart pointer type to manage a `cudaGraphExec_t` object with unique ownership +*/ +using cudaGraphExec = cudaGraphExecBase; } // end of namespace tf ----------------------------------------------------- diff --git a/bundled/taskflow-3.10.0/taskflow/utility/lazy_string.hpp b/bundled/taskflow-3.10.0/taskflow/utility/lazy_string.hpp new file mode 100644 index 0000000000..dce2340f68 --- /dev/null +++ b/bundled/taskflow-3.10.0/taskflow/utility/lazy_string.hpp @@ -0,0 +1,73 @@ +#pragma once + +#include +#include + +namespace tf { + +class LazyString { + + public: + + LazyString() = default; + + LazyString(const std::string& str) : + _str(str.empty() ? nullptr : std::make_unique(str)) { + } + + LazyString(std::string&& str) : + _str(str.empty() ? nullptr : std::make_unique(std::move(str))) { + } + + LazyString(const char* str) : + _str((!str || str[0] == '\0') ? nullptr : std::make_unique(str)) { + } + + // Modify the operator to return a const reference + operator const std::string& () const noexcept { + static const std::string empty_string; + return _str ? *_str : empty_string; + } + + LazyString& operator = (const std::string& str) { + if(_str == nullptr) { + _str = std::make_unique(str); + } + else { + *_str = str; + } + return *this; + } + + LazyString& operator = (std::string&& str) { + if(_str == nullptr) { + _str = std::make_unique(std::move(str)); + } + else { + *_str = std::move(str); + } + return *this; + } + + bool empty() const noexcept { + return !_str || _str->empty(); + } + + size_t size() const noexcept { + return _str ? _str->size() : 0; + } + + friend std::ostream& operator<<(std::ostream& os, const LazyString& ls) { + os << (ls._str ? *ls._str : ""); + return os; + } + + private: + + std::unique_ptr _str; + +}; + + + +} // end of namespace tf ------------------------------------------------------------------------- diff --git a/bundled/taskflow-3.10.0/taskflow/utility/math.hpp b/bundled/taskflow-3.10.0/taskflow/utility/math.hpp index 15685a42e5..2b8ea7dc7a 100644 --- a/bundled/taskflow-3.10.0/taskflow/utility/math.hpp +++ b/bundled/taskflow-3.10.0/taskflow/utility/math.hpp @@ -1,6 +1,7 @@ #pragma once #include +#include namespace tf { @@ -61,24 +62,54 @@ constexpr bool is_pow2(const T& x) { } /** - * @brief Computes the floor of log2 of the given positive integer. 
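Referring back to the new `lazy_string.hpp` above: `LazyString` only allocates when a non-empty string is stored, which keeps unnamed objects cheap. A small, self-contained sketch (the include path is the file added by this patch; everything else is illustrative):

```cpp
#include <cassert>
#include <iostream>
#include <string>
// #include <taskflow/utility/lazy_string.hpp>  // header introduced by this patch

int main() {
  tf::LazyString name;                 // empty: no string is allocated
  assert(name.empty() && name.size() == 0);

  name = std::string("worker-7");      // first assignment allocates the string
  std::cout << name << '\n';           // operator<< streams "worker-7"

  const std::string& view = name;      // implicit conversion to const std::string&
  tf::LazyString unnamed("");          // an empty C string also stays unallocated
  return (view.size() == 8 && unnamed.empty()) ? 0 : 1;
}
```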
+ * @brief computes the floor of the base-2 logarithm of a number using count-leading-zeros (CTL). * - * This function calculates the largest integer `log` such that `2^log <= n`. - * - * @tparam T The type of the input. Must be an integral type. - * @param n The positive integer to compute log2 for. Assumes `n > 0`. - * @return The floor of log2 of `n`. - * - * @attention This function is constexpr and can be evaluated at compile time. + * This function efficiently calculates the floor of `log2(n)` for both 32-bit and 64-bit integers. * + * @tparam T integer type (uint32_t or uint64_t). + * @param n input number. + * @return floor of `log2(n)` */ -template -constexpr int log2(T n) { - int log = 0; +template +constexpr size_t floor_log2(T n) { + + static_assert(std::is_unsigned_v, "log2 only supports unsigned integer types"); + +#if defined(_MSC_VER) + unsigned long index; + if constexpr (sizeof(T) == 8) { + _BitScanReverse64(&index, n); + } else { + _BitScanReverse(&index, static_cast(n)); + } + return static_cast(index); +#elif defined(__GNUC__) || defined(__clang__) + if constexpr (sizeof(T) == 8) { + return 63 - __builtin_clzll(n); + } else { + return 31 - __builtin_clz(n); + } +#else + // Portable fallback: Uses bit shifts to count leading zeros manually + size_t log = 0; while (n >>= 1) { ++log; } return log; +#endif +} + +/** +@brief returns the floor of `log2(N)` at compile time +*/ +template +constexpr size_t static_floor_log2() { + return (N < 2) ? 0 : 1 + static_floor_log2(); + //auto log = 0; + //while (N >>= 1) { + // ++log; + //} + //return log; } /** @@ -256,6 +287,91 @@ inline T seed() noexcept { return std::chrono::system_clock::now().time_since_epoch().count(); } +/** + * @brief counts the number of trailing zeros in an integer. + * + * This function provides a portable implementation for counting the number of + * trailing zeros across different platforms and integer sizes (32-bit and 64-bit). + * + * @tparam T integer type (32-bit or 64-bit). + * @param x non-zero integer to count trailing zeros from + * @return the number of trailing zeros in @c x + * + * @attention + * The behavior is undefined when @c x is 0. + */ +template >> +auto ctz(T x) { + + #if defined(_MSC_VER) + unsigned long index; + if constexpr (sizeof(T) == 8) { + _BitScanForward64(&index, x); + } else { + _BitScanForward(&index, (unsigned long)x); + } + return index; + #elif defined(__GNUC__) || defined(__clang__) + if constexpr (sizeof(T) == 8) { + return __builtin_ctzll(x); + } else { + return __builtin_ctz(x); + } + #else + size_t r = 0; + while ((x & 1) == 0) { + x >>= 1; + r++; + } + return r; + #endif +} + +// ------------------------------------------------------------------------------------------------ +// coprime +// ------------------------------------------------------------------------------------------------ + +/** + * @brief computes a coprime of a given number + * + * This function finds the largest number less than N that is coprime (i.e., has a greatest common divisor of 1) with @c N. + * If @c N is less than 3, it returns 1 as a default coprime. + * + * @param N input number for which a coprime is to be found. 
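// A minimal sketch (not part of the bundled sources) exercising the new bit utilities
// above. It assumes the template parameter lists read floor_log2<T>(T n), ctz<T>(T x),
// and static_floor_log2<N>() with N being the input value; the expected results follow
// the documentation comments above.
#include <cstdint>

void bit_utility_demo() {
  // run-time floor of log2 (unsigned integer types only)
  size_t a = tf::floor_log2(std::uint32_t{1});     // 0
  size_t b = tf::floor_log2(std::uint64_t{1024});  // 10
  size_t c = tf::floor_log2(std::uint32_t{1000});  // 9, since 2^9 = 512 <= 1000 < 1024

  // compile-time variant via template recursion
  static_assert(tf::static_floor_log2<1>() == 0);
  static_assert(tf::static_floor_log2<8>() == 3);

  // count trailing zeros; undefined behavior for x == 0
  auto t = tf::ctz(std::uint32_t{40});             // 40 == 0b101000, so 3
  (void)a; (void)b; (void)c; (void)t;
}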
+ * @return the largest number < @c N that is coprime to N + */ +constexpr size_t coprime(size_t N) { + if(N < 3) { + return 1; + } + for (size_t x = N; --x > 0;) { + if (std::gcd(x, N) == 1) { + return x; + } + } + return 1; +} + +/** + * @brief generates a compile-time array of coprimes for numbers from 0 to N-1 + * + * This function constructs a constexpr array where each element at index `i` contains a coprime of `i` + * (the largest number less than `i` that is coprime to it). + * + * @tparam N the size of the array to generate (should be greater than 0). + * @return a constexpr array of size @c N where each index holds a coprime of its value. + */ +template +constexpr std::array make_coprime_lut() { + static_assert(N>0, "N must be greater than 0"); + std::array coprimes{}; + for (size_t n = 0; n < N; ++n) { + coprimes[n] = coprime(n); + } + return coprimes; +} + + //class XorShift64 { // // public: diff --git a/bundled/taskflow-3.10.0/taskflow/utility/mpmc.hpp b/bundled/taskflow-3.10.0/taskflow/utility/mpmc.hpp index 1c74cef1b2..f9e53ca6d1 100644 --- a/bundled/taskflow-3.10.0/taskflow/utility/mpmc.hpp +++ b/bundled/taskflow-3.10.0/taskflow/utility/mpmc.hpp @@ -4,6 +4,8 @@ #include #include +#include "os.hpp" + namespace tf { /** @@ -149,4 +151,358 @@ private: alignas(2*TF_CACHELINE_SIZE) std::atomic _dequeue_pos; }; +// ------------------------------------------------------------------------------------------------ +// specialization for pointer type +// ------------------------------------------------------------------------------------------------ + +template +class MPMC { + + constexpr static uint64_t BufferSize = 1ull << LogSize; + constexpr static uint64_t BufferMask = (BufferSize - 1); + + static_assert((BufferSize >= 2) && ((BufferSize & (BufferSize - 1)) == 0)); + +public: + + /** + * Constructs a bounded multi-producer, multi-consumer queue + * + * Note: Due to the algorithm used, buffer_size must be a power + * of two and must be greater than or equal to two. + * + * @param buffer_size Number of spaces available in the queue. 
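// A small compile-time check (not part of the bundled sources) for the coprime helpers
// above, assuming make_coprime_lut is parameterized as make_coprime_lut<N>() and returns
// std::array<size_t, N>, as its documentation comment describes.
#include <array>

void coprime_demo() {
  static_assert(tf::coprime(2)  == 1);   // N < 3 falls back to 1
  static_assert(tf::coprime(8)  == 7);   // gcd(7, 8) == 1
  static_assert(tf::coprime(10) == 9);   // 9 is the largest x < 10 with gcd(x, 10) == 1

  // lut[n] holds a coprime of n for every n < 4
  constexpr auto lut = tf::make_coprime_lut<4>();
  static_assert(lut[3] == 2);            // largest x < 3 with gcd(x, 3) == 1
}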
+ */ + explicit MPMC() { + for (size_t i = 0; i < _buffer.size(); i++) { + _buffer[i].sequence.store(i, std::memory_order_relaxed); + } + _enqueue_pos.store(0, std::memory_order_relaxed); + _dequeue_pos.store(0, std::memory_order_relaxed); + } + + + /** + * Enqueues an item into the queue + * + * @param data Argument to place into the array + * @return false if the queue was full (and enqueing failed), + * true otherwise + */ + bool try_enqueue(T* data) { + Cell *cell; + auto pos = _enqueue_pos.load(std::memory_order_relaxed); + for (; ;) { + cell = &_buffer[pos & BufferMask]; + auto seq = cell->sequence.load(std::memory_order_acquire); + if (seq == pos) { + if (_enqueue_pos.compare_exchange_weak(pos, pos + 1, + std::memory_order_relaxed)) { + break; + } + } else if (seq < pos) { + return false; + } else { + pos = _enqueue_pos.load(std::memory_order_relaxed); + } + } + + cell->data = data; + cell->sequence.store(pos + 1, std::memory_order_release); + + return true; + } + + void enqueue(T* data) { + + Cell *cell; + auto pos = _enqueue_pos.load(std::memory_order_relaxed); + + for (; ;) { + cell = &_buffer[pos & BufferMask]; + auto seq = cell->sequence.load(std::memory_order_acquire); + if (seq == pos) { + if (_enqueue_pos.compare_exchange_weak(pos, pos + 1, + std::memory_order_relaxed)) { + break; + } + } + else { + pos = _enqueue_pos.load(std::memory_order_relaxed); + } + } + + cell->data = data; + cell->sequence.store(pos + 1, std::memory_order_release); + } + + /** + * Dequeues an item from the queue + * + * @param[out] data Reference to place item into + * @return false if the queue was empty (and dequeuing failed), + * true if successful + */ + T* try_dequeue() { + Cell *cell; + auto pos = _dequeue_pos.load(std::memory_order_relaxed); + for (; ;) { + cell = &_buffer[pos & BufferMask]; + auto seq = cell->sequence.load(std::memory_order_acquire); + if (seq == pos + 1) { + if (_dequeue_pos.compare_exchange_weak(pos, pos + 1, + std::memory_order_relaxed)) { + break; + } + } else if (seq < (pos + 1)) { + return nullptr; + } else { + pos = _dequeue_pos.load(std::memory_order_relaxed); + } + } + + auto data = cell->data; + cell->sequence.store(pos + BufferMask + 1, std::memory_order_release); + + return data; + } + + bool empty() const { + auto beg = _dequeue_pos.load(std::memory_order_relaxed); + auto end = _enqueue_pos.load(std::memory_order_relaxed); + return beg >= end; + } + + size_t capacity() const { + return BufferSize; + } + +private: + + struct Cell { + T* data; + std::atomic sequence; + }; + + //static const size_t cacheline_size = 64; + + alignas(2*TF_CACHELINE_SIZE) std::array _buffer; + alignas(2*TF_CACHELINE_SIZE) std::atomic _enqueue_pos; + alignas(2*TF_CACHELINE_SIZE) std::atomic _dequeue_pos; +}; + +/** + * RunQueue is a fixed-size, partially non-blocking deque or Work items. + * Operations on front of the queue must be done by a single thread (owner), + * operations on back of the queue can be done by multiple threads concurrently. + * + * Algorithm outline: + * All remote threads operating on the queue back are serialized by a mutex. + * This ensures that at most two threads access state: owner and one remote + * thread (Size aside). The algorithm ensures that the occupied region of the + * underlying array is logically continuous (can wraparound, but no stray + * occupied elements). Owner operates on one end of this region, remote thread + * operates on the other end. 
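// A minimal usage sketch (not part of the bundled sources) for the pointer
// specialization of tf::MPMC defined above. The template parameter list is assumed to
// be MPMC<T*, LogSize>, giving a fixed capacity of 1 << LogSize slots; note that this
// specialization's try_dequeue() returns the pointer directly and yields nullptr on an
// empty queue, rather than a bool with an output parameter.
void mpmc_pointer_demo() {
  tf::MPMC<int*, 4> queue;               // bounded queue with 16 slots
  int items[3] = {1, 2, 3};

  for (int& i : items) {
    queue.enqueue(&i);                   // retries internally until a slot is free
  }

  while (int* p = queue.try_dequeue()) { // nullptr ends the loop
    ++(*p);
  }
  // empty() uses relaxed loads and may return a stale answer under concurrent use;
  // capacity() is the fixed buffer size.
}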
Synchronization between these threads + * (potential consumption of the last element and take up of the last empty + * element) happens by means of state variable in each element. States are: + * empty, busy (in process of insertion of removal) and ready. Threads claim + * elements (empty->busy and ready->busy transitions) by means of a CAS + * operation. The finishing transition (busy->empty and busy->ready) are done + * with plain store as the element is exclusively owned by the current thread. + * + * Note: we could permit only pointers as elements, then we would not need + * separate state variable as null/non-null pointer value would serve as state, + * but that would require malloc/free per operation for large, complex values + * (and this is designed to store std::function<()>). +template +class RunQueue { + public: + RunQueue() : front_(0), back_(0) { + // require power-of-two for fast masking + eigen_plain_assert((kSize & (kSize - 1)) == 0); + eigen_plain_assert(kSize > 2); // why would you do this? + eigen_plain_assert(kSize <= (64 << 10)); // leave enough space for counter + for (unsigned i = 0; i < kSize; i++) array_[i].state.store(kEmpty, std::memory_order_relaxed); + } + + ~RunQueue() { eigen_plain_assert(Size() == 0); } + + // PushFront inserts w at the beginning of the queue. + // If queue is full returns w, otherwise returns default-constructed Work. + Work PushFront(Work w) { + unsigned front = front_.load(std::memory_order_relaxed); + Elem* e = &array_[front & kMask]; + uint8_t s = e->state.load(std::memory_order_relaxed); + if (s != kEmpty || !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire)) return w; + front_.store(front + 1 + (kSize << 1), std::memory_order_relaxed); + e->w = std::move(w); + e->state.store(kReady, std::memory_order_release); + return Work(); + } + + // PopFront removes and returns the first element in the queue. + // If the queue was empty returns default-constructed Work. + Work PopFront() { + unsigned front = front_.load(std::memory_order_relaxed); + Elem* e = &array_[(front - 1) & kMask]; + uint8_t s = e->state.load(std::memory_order_relaxed); + if (s != kReady || !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire)) return Work(); + Work w = std::move(e->w); + e->state.store(kEmpty, std::memory_order_release); + front = ((front - 1) & kMask2) | (front & ~kMask2); + front_.store(front, std::memory_order_relaxed); + return w; + } + + // PushBack adds w at the end of the queue. + // If queue is full returns w, otherwise returns default-constructed Work. + Work PushBack(Work w) { + EIGEN_MUTEX_LOCK lock(mutex_); + unsigned back = back_.load(std::memory_order_relaxed); + Elem* e = &array_[(back - 1) & kMask]; + uint8_t s = e->state.load(std::memory_order_relaxed); + if (s != kEmpty || !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire)) return w; + back = ((back - 1) & kMask2) | (back & ~kMask2); + back_.store(back, std::memory_order_relaxed); + e->w = std::move(w); + e->state.store(kReady, std::memory_order_release); + return Work(); + } + + // PopBack removes and returns the last elements in the queue. 
+ Work PopBack() { + if (Empty()) return Work(); + EIGEN_MUTEX_LOCK lock(mutex_); + unsigned back = back_.load(std::memory_order_relaxed); + Elem* e = &array_[back & kMask]; + uint8_t s = e->state.load(std::memory_order_relaxed); + if (s != kReady || !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire)) return Work(); + Work w = std::move(e->w); + e->state.store(kEmpty, std::memory_order_release); + back_.store(back + 1 + (kSize << 1), std::memory_order_relaxed); + return w; + } + + // PopBackHalf removes and returns half last elements in the queue. + // Returns number of elements removed. + unsigned PopBackHalf(std::vector* result) { + if (Empty()) return 0; + EIGEN_MUTEX_LOCK lock(mutex_); + unsigned back = back_.load(std::memory_order_relaxed); + unsigned size = Size(); + unsigned mid = back; + if (size > 1) mid = back + (size - 1) / 2; + unsigned n = 0; + unsigned start = 0; + for (; static_cast(mid - back) >= 0; mid--) { + Elem* e = &array_[mid & kMask]; + uint8_t s = e->state.load(std::memory_order_relaxed); + if (n == 0) { + if (s != kReady || !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire)) continue; + start = mid; + } else { + // Note: no need to store temporal kBusy, we exclusively own these + // elements. + eigen_plain_assert(s == kReady); + } + result->push_back(std::move(e->w)); + e->state.store(kEmpty, std::memory_order_release); + n++; + } + if (n != 0) back_.store(start + 1 + (kSize << 1), std::memory_order_relaxed); + return n; + } + + // Size returns current queue size. + // Can be called by any thread at any time. + unsigned Size() const { return SizeOrNotEmpty(); } + + // Empty tests whether container is empty. + // Can be called by any thread at any time. + bool Empty() const { return SizeOrNotEmpty() == 0; } + + // Delete all the elements from the queue. + void Flush() { + while (!Empty()) { + PopFront(); + } + } + + private: + static const unsigned kMask = kSize - 1; + static const unsigned kMask2 = (kSize << 1) - 1; + + enum State { + kEmpty, + kBusy, + kReady, + }; + + struct Elem { + std::atomic state; + Work w; + }; + + // Low log(kSize) + 1 bits in front_ and back_ contain rolling index of + // front/back, respectively. The remaining bits contain modification counters + // that are incremented on Push operations. This allows us to (1) distinguish + // between empty and full conditions (if we would use log(kSize) bits for + // position, these conditions would be indistinguishable); (2) obtain + // consistent snapshot of front_/back_ for Size operation using the + // modification counters. + EIGEN_ALIGN_TO_AVOID_FALSE_SHARING std::atomic front_; + EIGEN_ALIGN_TO_AVOID_FALSE_SHARING std::atomic back_; + EIGEN_MUTEX mutex_; // guards `PushBack` and `PopBack` (accesses `back_`) + + EIGEN_ALIGN_TO_AVOID_FALSE_SHARING Elem array_[kSize]; + + // SizeOrNotEmpty returns current queue size; if NeedSizeEstimate is false, + // only whether the size is 0 is guaranteed to be correct. + // Can be called by any thread at any time. + template + unsigned SizeOrNotEmpty() const { + // Emptiness plays critical role in thread pool blocking. So we go to great + // effort to not produce false positives (claim non-empty queue as empty). + unsigned front = front_.load(std::memory_order_acquire); + for (;;) { + // Capture a consistent snapshot of front/tail. 
+ unsigned back = back_.load(std::memory_order_acquire); + unsigned front1 = front_.load(std::memory_order_relaxed); + if (front != front1) { + front = front1; + std::atomic_thread_fence(std::memory_order_acquire); + continue; + } + if (NeedSizeEstimate) { + return CalculateSize(front, back); + } else { + // This value will be 0 if the queue is empty, and undefined otherwise. + unsigned maybe_zero = ((front ^ back) & kMask2); + // Queue size estimate must agree with maybe zero check on the queue + // empty/non-empty state. + eigen_assert((CalculateSize(front, back) == 0) == (maybe_zero == 0)); + return maybe_zero; + } + } + } + + EIGEN_ALWAYS_INLINE unsigned CalculateSize(unsigned front, unsigned back) const { + int size = (front & kMask2) - (back & kMask2); + // Fix overflow. + if (EIGEN_PREDICT_FALSE(size < 0)) size += 2 * kSize; + // Order of modification in push/pop is crafted to make the queue look + // larger than it is during concurrent modifications. E.g. push can + // increment size before the corresponding pop has decremented it. + // So the computed size can be up to kSize + 1, fix it. + if (EIGEN_PREDICT_FALSE(size > static_cast(kSize))) size = kSize; + return static_cast(size); + } + + RunQueue(const RunQueue&) = delete; + void operator=(const RunQueue&) = delete; +}; +*/ + + } // end of namespace tf ----------------------------------------------------- + diff --git a/bundled/taskflow-3.10.0/taskflow/utility/os.hpp b/bundled/taskflow-3.10.0/taskflow/utility/os.hpp index 9a10f6b994..99826179f2 100644 --- a/bundled/taskflow-3.10.0/taskflow/utility/os.hpp +++ b/bundled/taskflow-3.10.0/taskflow/utility/os.hpp @@ -3,6 +3,7 @@ #include #include #include +#include #define TF_OS_LINUX 0 #define TF_OS_DRAGONFLY 0 @@ -247,6 +248,13 @@ inline void pause() { #endif } +/** +@brief pause CPU for a specified number of iterations +*/ +inline void pause(size_t count) { + while(count-- > 0) pause(); +} + /** * @brief spins until the given predicate becomes true * diff --git a/bundled/taskflow-3.10.0/taskflow/utility/small_vector.hpp b/bundled/taskflow-3.10.0/taskflow/utility/small_vector.hpp index 0ef8f9c960..1fe107a031 100644 --- a/bundled/taskflow-3.10.0/taskflow/utility/small_vector.hpp +++ b/bundled/taskflow-3.10.0/taskflow/utility/small_vector.hpp @@ -114,9 +114,15 @@ class SmallVectorTemplateCommon : public SmallVectorBase { private: template friend struct SmallVectorStorage; + //template + //struct AlignedUnionType { + // alignas(X) std::byte buff[std::max(sizeof(std::byte), sizeof(X))]; + //}; + template struct AlignedUnionType { - alignas(X) std::byte buff[std::max(sizeof(std::byte), sizeof(X))]; + static constexpr std::size_t max_size = (sizeof(std::byte) > sizeof(X)) ? sizeof(std::byte) : sizeof(X); + alignas(X) std::byte buff[max_size]; }; // Allocate raw space for N elements of type T. If T has a ctor or dtor, we
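// A minimal sketch (not part of the bundled sources) showing a typical use of the new
// tf::pause(size_t) overload added to os.hpp above: it issues the architecture-specific
// pause/yield hint the requested number of times, which suits back-off spin loops. The
// flag, bound, and function name below are illustrative only.
#include <atomic>
#include <cstddef>

void wait_until_set(std::atomic<bool>& flag) {
  std::size_t backoff = 1;
  while (!flag.load(std::memory_order_acquire)) {
    tf::pause(backoff);                  // spin-hint 'backoff' times before re-checking
    if (backoff < 64) {
      backoff <<= 1;                     // exponential back-off, capped at 64 iterations
    }
  }
}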