See a quick [presentation](https://taskflow.github.io/) and
visit the [documentation][documentation] to learn more about Taskflow.
-Technical details can be referred to our [IEEE TPDS paper][TPDS21].
+For technical details, please refer to our [IEEE TPDS paper][TPDS22].
# Start Your First Taskflow Program
## Offload a Task to a GPU
-Taskflow supports GPU tasking for you to accelerate a wide range of scientific computing applications by harnessing the power of CPU-GPU collaborative computing using CUDA.
+Taskflow supports GPU tasking for you to accelerate a wide range of scientific computing applications by harnessing the power of CPU-GPU collaborative computing using Nvidia CUDA Graph.
```cpp
__global__ void saxpy(size_t N, float alpha, float* dx, float* dy) {
  int i = blockIdx.x*blockDim.x + threadIdx.x;
  if(i < N) {
    dy[i] = alpha*dx[i] + dy[i];
  }
}
-tf::Task cudaflow = taskflow.emplace([&](tf::cudaFlow& cf) {
-
- // data copy tasks
- tf::cudaTask h2d_x = cf.copy(dx, hx.data(), N).name("h2d_x");
- tf::cudaTask h2d_y = cf.copy(dy, hy.data(), N).name("h2d_y");
- tf::cudaTask d2h_x = cf.copy(hx.data(), dx, N).name("d2h_x");
- tf::cudaTask d2h_y = cf.copy(hy.data(), dy, N).name("d2h_y");
- // kernel task with parameters to launch the saxpy kernel
-  tf::cudaTask kernel = cf.kernel(
-    (N+255)/256, 256, 0, saxpy, N, 2.0f, dx, dy
-  ).name("saxpy");
-
+// create a CUDA Graph task
+tf::Task cudaflow = taskflow.emplace([&]() {
+ tf::cudaGraph cg;
+ tf::cudaTask h2d_x = cg.copy(dx, hx.data(), N);
+ tf::cudaTask h2d_y = cg.copy(dy, hy.data(), N);
+ tf::cudaTask d2h_x = cg.copy(hx.data(), dx, N);
+ tf::cudaTask d2h_y = cg.copy(hy.data(), dy, N);
+  tf::cudaTask kernel = cg.kernel((N+255)/256, 256, 0, saxpy, N, 2.0f, dx, dy);
  kernel.succeed(h2d_x, h2d_y)
        .precede(d2h_x, d2h_y);
-}).name("cudaFlow");
+
+ // instantiate an executable CUDA graph and run it through a stream
+ tf::cudaGraphExec exec(cg);
+ tf::cudaStream stream;
+ stream.run(exec).synchronize();
+}).name("CUDA Graph Task");
```
<p align="center"><img src="doxygen/images/saxpy_1_cudaflow.svg"></p>
Taskflow defines algorithms for you to quickly express common parallel patterns using standard C++ syntaxes,
such as parallel iterations, parallel reductions, and parallel sort.
```cpp
-// standard parallel CPU algorithms
tf::Task task1 = taskflow.for_each( // assign each element to 100 in parallel
first, last, [] (auto& i) { i = 100; }
);
tf::Task task3 = taskflow.sort( // sort a range of items in parallel
first, last, [] (auto a, auto b) { return a < b; }
);
-
-// standard parallel GPU algorithms
-tf::cudaTask cuda1 = cudaflow.for_each( // assign each element to 100 on GPU
- dfirst, dlast, [] __device__ (auto i) { i = 100; }
-);
-tf::cudaTask cuda2 = cudaflow.reduce( // reduce a range of items on GPU
- dfirst, dlast, init, [] __device__ (auto a, auto b) { return a + b; }
-);
-tf::cudaTask cuda3 = cudaflow.sort( // sort a range of items on GPU
- dfirst, dlast, [] __device__ (auto a, auto b) { return a < b; }
-);
```
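A parallel reduction follows the same pattern; the sketch below assumes an iterator range `[first, last)` and an accumulator variable `init` declared elsewhere:

```cpp
tf::Task reduce_task = taskflow.reduce(  // sum a range of items in parallel
  first, last, init, [] (auto a, auto b) { return a + b; }
);
```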
Additionally, Taskflow provides composable graph building blocks for you to
efficiently implement common parallel algorithms, such as parallel pipeline.
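The sketch below illustrates this composition model with a two-stage pipeline scheduled over four parallel lines; the stage bodies, the line count, and the five-token cutoff are illustrative values rather than code from this repository:

```cpp
#include <cstdio>
#include <taskflow/taskflow.hpp>
#include <taskflow/algorithm/pipeline.hpp>  // tf::Pipeline is provided by a separate algorithm header

int main() {
  tf::Taskflow taskflow;
  tf::Executor executor;

  // a two-stage pipeline running over four parallel lines
  tf::Pipeline pl(4,
    tf::Pipe{tf::PipeType::SERIAL, [](tf::Pipeflow& pf) {
      if(pf.token() == 5) {  // stop the pipeline after five tokens
        pf.stop();
      }
    }},
    tf::Pipe{tf::PipeType::PARALLEL, [](tf::Pipeflow& pf) {
      std::printf("stage 2: token %zu\n", pf.token());
    }}
  );

  // compose the pipeline into the taskflow and run it
  taskflow.composed_of(pl).name("pipeline");
  executor.run(taskflow).wait();
}
```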
| <!-- --> | <!-- --> | <!-- --> | <!-- --> |
|:-------------------------:|:-------------------------:|:-------------------------:|:-------------------------:|
|<img src="doxygen/images/utah-ece-logo.png">|<img src="doxygen/images/nsf.png"> | <img src="doxygen/images/darpa.png"> | <img src="doxygen/images/NumFocus.png">|
-|<img src="doxygen/images/nvidia-logo.png"> | | | |
+|<img src="doxygen/images/nvidia-logo.png"> | <img src="doxygen/images/uw-madison-ece-logo.png"> | | |
# License
namespace tf {
-namespace detail {
-
-// Function: find_if_loop
-template <typename Iterator, typename Predicate>
-bool find_if_loop(
- std::atomic<size_t>& offset,
- Iterator& beg,
- size_t& prev_e,
- size_t curr_b,
- size_t curr_e,
- Predicate predicate
-) {
- // early prune
- if(offset.load(std::memory_order_relaxed) < curr_b) {
- return true;
- }
- std::advance(beg, curr_b - prev_e);
- for(size_t x = curr_b; x<curr_e; x++) {
- if(predicate(*beg++)) {
- atomic_min(offset, x);
- return true;
- }
- }
- prev_e = curr_e;
- return false;
-}
-
-// Function: find_if_not_loop
-template <typename Iterator, typename Predicate>
-bool find_if_not_loop(
- std::atomic<size_t>& offset,
- Iterator& beg,
- size_t& prev_e,
- size_t curr_b,
- size_t curr_e,
- Predicate predicate
-) {
-
- // early prune
- if(offset.load(std::memory_order_relaxed) < curr_b) {
- return true;
- }
- std::advance(beg, curr_b - prev_e);
- for(size_t x = curr_b; x<curr_e; x++) {
- if(!predicate(*beg++)) {
- atomic_min(offset, x);
- return true;
- }
- }
- prev_e = curr_e;
- return false;
-}
-
-} // namespace detail --------------------------------------------------------
-
// Function: make_find_if_task
template <typename B, typename E, typename T, typename UOP, typename P = DefaultPartitioner>
auto make_find_if_task(B first, E last, T& result, UOP predicate, P part = P()) {
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
- part([&](){ result = std::find_if(beg, end, predicate); })();
+ part([=, &result]() mutable { result = std::find_if(beg, end, predicate); })();
return;
}
if(N < W) {
W = N;
}
-
- // we leverage smart pointer to let the last task update the result
- std::shared_ptr<std::atomic<size_t>> offset(
- new std::atomic<size_t>(N),
- [=, &result](std::atomic<size_t>* p) {
- result = std::next(beg, p->load(std::memory_order_relaxed));
- delete p;
- }
- );
+
+ auto mutex = std::make_shared<std::mutex>();
+ const auto origin = beg;
+ result = std::next(origin, N);
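+  // result starts at the end iterator (not found); each worker that finds a match
+  // shrinks it to the smallest matching position under the shared mutex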
// static partitioner
if constexpr(part.type() == PartitionerType::STATIC) {
for(size_t w=0, curr_b=0; w<W && curr_b < N;) {
auto chunk_size = part.adjusted_chunk_size(N, W, w);
- auto task = part([=] () mutable {
+ auto task = part([=, &result] () mutable {
part.loop_until(N, W, curr_b, chunk_size,
- [=, &offset, prev_e=size_t{0}](size_t part_b, size_t part_e) mutable {
- return detail::find_if_loop(
- *offset, beg, prev_e, part_b, part_e, predicate
- );
+ [=, &result, prev_e=size_t{0}](size_t part_b, size_t part_e) mutable {
+ std::advance(beg, part_b - prev_e);
+ for(size_t x = part_b; x<part_e; x++) {
+ if(predicate(*beg++)) {
+ std::lock_guard<std::mutex> lock(*mutex);
+ if(size_t offset = std::distance(origin, result); x < offset) {
+ result = std::next(origin, x);
+ }
+ return true;
+ }
+ }
+ prev_e = part_e;
+ return false;
}
);
- // must release the ownership before async is destroyed
- // as the node deletion comes after the join counter reaches zero
- offset.reset();
});
(++w == W || (curr_b += chunk_size) >= N) ? task() : rt.silent_async(task);
}
else {
auto next = std::make_shared<std::atomic<size_t>>(0);
for(size_t w=0; w<W;) {
- auto task = part([=] () mutable {
+ auto task = part([=, &result] () mutable {
part.loop_until(N, W, *next,
- [=, &offset, prev_e=size_t{0}](size_t curr_b, size_t curr_e) mutable {
- return detail::find_if_loop(
- *offset, beg, prev_e, curr_b, curr_e, predicate
- );
+ [=, &result, prev_e=size_t{0}](size_t part_b, size_t part_e) mutable {
+ std::advance(beg, part_b - prev_e);
+ for(size_t x = part_b; x<part_e; x++) {
+ if(predicate(*beg++)) {
+ std::lock_guard<std::mutex> lock(*mutex);
+ if(size_t offset = std::distance(origin, result); x < offset) {
+ result = std::next(origin, x);
+ }
+ return true;
+ }
+ }
+ prev_e = part_e;
+ return false;
}
);
- // must release the ownership before async is destroyed
- // as the node deletion comes after the join counter reaches zero
- offset.reset();
});
(++w == W) ? task() : rt.silent_async(task);
}
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
- part([&](){ result = std::find_if_not(beg, end, predicate); })();
+ part([=, &result] () mutable { result = std::find_if_not(beg, end, predicate); })();
return;
}
W = N;
}
- // we leverage smart pointer to let the last task update the result
- std::shared_ptr<std::atomic<size_t>> offset(
- new std::atomic<size_t>(N),
- [=, &result](std::atomic<size_t>* p) {
- result = std::next(beg, p->load(std::memory_order_relaxed));
- delete p;
- }
- );
-
+ auto mutex = std::make_shared<std::mutex>();
+ const auto origin = beg;
+ result = std::next(origin, N);
+
// static partitioner
if constexpr(part.type() == PartitionerType::STATIC) {
for(size_t w=0, curr_b=0; w<W && curr_b < N;) {
auto chunk_size = part.adjusted_chunk_size(N, W, w);
- auto task = part([=] () mutable {
+ auto task = part([=, &result] () mutable {
part.loop_until(N, W, curr_b, chunk_size,
- [=, &offset, prev_e=size_t{0}](size_t part_b, size_t part_e) mutable {
- return detail::find_if_not_loop(
- *offset, beg, prev_e, part_b, part_e, predicate
- );
+ [=, &result, prev_e=size_t{0}](size_t part_b, size_t part_e) mutable {
+ std::advance(beg, part_b - prev_e);
+ for(size_t x = part_b; x<part_e; x++) {
+ if(!predicate(*beg++)) {
+ std::lock_guard<std::mutex> lock(*mutex);
+ if(size_t offset = std::distance(origin, result); x < offset) {
+ result = std::next(origin, x);
+ }
+ return true;
+ }
+ }
+ prev_e = part_e;
+ return false;
}
);
- // must release the ownership before async is destroyed
- // as the node deletion comes after the join counter reaches zero
- offset.reset();
});
(++w == W || (curr_b += chunk_size) >= N) ? task() : rt.silent_async(task);
}
else {
auto next = std::make_shared<std::atomic<size_t>>(0);
for(size_t w=0; w<W;) {
- auto task = part([=] () mutable {
+ auto task = part([=, &result] () mutable {
part.loop_until(N, W, *next,
- [=, &offset, prev_e=size_t{0}](size_t curr_b, size_t curr_e) mutable {
- return detail::find_if_not_loop(
- *offset, beg, prev_e, curr_b, curr_e, predicate
- );
+ [=, &result, prev_e=size_t{0}](size_t part_b, size_t part_e) mutable {
+ std::advance(beg, part_b - prev_e);
+ for(size_t x = part_b; x<part_e; x++) {
+ if(!predicate(*beg++)) {
+ std::lock_guard<std::mutex> lock(*mutex);
+ if(size_t offset = std::distance(origin, result); x < offset) {
+ result = std::next(origin, x);
+ }
+ return true;
+ }
+ }
+ prev_e = part_e;
+ return false;
}
);
- // must release the ownership before async is destroyed
- // as the node deletion comes after the join counter reaches zero
- offset.reset();
});
(++w == W) ? task() : rt.silent_async(task);
}
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
- part([&](){ result = std::min_element(beg, end, comp); })();
+ part([=, &result] () mutable { result = std::min_element(beg, end, comp); })();
return;
}
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
- part([&](){ result = std::max_element(beg, end, comp); })();
+ part([=, &result] () mutable { result = std::max_element(beg, end, comp); })();
return;
}
template <typename B, typename E, typename C, typename P = DefaultPartitioner>
auto make_for_each_task(B b, E e, C c, P part = P()) {
- using namespace std::string_literals;
-
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
// the workload is sequentially doable
if(W <= 1 || N <= part.chunk_size()) {
- part([=](){ std::for_each(beg, end, c); })();
+ part([=]() mutable { std::for_each(beg, end, c); })();
return;
}
template <typename B, typename E, typename S, typename C, typename P = DefaultPartitioner>
auto make_for_each_index_task(B b, E e, S s, C c, P part = P()){
- using namespace std::string_literals;
-
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using S_t = std::decay_t<unwrap_ref_decay_t<S>>;
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
- part([&](){
+ part([=]() mutable {
for(size_t x=0; x<N; x++, beg+=inc) {
c(beg);
}
auto next = std::make_shared<std::atomic<size_t>>(0);
for(size_t w=0; w<W;) {
auto task = part([=] () mutable {
- part.loop(N, W, *next, [=] (size_t part_b, size_t part_e) {
+ part.loop(N, W, *next, [=] (size_t part_b, size_t part_e) mutable {
auto idx = static_cast<B_t>(part_b) * inc + beg;
for(size_t x=part_b; x<part_e; x++, idx += inc) {
c(idx);
};
}
-// Function: make_for_each_index_task
+// Function: make_for_each_by_index_task
template <typename R, typename C, typename P = DefaultPartitioner>
-auto make_for_each_index_task(R range, C c, P part = P()){
+auto make_for_each_by_index_task(R range, C c, P part = P()){
using range_type = std::decay_t<unwrap_ref_decay_t<R>>;
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
- part([&](){ c(r); })();
+ part([=]() mutable { c(r); })();
return;
}
);
}
-// Function: for_each_index
+// Function: for_each_by_index
template <typename R, typename C, typename P>
-Task FlowBuilder::for_each_index(R range, C c, P part){
+Task FlowBuilder::for_each_by_index(R range, C c, P part){
return emplace(
- make_for_each_index_task(range, c, part)
+ make_for_each_by_index_task(range, c, part)
);
}
}
PreemptionGuard preemption_guard(rt);
rt._executor._schedule_graph_with_parent(
- rt._worker, graph.begin(), graph.end(), rt._parent, NSTATE::NONE
+ rt._worker, graph.begin(), graph.end(), rt._parent
);
};
}
if(curr_b >= N) {
return;
}
- func(curr_b, std::min(curr_b + chunk_size, N));
+ func(curr_b, (std::min)(curr_b + chunk_size, N));
}
break;
}
q = chunk_size;
}
//size_t curr_e = (q <= r) ? curr_b + q : N;
- size_t curr_e = std::min(curr_b + q, N);
+ size_t curr_e = (std::min)(curr_b + q, N);
if(next.compare_exchange_strong(curr_b, curr_e, std::memory_order_relaxed,
std::memory_order_relaxed)) {
func(curr_b, curr_e);
if(curr_b >= N) {
return;
}
- if(func(curr_b, std::min(curr_b + chunk_size, N))) {
+ if(func(curr_b, (std::min)(curr_b + chunk_size, N))) {
return;
}
}
q = chunk_size;
}
//size_t curr_e = (q <= r) ? curr_b + q : N;
- size_t curr_e = std::min(curr_b + q, N);
+ size_t curr_e = (std::min)(curr_b + q, N);
if(next.compare_exchange_strong(curr_b, curr_e, std::memory_order_relaxed,
std::memory_order_relaxed)) {
if(func(curr_b, curr_e)) {
size_t curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed);
while(curr_b < N) {
- func(curr_b, std::min(curr_b + chunk_size, N));
+ func(curr_b, (std::min)(curr_b + chunk_size, N));
curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed);
}
}
size_t curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed);
while(curr_b < N) {
- if(func(curr_b, std::min(curr_b + chunk_size, N))) {
+ if(func(curr_b, (std::min)(curr_b + chunk_size, N))) {
return;
}
curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed);
) {
size_t stride = W * chunk_size;
while(curr_b < N) {
- size_t curr_e = std::min(curr_b + chunk_size, N);
+ size_t curr_e = (std::min)(curr_b + chunk_size, N);
func(curr_b, curr_e);
curr_b += stride;
}
) {
size_t stride = W * chunk_size;
while(curr_b < N) {
- size_t curr_e = std::min(curr_b + chunk_size, N);
+ size_t curr_e = (std::min)(curr_b + chunk_size, N);
if(func(curr_b, curr_e)) {
return;
}
std::swap(b1, b2);
}
- b1 = std::max(b1, size_t{1});
- b2 = std::max(b2, b1 + 1);
+ b1 = (std::max)(b1, size_t{1});
+ b2 = (std::max)(b2, b1 + 1);
return {b1, b2};
}
size_t curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed);
while(curr_b < N) {
- func(curr_b, std::min(curr_b + chunk_size, N));
+ func(curr_b, (std::min)(curr_b + chunk_size, N));
chunk_size = dist(engine);
curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed);
}
size_t curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed);
while(curr_b < N) {
- if(func(curr_b, std::min(curr_b + chunk_size, N))){
+ if(func(curr_b, (std::min)(curr_b + chunk_size, N))){
return;
}
chunk_size = dist(engine);
private:
float _alpha {0.01f};
- float _beta {0.5f};
+ float _beta {0.50f};
};
/**
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
- return [=, &r=init] (Runtime& rt) mutable {
+ return [=, &init] (Runtime& rt) mutable {
// fetch the iterator values
B_t beg = b;
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
- part([&](){ for(; beg!=end; r = bop(r, *beg++)); })();
+ part([=, &init] () mutable { for(; beg!=end; init = bop(init, *beg++)); })();
return;
}
// variable sum need to avoid copy at the first step
auto chunk_size = std::max(size_t{2}, part.adjusted_chunk_size(N, W, w));
- auto task = part([=, &r] () mutable {
+ auto task = part([=, &init] () mutable {
std::advance(beg, curr_b);
if(N - curr_b == 1) {
std::lock_guard<std::mutex> lock(*mutex);
- r = bop(r, *beg);
+ init = bop(init, *beg);
return;
}
// final reduce
std::lock_guard<std::mutex> lock(*mutex);
- r = bop(r, sum);
+ init = bop(init, sum);
});
(++w == W || (curr_b += chunk_size) >= N) ? task() : rt.silent_async(task);
for(size_t w=0; w<W;) {
- auto task = part([=, &r] () mutable {
+ auto task = part([=, &init] () mutable {
// pre-reduce
size_t s0 = next->fetch_add(2, std::memory_order_relaxed);
if(N - s0 == 1) {
std::lock_guard<std::mutex> lock(*mutex);
- r = bop(r, *beg);
+ init = bop(init, *beg);
return;
}
// final reduce
std::lock_guard<std::mutex> lock(*mutex);
- r = bop(r, sum);
+ init = bop(init, sum);
});
(++w == W) ? task() : rt.silent_async(task);
}
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
- return [=, &r=init] (Runtime& rt) mutable {
+ return [=, &init] (Runtime& rt) mutable {
// fetch the iterator values
B_t beg = b;
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
- part([&](){ for(; beg!=end; r = bop(std::move(r), uop(*beg++))); })();
+ part([=, &init] () mutable { for(; beg!=end; init = bop(std::move(init), uop(*beg++))); })();
return;
}
auto chunk_size = part.adjusted_chunk_size(N, W, w);
- auto task = part([=, &r] () mutable {
+ auto task = part([=, &init] () mutable {
std::advance(beg, curr_b);
if(N - curr_b == 1) {
std::lock_guard<std::mutex> lock(*mutex);
- r = bop(std::move(r), uop(*beg));
+ init = bop(std::move(init), uop(*beg));
return;
}
// final reduce
std::lock_guard<std::mutex> lock(*mutex);
- r = bop(std::move(r), std::move(sum));
+ init = bop(std::move(init), std::move(sum));
});
(++w == W || (curr_b += chunk_size) >= N) ? task() : rt.silent_async(task);
else {
auto next = std::make_shared<std::atomic<size_t>>(0);
for(size_t w=0; w<W;) {
- auto task = part([=, &r] () mutable {
+ auto task = part([=, &init] () mutable {
// pre-reduce
size_t s0 = next->fetch_add(2, std::memory_order_relaxed);
if(N - s0 == 1) {
std::lock_guard<std::mutex> lock(*mutex);
- r = bop(std::move(r), uop(*beg));
+ init = bop(std::move(init), uop(*beg));
return;
}
// final reduce
std::lock_guard<std::mutex> lock(*mutex);
- r = bop(std::move(r), std::move(sum));
+ init = bop(std::move(init), std::move(sum));
});
(++w == W) ? task() : rt.silent_async(task);
}
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
- part([&](){ for(; beg1!=end1; r = bop_r(std::move(r), bop_t(*beg1++, *beg2++))); })();
+ part([=, &r] () mutable { for(; beg1!=end1; r = bop_r(std::move(r), bop_t(*beg1++, *beg2++))); })();
return;
}
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
- part([&](){ init = lop(r, std::move(init)); })();
+ part([=, &init] () mutable { init = lop(r, std::move(init)); })();
return;
}
// loop reduce
part.loop(N, W, curr_b, chunk_size, [=, &tmp](size_t part_b, size_t part_e) mutable {
- tmp = lop(r.discrete_domain(part_b, part_e), std::move(tmp));
+ tmp = lop(r.discrete_domain(part_b, part_e), std::move(tmp));
});
// final reduce - tmp is guaranteed to have value
detail::parallel_pdqsort<B_t, C,
is_std_compare_v<std::decay_t<C>> &&
std::is_arithmetic_v<typename std::iterator_traits<B_t>::value_type>
- >(rt, beg, end, cmp, log2(end - beg));
+ >(rt, beg, end, cmp, log2(size_t(end - beg)));
};
}
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
- part([=](){ std::transform(beg, end, d_beg, c); })();
+ part([=]() mutable { std::transform(beg, end, d_beg, c); })();
return;
}
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
- part([=](){ std::transform(beg1, end1, beg2, d_beg, c); })();
+ part([=]() mutable { std::transform(beg1, end1, beg2, d_beg, c); })();
return;
}
#pragma once
#include "executor.hpp"
+#include "runtime.hpp"
// https://hackmd.io/@sysprog/concurrency-atomics
namespace tf {
+// ----------------------------------------------------------------------------
+// Async Helper Methods
+// ----------------------------------------------------------------------------
+
+// Procedure: _schedule_async_task
+TF_FORCE_INLINE void Executor::_schedule_async_task(Node* node) {
+ (pt::this_worker) ? _schedule(*pt::this_worker, node) : _schedule(node);
+}
+
+// Procedure: _tear_down_async
+inline void Executor::_tear_down_async(Worker& worker, Node* node, Node*& cache) {
+
+ // from executor
+ if(auto parent = node->_parent; parent == nullptr) {
+ _decrement_topology();
+ }
+ // from runtime
+ else {
+ auto state = parent->_nstate;
+ if(parent->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1) {
+ if(state & NSTATE::PREEMPTED) {
+ _update_cache(worker, cache, parent);
+ }
+ }
+ }
+ recycle(node);
+}
+
// ----------------------------------------------------------------------------
// Async
// ----------------------------------------------------------------------------
return async(DefaultTaskParams{}, std::forward<F>(f));
}
+// Function: _async
+template <typename P, typename F>
+auto Executor::_async(P&& params, F&& f, Topology* tpg, Node* parent) {
+
+ // async task with runtime: [] (tf::Runtime&) -> void {}
+ if constexpr (is_runtime_task_v<F>) {
+
+ std::promise<void> p;
+ auto fu{p.get_future()};
+
+ _schedule_async_task(animate(
+ NSTATE::NONE, ESTATE::ANCHORED, std::forward<P>(params), tpg, parent, 0,
+ std::in_place_type_t<Node::Async>{},
+ [p=MoC{std::move(p)}, f=std::forward<F>(f)](Runtime& rt, bool reentered) mutable {
+ if(!reentered) {
+ f(rt);
+ }
+ else {
+ auto& eptr = rt._parent->_exception_ptr;
+ eptr ? p.object.set_exception(eptr) : p.object.set_value();
+ }
+ }
+ ));
+ return fu;
+ }
+ // async task with closure: [] () -> auto { return ... }
+ else if constexpr (std::is_invocable_v<F>){
+ using R = std::invoke_result_t<F>;
+ std::packaged_task<R()> p(std::forward<F>(f));
+ auto fu{p.get_future()};
+ _schedule_async_task(animate(
+ NSTATE::NONE, ESTATE::NONE, std::forward<P>(params), tpg, parent, 0,
+ std::in_place_type_t<Node::Async>{},
+ [p=make_moc(std::move(p))]() mutable { p.object(); }
+ ));
+ return fu;
+ }
+ else {
+ static_assert(dependent_false_v<F>,
+ "invalid async target - must be one of the following types:\n\
+ (1) [] (tf::Runtime&) -> void {}\n\
+ (2) [] () -> auto { ... return ... }\n"
+ );
+ }
+}
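+
+// Illustrative usage of the two forms dispatched above (a sketch, not part of this
+// header; `executor` denotes a user-side tf::Executor object):
+//
+//   std::future<int>  fu1 = executor.async([]() { return 42; });      // plain closure
+//   std::future<void> fu2 = executor.async([](tf::Runtime& rt) {});   // runtime-task form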
+
+
// ----------------------------------------------------------------------------
// Silent Async
// ----------------------------------------------------------------------------
template <typename P, typename F>
void Executor::silent_async(P&& params, F&& f) {
_increment_topology();
- _silent_async(
- std::forward<P>(params), std::forward<F>(f), nullptr, nullptr
- );
+ _silent_async(std::forward<P>(params), std::forward<F>(f), nullptr, nullptr);
}
// Function: silent_async
silent_async(DefaultTaskParams{}, std::forward<F>(f));
}
-// ----------------------------------------------------------------------------
-// Async Helper Methods
-// ----------------------------------------------------------------------------
-
-// Procedure: _schedule_async_task
-inline void Executor::_schedule_async_task(Node* node) {
- // Here we don't use _this_worker since _schedule will check if the
- // given worker belongs to this executor.
- (pt::this_worker && pt::this_worker->_executor == this) ? _schedule(*pt::this_worker, node) :
- _schedule(node);
-}
-
-// Procedure: _tear_down_async
-inline void Executor::_tear_down_async(Worker& worker, Node* node, Node*& cache) {
-
- // from executor
- if(auto parent = node->_parent; parent == nullptr) {
- _decrement_topology();
+// Function: _silent_async
+template <typename P, typename F>
+void Executor::_silent_async(P&& params, F&& f, Topology* tpg, Node* parent) {
+ // silent task
+ if constexpr (is_runtime_task_v<F> || is_static_task_v<F>) {
+ _schedule_async_task(animate(
+ NSTATE::NONE, ESTATE::NONE, std::forward<P>(params), tpg, parent, 0,
+ std::in_place_type_t<Node::Async>{}, std::forward<F>(f)
+ ));
}
- // from runtime
+ // invalid silent async target
else {
- auto state = parent->_nstate;
- if(parent->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1) {
- if(state & NSTATE::PREEMPTED) {
- _update_cache(worker, cache, parent);
- }
- }
+ static_assert(dependent_false_v<F>,
+ "invalid silent_async target - must be one of the following types:\n\
+ (1) [] (tf::Runtime&) -> void {}\n\
+ (2) [] () -> void { ... }\n"
+ );
}
- recycle(node);
}
// ----------------------------------------------------------------------------
size_t num_dependents = std::distance(first, last);
AsyncTask task(animate(
- std::forward<P>(params), nullptr, nullptr, num_dependents,
+ NSTATE::NONE, ESTATE::NONE, std::forward<P>(params), nullptr, nullptr, num_dependents,
std::in_place_type_t<Node::DependentAsync>{}, std::forward<F>(func)
));
size_t num_dependents = std::distance(first, last);
- // async with runtime: [] (tf::Runtime&) {}
+ // async with runtime: [] (tf::Runtime&) -> void {}
if constexpr (is_runtime_task_v<F>) {
std::promise<void> p;
return std::make_pair(std::move(task), std::move(fu));
}
- // async without runtime: [] () {}
+ // async without runtime: [] () -> auto { return ... }
else if constexpr(std::is_invocable_v<F>) {
using R = std::invoke_result_t<F>;
auto fu{p.get_future()};
AsyncTask task(animate(
- std::forward<P>(params), nullptr, nullptr, num_dependents,
+ NSTATE::NONE, ESTATE::NONE, std::forward<P>(params), nullptr, nullptr, num_dependents,
std::in_place_type_t<Node::DependentAsync>{},
[p=make_moc(std::move(p))] () mutable { p.object(); }
));
auto& state = std::get_if<Node::DependentAsync>(&(task._node->_handle))->state;
- add_successor:
+ while (true) {
- auto target = ASTATE::UNFINISHED;
-
- // acquires the lock
- if(state.compare_exchange_weak(target, ASTATE::LOCKED,
- std::memory_order_acq_rel,
- std::memory_order_acquire)) {
- task._node->_successors.push_back(node);
- state.store(ASTATE::UNFINISHED, std::memory_order_release);
- }
- // dep's state is FINISHED, which means dep finished its callable already
- // thus decrement the node's join counter by 1
- else if (target == ASTATE::FINISHED) {
- num_dependents = node->_join_counter.fetch_sub(1, std::memory_order_acq_rel) - 1;
- }
- // another worker adding its async task to the same successors of this node
- else {
- goto add_successor;
+ auto target = ASTATE::UNFINISHED;
+
+ // Try to acquire the lock
+ if (state.compare_exchange_strong(target, ASTATE::LOCKED,
+ std::memory_order_acq_rel,
+ std::memory_order_acquire)) {
+ task._node->_edges.push_back(node);
+ state.store(ASTATE::UNFINISHED, std::memory_order_release);
+ break;
+ }
+
+ // If already finished, decrement the join counter
+ if (target == ASTATE::FINISHED) {
+ num_dependents = node->_join_counter.fetch_sub(1, std::memory_order_acq_rel) - 1;
+ break;
+ }
+
+ // If locked by another worker, retry
}
}
}
// spawn successors whenever their dependencies are resolved
- for(size_t i=0; i<node->_successors.size(); ++i) {
- if(auto s = node->_successors[i];
+ for(size_t i=0; i<node->_edges.size(); ++i) {
+ if(auto s = node->_edges[i];
s->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1
) {
_update_cache(worker, cache, s);
namespace tf {
+//-----------------------------------------------------------------------------
+
class AtomicNotifier {
friend class Executor;
void prepare_wait(Waiter*) noexcept;
void cancel_wait(Waiter*) noexcept;
void commit_wait(Waiter*) noexcept;
+
size_t size() const noexcept;
+ size_t num_waiters() const noexcept;
private:
std::vector<Waiter> _waiters;
static constexpr uint64_t WAITER_INC {1};
- static constexpr size_t EPOCH_SHIFT {32};
+ static constexpr uint64_t EPOCH_SHIFT {32};
static constexpr uint64_t EPOCH_INC {uint64_t(1) << EPOCH_SHIFT};
static constexpr uint64_t WAITER_MASK {EPOCH_INC - 1};
};
-inline void AtomicNotifier::notify_one() noexcept {
- uint64_t prev = _state.fetch_add(EPOCH_INC, std::memory_order_acq_rel);
- if(TF_UNLIKELY(prev & WAITER_MASK)) { // has waiter (typically unlikely)
- _state.notify_one();
- }
-}
-
-inline void AtomicNotifier::notify_all() noexcept {
- uint64_t prev = _state.fetch_add(EPOCH_INC, std::memory_order_acq_rel);
- if(TF_UNLIKELY(prev & WAITER_MASK)) { // has waiter (typically unlikely)
- _state.notify_all();
- }
-}
-
-inline void AtomicNotifier::notify_n(size_t n) noexcept {
- if(n >= _waiters.size()) {
- notify_all();
- }
- else {
- for(size_t k=0; k<n; ++k) {
- notify_one();
- }
- }
-}
-
inline size_t AtomicNotifier::size() const noexcept {
return _waiters.size();
}
-inline void AtomicNotifier::prepare_wait(Waiter* waiter) noexcept {
- uint64_t prev = _state.fetch_add(WAITER_INC, std::memory_order_acq_rel);
- waiter->epoch = (prev >> EPOCH_SHIFT);
-}
-
-inline void AtomicNotifier::cancel_wait(Waiter*) noexcept {
- // memory_order_relaxed would suffice for correctness, but the faster
- // #waiters gets to 0, the less likely it is that we'll do spurious wakeups
- // (and thus system calls).
- _state.fetch_sub(WAITER_INC, std::memory_order_seq_cst);
-}
-
-inline void AtomicNotifier::commit_wait(Waiter* waiter) noexcept {
- uint64_t prev = _state.load(std::memory_order_acquire);
- while((prev >> EPOCH_SHIFT) == waiter->epoch) {
- _state.wait(prev, std::memory_order_acquire);
- prev = _state.load(std::memory_order_acquire);
- }
- // memory_order_relaxed would suffice for correctness, but the faster
- // #waiters gets to 0, the less likely it is that we'll do spurious wakeups
- // (and thus system calls)
- _state.fetch_sub(WAITER_INC, std::memory_order_seq_cst);
+inline size_t AtomicNotifier::num_waiters() const noexcept {
+ return _state.load(std::memory_order_relaxed) & WAITER_MASK;
}
-//-----------------------------------------------------------------------------
-
-class AtomicNotifierV2 {
-
- friend class Executor;
-
- public:
-
- struct Waiter {
- alignas (2*TF_CACHELINE_SIZE) uint32_t epoch;
- };
-
- AtomicNotifierV2(size_t N) noexcept : _state(0), _waiters(N) {}
- ~AtomicNotifierV2() { assert((_state.load() & WAITER_MASK) == 0); }
-
- void notify_one() noexcept;
- void notify_all() noexcept;
- void notify_n(size_t n) noexcept;
- void prepare_wait(Waiter*) noexcept;
- void cancel_wait(Waiter*) noexcept;
- void commit_wait(Waiter*) noexcept;
- size_t size() const noexcept;
-
- private:
-
- AtomicNotifierV2(const AtomicNotifierV2&) = delete;
- AtomicNotifierV2(AtomicNotifierV2&&) = delete;
- AtomicNotifierV2& operator=(const AtomicNotifierV2&) = delete;
- AtomicNotifierV2& operator=(AtomicNotifierV2&&) = delete;
-
- // This requires 64-bit
- static_assert(sizeof(int) == 4, "bad platform");
- static_assert(sizeof(uint32_t) == 4, "bad platform");
- static_assert(sizeof(uint64_t) == 8, "bad platform");
- static_assert(sizeof(std::atomic<uint64_t>) == 8, "bad platform");
-
- // _state stores the epoch in the most significant 32 bits and the
- // waiter count in the least significant 32 bits.
- std::atomic<uint64_t> _state;
- std::vector<Waiter> _waiters;
-
- static constexpr uint64_t WAITER_INC {1};
- static constexpr uint64_t EPOCH_SHIFT {32};
- static constexpr uint64_t EPOCH_INC {uint64_t(1) << EPOCH_SHIFT};
- static constexpr uint64_t WAITER_MASK {EPOCH_INC - 1};
-};
-
-inline void AtomicNotifierV2::notify_one() noexcept {
+inline void AtomicNotifier::notify_one() noexcept {
std::atomic_thread_fence(std::memory_order_seq_cst);
//if((_state.load(std::memory_order_acquire) & WAITER_MASK) != 0) {
- // _state.fetch_add(EPOCH_INC, std::memory_order_release);
+ // _state.fetch_add(EPOCH_INC, std::memory_order_relaxed);
// _state.notify_one();
//}
}
}
-inline void AtomicNotifierV2::notify_all() noexcept {
+inline void AtomicNotifier::notify_all() noexcept {
std::atomic_thread_fence(std::memory_order_seq_cst);
//if((_state.load(std::memory_order_acquire) & WAITER_MASK) != 0) {
- // _state.fetch_add(EPOCH_INC, std::memory_order_release);
+ // _state.fetch_add(EPOCH_INC, std::memory_order_relaxed);
// _state.notify_all();
//}
for(uint64_t state = _state.load(std::memory_order_acquire); state & WAITER_MASK;) {
}
}
-inline void AtomicNotifierV2::notify_n(size_t n) noexcept {
+inline void AtomicNotifier::notify_n(size_t n) noexcept {
if(n >= _waiters.size()) {
notify_all();
}
}
}
-inline size_t AtomicNotifierV2::size() const noexcept {
- return _waiters.size();
-}
-
-inline void AtomicNotifierV2::prepare_wait(Waiter* waiter) noexcept {
+inline void AtomicNotifier::prepare_wait(Waiter* waiter) noexcept {
auto prev = _state.fetch_add(WAITER_INC, std::memory_order_relaxed);
waiter->epoch = (prev >> EPOCH_SHIFT);
std::atomic_thread_fence(std::memory_order_seq_cst);
}
-inline void AtomicNotifierV2::cancel_wait(Waiter*) noexcept {
- _state.fetch_sub(WAITER_INC, std::memory_order_relaxed);
+inline void AtomicNotifier::cancel_wait(Waiter*) noexcept {
+ _state.fetch_sub(WAITER_INC, std::memory_order_seq_cst);
}
-inline void AtomicNotifierV2::commit_wait(Waiter* waiter) noexcept {
- uint64_t prev = _state.load(std::memory_order_seq_cst);
+inline void AtomicNotifier::commit_wait(Waiter* waiter) noexcept {
+ uint64_t prev = _state.load(std::memory_order_acquire);
while((prev >> EPOCH_SHIFT) == waiter->epoch) {
- _state.wait(prev, std::memory_order_seq_cst);
- prev = _state.load(std::memory_order_seq_cst);
+ _state.wait(prev, std::memory_order_acquire);
+ prev = _state.load(std::memory_order_acquire);
}
// memory_order_relaxed would suffice for correctness, but the faster
// #waiters gets to 0, the less likely it is that we'll do spurious wakeups
// ----------------------------------------------------------------------------
class Algorithm;
-class AsyncTopology;
class Node;
class Graph;
class FlowBuilder;
class cudaFlowSequentialOptimizer;
class cudaFlowRoundRobinOptimizer;
-// ----------------------------------------------------------------------------
-// syclFlow
-// ----------------------------------------------------------------------------
-class syclNode;
-class syclGraph;
-class syclTask;
-class syclFlow;
+template <typename C, typename D>
+class cudaGraphExecBase;
// ----------------------------------------------------------------------------
// struct
using underlying_type = int;
- constexpr static underlying_type NONE = 0x00000000;
- constexpr static underlying_type CONDITIONED = 0x10000000;
- constexpr static underlying_type DETACHED = 0x20000000;
- constexpr static underlying_type PREEMPTED = 0x40000000;
+ constexpr static underlying_type NONE = 0x00000000;
+ constexpr static underlying_type CONDITIONED = 0x10000000;
+ constexpr static underlying_type PREEMPTED = 0x20000000;
+ constexpr static underlying_type RETAIN_SUBFLOW = 0x40000000;
+ constexpr static underlying_type JOINED_SUBFLOW = 0x80000000;
// mask to isolate state bits - non-state bits store # weak dependents
constexpr static underlying_type MASK = 0xF0000000;
using underlying_type = int;
- constexpr static underlying_type NONE = 0;
- constexpr static underlying_type EXCEPTION = 1;
- constexpr static underlying_type CANCELLED = 2;
- constexpr static underlying_type ANCHORED = 4;
+ constexpr static underlying_type NONE = 0x00000000;
+ constexpr static underlying_type EXCEPTION = 0x10000000;
+ constexpr static underlying_type CANCELLED = 0x20000000;
+ constexpr static underlying_type ANCHORED = 0x40000000;
};
using estate_t = ESTATE::underlying_type;
@endcode
*/
size_t num_workers() const noexcept;
+
+ /**
+ @brief queries the number of workers that are currently not making any stealing attempts
+ */
+ size_t num_waiters() const noexcept;
+
+ /**
+ @brief queries the number of queues used in the work-stealing loop
+ */
+ size_t num_queues() const noexcept;
/**
@brief queries the number of running topologies at the time of this call
/**
@brief runs the given function asynchronously
- when the given dependents finish
+ when the given predecessors finish
@tparam F callable type
@tparam Tasks task types convertible to tf::AsyncTask
/**
@brief runs the given function asynchronously
- when the given dependents finish
+ when the given predecessors finish
@tparam F callable type
@tparam Tasks task types convertible to tf::AsyncTask
/**
@brief runs the given function asynchronously
- when the given range of dependents finish
+ when the given range of predecessors finish
@tparam F callable type
@tparam I iterator type
/**
@brief runs the given function asynchronously
- when the given range of dependents finish
+ when the given range of predecessors finish
@tparam F callable type
@tparam I iterator type
/**
@brief runs the given function asynchronously
- when the given dependents finish
+ when the given predecessors finish
@tparam F callable type
@tparam Tasks task types convertible to tf::AsyncTask
/**
@brief runs the given function asynchronously
- when the given dependents finish
+ when the given predecessors finish
@tparam P task parameters type
@tparam F callable type
/**
@brief runs the given function asynchronously
- when the given range of dependents finish
+ when the given range of predecessors finish
@tparam F callable type
@tparam I iterator type
/**
@brief runs the given function asynchronously
- when the given range of dependents finish
+ when the given range of predecessors finish
@tparam P task parameters type
@tparam F callable type
private:
- const size_t _MAX_STEALS;
-
std::mutex _taskflows_mutex;
std::vector<Worker> _workers;
DefaultNotifier _notifier;
#if __cplusplus >= TF_CPP20
- std::latch _latch;
std::atomic<size_t> _num_topologies {0};
- std::atomic_flag _done = ATOMIC_FLAG_INIT;
#else
- Latch _latch;
std::condition_variable _topology_cv;
std::mutex _topology_mutex;
size_t _num_topologies {0};
- std::atomic<bool> _done {0};
#endif
-
std::list<Taskflow> _taskflows;
- Freelist<Node*> _freelist;
+ Freelist<Node*> _buffers;
std::shared_ptr<WorkerInterface> _worker_interface;
std::unordered_set<std::shared_ptr<ObserverInterface>> _observers;
void _observer_epilogue(Worker&, Node*);
void _spawn(size_t);
void _exploit_task(Worker&, Node*&);
- void _explore_task(Worker&, Node*&);
+ bool _explore_task(Worker&, Node*&);
void _schedule(Worker&, Node*);
void _schedule(Node*);
void _set_up_topology(Worker*, Topology*);
bool _invoke_runtime_task_impl(Worker&, Node*, std::function<void(Runtime&, bool)>&);
template <typename I>
- I _set_up_graph(I, I, Topology*, Node*, nstate_t);
+ I _set_up_graph(I, I, Topology*, Node*);
template <typename P>
void _corun_until(Worker&, P&&);
template <typename I>
void _corun_graph(Worker&, Node*, I, I);
-
+
template <typename I>
void _schedule(Worker&, I, I);
void _schedule(I, I);
template <typename I>
- void _schedule_graph_with_parent(Worker&, I, I, Node*, nstate_t);
+ void _schedule_graph_with_parent(Worker&, I, I, Node*);
template <typename P, typename F>
auto _async(P&&, F&&, Topology*, Node*);
// Constructor
inline Executor::Executor(size_t N, std::shared_ptr<WorkerInterface> wix) :
- _MAX_STEALS ((N+1) << 1),
- _workers (N),
- _notifier (N),
- _latch (N+1),
- _freelist (N),
+ _workers (N),
+ _notifier (N),
+ _buffers (N),
_worker_interface(std::move(wix)) {
if(N == 0) {
wait_for_all();
// shut down the scheduler
+ for(size_t i=0; i<_workers.size(); ++i) {
+ #if __cplusplus >= TF_CPP20
+ _workers[i]._done.test_and_set(std::memory_order_relaxed);
+ #else
+ _workers[i]._done.store(true, std::memory_order_relaxed);
+ #endif
+ }
-#if __cplusplus >= TF_CPP20
- _done.test_and_set(std::memory_order_relaxed);
-#else
- _done = true;
-#endif
_notifier.notify_all();
for(auto& w : _workers) {
return _workers.size();
}
+// Function: num_waiters
+inline size_t Executor::num_waiters() const noexcept {
+#if __cplusplus >= TF_CPP20
+ return _notifier.num_waiters();
+#else
+  // Unfortunately, the nonblocking notifier does not provide an easy way to return
+ // the number of workers that are not making stealing attempts.
+ return 0;
+#endif
+}
+
+// Function: num_queues
+inline size_t Executor::num_queues() const noexcept {
+ return _workers.size() + _buffers.size();
+}
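+
+// Illustrative relationship among the queries above (a sketch, not part of this
+// header; the executor size of 8 is an arbitrary example value):
+//
+//   tf::Executor executor(8);
+//   executor.num_workers();  // 8 worker threads
+//   executor.num_queues();   // 8 worker queues plus the shared buffer queues
+//   executor.num_waiters();  // workers parked in the notifier (always 0 before C++20)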
+
// Function: num_topologies
inline size_t Executor::num_topologies() const {
#if __cplusplus >= TF_CPP20
return _taskflows.size();
}
-// Function: _this_worker
-//inline Worker* Executor::_this_worker() const {
-// auto w = pt::this_worker;
-// return (w && w->_executor == this) ? w : nullptr;
-//}
-
// Function: this_worker_id
inline int Executor::this_worker_id() const {
auto w = pt::this_worker;
// Procedure: _spawn
inline void Executor::_spawn(size_t N) {
- // Note: we can't declare latch here as a local variable
- // since the main thread may leave quicker than other thread
- // and then destroy it, causing the other thread to dangle
- // with the latch
for(size_t id=0; id<N; ++id) {
_workers[id]._id = id;
pt::this_worker = &w;
- // synchronize with the main thread to ensure all worker data
- // has been set (e.g., _thread)
- _latch.arrive_and_wait();
-
- // initialize the random engine and seed for work-stealing
+ // initialize the random engine and seed for work-stealing loop
w._rdgen.seed(static_cast<std::default_random_engine::result_type>(
std::hash<std::thread::id>()(std::this_thread::get_id()))
);
- w._rdvtm = std::uniform_int_distribution<size_t>(0, 2*_workers.size()-2);
// before entering the work-stealing loop, call the scheduler prologue
if(_worker_interface) {
// the previous worker may stop while the following workers
// are still preparing for entering the scheduling loop
try {
+
+ // worker loop
while(1) {
- // execute the tasks.
+ // drain out the local queue
_exploit_task(w, t);
- // wait for tasks
+ // steal and wait for tasks
if(_wait_for_task(w, t) == false) {
break;
}
});
}
-
- _latch.arrive_and_wait();
}
// Function: _corun_until
template <typename P>
void Executor::_corun_until(Worker& w, P&& stop_predicate) {
+
+ const size_t MAX_STEALS = ((num_queues() + 1) << 1);
+
+ std::uniform_int_distribution<size_t> udist(0, num_queues()-1);
exploit:
}
else {
size_t num_steals = 0;
+ size_t vtm = w._vtm;
explore:
+
+ //auto vtm = udist(w._rdgen);
- //t = (w._id == w._vtm) ? _freelist.steal(w._id) : _workers[w._vtm]._wsq.steal();
- t = (w._vtm < _workers.size()) ? _workers[w._vtm]._wsq.steal() :
- _freelist.steal(w._vtm - _workers.size());
+ t = (vtm < _workers.size()) ? _workers[vtm]._wsq.steal() :
+ _buffers.steal(vtm - _workers.size());
if(t) {
_invoke(w, t);
+ w._vtm = vtm;
goto exploit;
}
else if(!stop_predicate()) {
- if(num_steals++ > _MAX_STEALS) {
+ if(++num_steals > MAX_STEALS) {
std::this_thread::yield();
}
- // skip worker-id
- //auto r = w._rdgen.random_range(0, 2*_workers.size()-2);
- auto r = w._rdvtm(w._rdgen);
- w._vtm = r + (r >= w._id);
+ vtm = udist(w._rdgen);
goto explore;
}
else {
}
// Function: _explore_task
-inline void Executor::_explore_task(Worker& w, Node*& t) {
+inline bool Executor::_explore_task(Worker& w, Node*& t) {
//assert(!t);
+
+ const size_t MAX_STEALS = ((num_queues() + 1) << 1);
+ std::uniform_int_distribution<size_t> udist(0, num_queues()-1);
size_t num_steals = 0;
+ size_t vtm = w._vtm;
+
+ // Make the worker steal immediately from the assigned victim.
+ while(true) {
+
+  // Randomly generate the next victim.
+ //vtm = udist(w._rdgen); //w._rdvtm();
- // Here, we write do-while to make the worker steal at once
- // from the assigned victim.
- do {
- //t = (w._id == w._vtm) ? _freelist.steal(w._id) : _workers[w._vtm]._wsq.steal();
- t = (w._vtm < _workers.size()) ? _workers[w._vtm]._wsq.steal() :
- _freelist.steal(w._vtm - _workers.size());
+ // If the worker's victim thread is within the worker pool, steal from the worker's queue.
+ // Otherwise, steal from the buffer, adjusting the victim index based on the worker pool size.
+ t = (vtm < _workers.size())
+ ? _workers[vtm]._wsq.steal()
+ : _buffers.steal(vtm - _workers.size());
if(t) {
+ w._vtm = vtm;
break;
}
- if (++num_steals > _MAX_STEALS) {
+ // Increment the steal count, and if it exceeds MAX_STEALS, yield the thread.
+ // If the number of *consecutive* empty steals reaches MAX_STEALS, exit the loop.
+ if (++num_steals > MAX_STEALS) {
std::this_thread::yield();
- if (num_steals > _MAX_STEALS + 100) {
+ if(num_steals > 100 + MAX_STEALS) {
break;
}
}
- // skip worker-id
- //auto r = w._rdgen.random_range(0, 2*_workers.size()-2);
- auto r = w._rdvtm(w._rdgen);
- w._vtm = r + (r >= w._id);
- }
-#if __cplusplus >= TF_CPP20
- // the _DONE can be checked later in wait_for_task?
- while(!_done.test(std::memory_order_relaxed));
-#else
- while(!_done);
-#endif
+ #if __cplusplus >= TF_CPP20
+ if(w._done.test(std::memory_order_relaxed)) {
+ #else
+ if(w._done.load(std::memory_order_relaxed)) {
+ #endif
+ return false;
+ }
+ vtm = udist(w._rdgen); //w._rdvtm();
+ }
+ return true;
}
// Procedure: _exploit_task
}
// Function: _wait_for_task
-inline bool Executor::_wait_for_task(Worker& worker, Node*& t) {
+inline bool Executor::_wait_for_task(Worker& w, Node*& t) {
explore_task:
- _explore_task(worker, t);
-
+ if(_explore_task(w, t) == false) {
+ return false;
+ }
+
+ // Go exploit the task if we successfully steal one.
if(t) {
return true;
}
-
- // The last thief who successfully stole a task will wake up
- // another thief worker to avoid starvation.
-// if(t) {
-// _notifier.notify_one();
-// return true;
-// }
-
- // ---- 2PC guard ----
- _notifier.prepare_wait(worker._waiter);
- if(!_freelist.empty()) {
- _notifier.cancel_wait(worker._waiter);
- worker._vtm = worker._id;
- goto explore_task;
+ // Entering the 2PC guard as all queues should be empty after many stealing attempts.
+ _notifier.prepare_wait(w._waiter);
+
+ // Condition #1: buffers should be empty
+ for(size_t vtm=0; vtm<_buffers.size(); ++vtm) {
+ if(!_buffers._buckets[vtm].queue.empty()) {
+ _notifier.cancel_wait(w._waiter);
+ w._vtm = vtm + _workers.size();
+ goto explore_task;
+ }
}
-
-#if __cplusplus >= TF_CPP20
- if(_done.test(std::memory_order_relaxed)) {
-#else
- if(_done) {
-#endif
- _notifier.cancel_wait(worker._waiter);
- _notifier.notify_all();
- return false;
+
+ // Condition #2: worker queues should be empty
+  // Note: We need to use index-based looping to avoid a data race with _spawn,
+  // which initializes other workers' data structures at the same time.
+ for(size_t vtm=0; vtm<w._id; ++vtm) {
+ if(!_workers[vtm]._wsq.empty()) {
+ _notifier.cancel_wait(w._waiter);
+ w._vtm = vtm;
+ goto explore_task;
+ }
}
- // We need to use index-based scanning to avoid data race
- // with _spawn which may initialize a worker at the same time.
- for(size_t vtm=0; vtm<_workers.size(); vtm++) {
+ // due to the property of the work-stealing queue, we don't need to check
+ // the queue of this worker
+ for(size_t vtm=w._id+1; vtm<_workers.size(); vtm++) {
if(!_workers[vtm]._wsq.empty()) {
- _notifier.cancel_wait(worker._waiter);
- worker._vtm = vtm;
+ _notifier.cancel_wait(w._waiter);
+ w._vtm = vtm;
goto explore_task;
}
}
- // Now I really need to relinquish my self to others
- _notifier.commit_wait(worker._waiter);
+ // Condition #3: worker should be alive
+#if __cplusplus >= TF_CPP20
+ if(w._done.test(std::memory_order_relaxed)) {
+#else
+ if(w._done.load(std::memory_order_relaxed)) {
+#endif
+ _notifier.cancel_wait(w._waiter);
+ return false;
+ }
+
+ // Now I really need to relinquish myself to others.
+ _notifier.commit_wait(w._waiter);
goto explore_task;
-
}
// Function: make_observer
// Procedure: _schedule
inline void Executor::_schedule(Worker& worker, Node* node) {
- // caller is a worker to this pool - starting at v3.5 we do not use
+ // caller is a worker of this executor - starting at v3.5 we do not use
// any complicated notification mechanism as the experimental result
// has shown no significant advantage.
if(worker._executor == this) {
- worker._wsq.push(node, [&](){ _freelist.push(worker._id, node); });
+ worker._wsq.push(node, [&](){ _buffers.push(node); });
_notifier.notify_one();
return;
}
- _freelist.push(node);
+ // caller is not a worker of this executor - go through the centralized queue
+ _buffers.push(node);
_notifier.notify_one();
}
// Procedure: _schedule
inline void Executor::_schedule(Node* node) {
- _freelist.push(node);
+ _buffers.push(node);
_notifier.notify_one();
}
return;
}
- // [NOTE]: We cannot use first/last as the for-loop condition
- // (e.g., for(; first != last; ++first)) since when a node is inserted
- // into the queue the node can run and finish immediately.
- // If this is the last node in the graph, it will tear down the parent
- // container which cause the last ++first to fail.
- // This problem is specific to MSVC which has strict iterator arithmetics.
+ // NOTE: We cannot use first/last in the for-loop (e.g., for(; first != last; ++first)).
+ // This is because when a node v is inserted into the queue, v can run and finish
+ // immediately. If v is the last node in the graph, it will tear down the parent task vector
+  // which causes the last ++first to fail. This problem is specific to MSVC, which has a stricter
+ // iterator implementation in std::vector than GCC/Clang.
if(worker._executor == this) {
for(size_t i=0; i<num_nodes; i++) {
auto node = detail::get_node_ptr(first[i]);
- worker._wsq.push(node, [&](){ _freelist.push(worker._id, node); });
+ worker._wsq.push(node, [&](){ _buffers.push(node); });
_notifier.notify_one();
}
return;
}
-
+
+ // caller is not a worker of this executor - go through the centralized queue
for(size_t i=0; i<num_nodes; i++) {
- _freelist.push(detail::get_node_ptr(first[i]));
+ _buffers.push(detail::get_node_ptr(first[i]));
}
_notifier.notify_n(num_nodes);
}
return;
}
+ // NOTE: We cannot use first/last in the for-loop (e.g., for(; first != last; ++first)).
+ // This is because when a node v is inserted into the queue, v can run and finish
+ // immediately. If v is the last node in the graph, it will tear down the parent task vector
+  // which causes the last ++first to fail. This problem is specific to MSVC, which has a stricter
+ // iterator implementation in std::vector than GCC/Clang.
for(size_t i=0; i<num_nodes; i++) {
- _freelist.push(detail::get_node_ptr(first[i]));
+ _buffers.push(detail::get_node_ptr(first[i]));
}
_notifier.notify_n(num_nodes);
}
template <typename I>
-void Executor::_schedule_graph_with_parent(
- Worker& worker, I beg, I end, Node* parent, nstate_t nstate
-) {
- auto send = _set_up_graph(beg, end, parent->_topology, parent, nstate);
+void Executor::_schedule_graph_with_parent(Worker& worker, I beg, I end, Node* parent) {
+ auto send = _set_up_graph(beg, end, parent->_topology, parent);
parent->_join_counter.fetch_add(send - beg, std::memory_order_relaxed);
_schedule(worker, beg, send);
}
-inline void Executor::_update_cache(Worker& worker, Node*& cache, Node* node) {
+TF_FORCE_INLINE void Executor::_update_cache(Worker& worker, Node*& cache, Node* node) {
if(cache) {
_schedule(worker, cache);
}
// Reset the join counter with strong dependencies to support cycles.
// + We must do this before scheduling the successors to avoid race
- // condition on _dependents.
+ // condition on _predecessors.
// + We must use fetch_add instead of direct assigning
// because the user-space call on "invoke" may explicitly schedule
// this task again (e.g., pipeline) which can access the join_counter.
node->_join_counter.fetch_add(
- node->num_dependents() - (node->_nstate & ~NSTATE::MASK), std::memory_order_relaxed
+ node->num_predecessors() - (node->_nstate & ~NSTATE::MASK), std::memory_order_relaxed
);
// acquire the parent flow counter
case Node::CONDITION:
case Node::MULTI_CONDITION: {
for(auto cond : conds) {
- if(cond >= 0 && static_cast<size_t>(cond) < node->_successors.size()) {
- auto s = node->_successors[cond];
+ if(cond >= 0 && static_cast<size_t>(cond) < node->_num_successors) {
+ auto s = node->_edges[cond];
// zeroing the join counter for invariant
s->_join_counter.store(0, std::memory_order_relaxed);
join_counter.fetch_add(1, std::memory_order_relaxed);
// non-condition task
default: {
- for(size_t i=0; i<node->_successors.size(); ++i) {
- //if(auto s = node->_successors[i]; --(s->_join_counter) == 0) {
- if(auto s = node->_successors[i];
- s->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1) {
+ for(size_t i=0; i<node->_num_successors; ++i) {
+ if(auto s = node->_edges[i]; s->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1) {
join_counter.fetch_add(1, std::memory_order_relaxed);
_update_cache(worker, cache, s);
}
}
}
else {
- // needs to fetch every data before join-counter becomes zero at which
+    // needs to fetch all data before the join counter becomes zero, at which point
// the node may be deleted
auto state = parent->_nstate;
if(parent->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1) {
// Procedure: _invoke_subflow_task
inline bool Executor::_invoke_subflow_task(Worker& worker, Node* node) {
+
+ auto& h = *std::get_if<Node::Subflow>(&node->_handle);
+ auto& g = h.subgraph;
if((node->_nstate & NSTATE::PREEMPTED) == 0) {
- auto& h = *std::get_if<Node::Subflow>(&node->_handle);
- auto& g = h.subgraph;
-
// set up the subflow
Subflow sf(*this, worker, node, g);
// spawn the subflow if it is joinable and its graph is non-empty
// implicit join is faster than Subflow::join as it does not involve corun
- if(sf.joinable() && g.size() > sf._tag) {
+ if(sf.joinable() && g.size()) {
// signal the executor to preempt this node
node->_nstate |= NSTATE::PREEMPTED;
// set up and schedule the graph
- _schedule_graph_with_parent(worker, g.begin() + sf._tag, g.end(), node, NSTATE::NONE);
+ _schedule_graph_with_parent(worker, g.begin(), g.end(), node);
return true;
}
}
node->_nstate &= ~NSTATE::PREEMPTED;
}
+ // the subflow has finished or joined
+ if((node->_nstate & NSTATE::RETAIN_SUBFLOW) == 0) {
+ g.clear();
+ }
+
return false;
}
if((node->_nstate & NSTATE::PREEMPTED) == 0) {
// signal the executor to preempt this node
node->_nstate |= NSTATE::PREEMPTED;
- _schedule_graph_with_parent(w, graph.begin(), graph.end(), node, NSTATE::NONE);
+ _schedule_graph_with_parent(w, graph.begin(), graph.end(), node);
return true;
}
+
// second entry - already spawned
- else {
- node->_nstate &= ~NSTATE::PREEMPTED;
- }
+ node->_nstate &= ~NSTATE::PREEMPTED;
+
return false;
}
_observer_epilogue(worker, node);
break;
- // void(Runtime&)
+ // void(Runtime&) - silent async
case 1:
if(_invoke_runtime_task_impl(worker, node, *std::get_if<1>(&work))) {
return true;
}
break;
- // void(Runtime&, bool)
+ // void(Runtime&, bool) - async
case 2:
if(_invoke_runtime_task_impl(worker, node, *std::get_if<2>(&work))) {
return true;
_increment_topology();
- // Need to check the empty under the lock since subflow task may
- // define detached blocks that modify the taskflow at the same time
- bool empty;
- {
- std::lock_guard<std::mutex> lock(f._mutex);
- empty = f.empty();
- }
+ //// Need to check the empty under the lock since subflow task may
+ //// define detached blocks that modify the taskflow at the same time
+ //bool empty;
+ //{
+ // std::lock_guard<std::mutex> lock(f._mutex);
+ // empty = f.empty();
+ //}
  // No need to create a real topology; just return a dummy future
- if(empty || p()) {
+ if(f.empty() || p()) {
c();
std::promise<void> promise;
promise.set_value();
// anchor this parent as the blocking point
{
AnchorGuard anchor(p);
- _schedule_graph_with_parent(w, first, last, p, NSTATE::NONE);
+ _schedule_graph_with_parent(w, first, last, p);
_corun_until(w, [p] () -> bool {
return p->_join_counter.load(std::memory_order_acquire) == 0; }
);
// ---- under taskflow lock ----
auto& g = tpg->_taskflow._graph;
- //g._clear_detached();
- auto send = _set_up_graph(g.begin(), g.end(), tpg, nullptr, NSTATE::NONE);
+ auto send = _set_up_graph(g.begin(), g.end(), tpg, nullptr);
tpg->_join_counter.store(send - g.begin(), std::memory_order_relaxed);
w ? _schedule(*w, g.begin(), send) : _schedule(g.begin(), send);
// Function: _set_up_graph
template <typename I>
-I Executor::_set_up_graph(I first, I last, Topology* tpg, Node* parent, nstate_t state) {
+I Executor::_set_up_graph(I first, I last, Topology* tpg, Node* parent) {
auto send = first;
for(; first != last; ++first) {
auto node = first->get();
node->_topology = tpg;
node->_parent = parent;
- node->_nstate = state;
- node->_estate.store(0, std::memory_order_relaxed);
+ node->_nstate = NSTATE::NONE;
+ node->_estate.store(ESTATE::NONE, std::memory_order_relaxed);
node->_set_up_join_counter();
node->_exception_ptr = nullptr;
// move source to the first partition
- if(node->num_dependents() == 0) {
+ // root, root, root, v1, v2, v3, v4, ...
+ if(node->num_predecessors() == 0) {
std::iter_swap(send++, first);
}
-
- // handle-specific clear
- switch(node->_handle.index()) {
-
- // clear detached nodes
- case Node::SUBFLOW: {
- std::get_if<Node::Subflow>(&node->_handle)->subgraph.clear();
- } break;
-
- default:
- break;
- }
}
return send;
}
if(!tpg->_exception_ptr && !tpg->cancelled() && !tpg->_pred()) {
//assert(tpg->_join_counter == 0);
std::lock_guard<std::mutex> lock(f._mutex);
- //auto& g = tpg->_taskflow._graph;
- //tpg->_join_counter.store(tpg->_num_sources, std::memory_order_relaxed);
- //_schedule(worker, g.begin(), g.begin() + tpg->_num_sources);
_set_up_topology(&worker, tpg);
}
// case 2: the final run of this topology
inline void Subflow::join() {
if(!joinable()) {
- TF_THROW("subflow already joined or detached");
+ TF_THROW("subflow already joined");
}
- // iterator to the begining of the subflow
- auto gbeg = _graph.begin() + _tag;
-
- // join here since corun graph may throw exception
- _tag |= JOINED_BIT;
-
- _executor._corun_graph(_worker, _parent, gbeg, _graph.end());
-}
-
-inline void Subflow::detach() {
-
- if(!joinable()) {
- TF_THROW("subflow already joined or detached");
- }
-
- if(_graph.size() > _tag) {
- auto sbeg = _graph.begin() + _tag;
- auto send = _executor._set_up_graph(
- sbeg, _graph.end(), _parent->_topology, nullptr, NSTATE::DETACHED
- );
- _parent->_topology->_join_counter.fetch_add(send - sbeg, std::memory_order_relaxed);
- _executor._schedule(_worker, sbeg, send);
- }
+ _executor._corun_graph(_worker, _parent, _graph.begin(), _graph.end());
- _tag |= JOINED_BIT;
+  // mark the subflow as joined here, noting that corun_graph above may throw
+ _parent->_nstate |= NSTATE::JOINED_SUBFLOW;
}
#endif
tf::IndexRange<int> range(0, 17, 2);
// parallelize the sequence [0, 2, 4, 6, 8, 10, 12, 14, 16]
- taskflow.for_each_index(range, [](tf::IndexRange<int> range) {
+ taskflow.for_each_by_index(range, [](tf::IndexRange<int> range) {
// iterate each index in the subrange
for(int i=range.begin(); i<range.end(); i+=range.step_size()) {
printf("iterate %d\n", i);
Please refer to @ref ParallelIterations for details.
*/
template <typename R, typename C, typename P = DefaultPartitioner>
- Task for_each_index(R range, C callable, P part = P());
+ Task for_each_by_index(R range, C callable, P part = P());
// ------------------------------------------------------------------------
// transform
// Function: emplace
template <typename C, std::enable_if_t<is_static_task_v<C>, void>*>
Task FlowBuilder::emplace(C&& c) {
- return Task(_graph._emplace_back("", nullptr, nullptr, 0,
+ return Task(_graph._emplace_back(NSTATE::NONE, ESTATE::NONE, DefaultTaskParams{}, nullptr, nullptr, 0,
std::in_place_type_t<Node::Static>{}, std::forward<C>(c)
));
}
// Function: emplace
template <typename C, std::enable_if_t<is_runtime_task_v<C>, void>*>
Task FlowBuilder::emplace(C&& c) {
- return Task(_graph._emplace_back("", nullptr, nullptr, 0,
+ return Task(_graph._emplace_back(NSTATE::NONE, ESTATE::NONE, DefaultTaskParams{}, nullptr, nullptr, 0,
std::in_place_type_t<Node::Runtime>{}, std::forward<C>(c)
));
}
// Function: emplace
template <typename C, std::enable_if_t<is_subflow_task_v<C>, void>*>
Task FlowBuilder::emplace(C&& c) {
- return Task(_graph._emplace_back("", nullptr, nullptr, 0,
+ return Task(_graph._emplace_back(NSTATE::NONE, ESTATE::NONE, DefaultTaskParams{}, nullptr, nullptr, 0,
std::in_place_type_t<Node::Subflow>{}, std::forward<C>(c)
));
}
// Function: emplace
template <typename C, std::enable_if_t<is_condition_task_v<C>, void>*>
Task FlowBuilder::emplace(C&& c) {
- return Task(_graph._emplace_back("", nullptr, nullptr, 0,
+ return Task(_graph._emplace_back(NSTATE::NONE, ESTATE::NONE, DefaultTaskParams{}, nullptr, nullptr, 0,
std::in_place_type_t<Node::Condition>{}, std::forward<C>(c)
));
}
// Function: emplace
template <typename C, std::enable_if_t<is_multi_condition_task_v<C>, void>*>
Task FlowBuilder::emplace(C&& c) {
- return Task(_graph._emplace_back("", nullptr, nullptr, 0,
+ return Task(_graph._emplace_back(NSTATE::NONE, ESTATE::NONE, DefaultTaskParams{}, nullptr, nullptr, 0,
std::in_place_type_t<Node::MultiCondition>{}, std::forward<C>(c)
));
}
+// Function: composed_of
+template <typename T>
+Task FlowBuilder::composed_of(T& object) {
+ auto node = _graph._emplace_back(NSTATE::NONE, ESTATE::NONE, DefaultTaskParams{}, nullptr, nullptr, 0,
+ std::in_place_type_t<Node::Module>{}, object
+ );
+ return Task(node);
+}
+
+// Function: placeholder
+inline Task FlowBuilder::placeholder() {
+ auto node = _graph._emplace_back(NSTATE::NONE, ESTATE::NONE, DefaultTaskParams{}, nullptr, nullptr, 0,
+ std::in_place_type_t<Node::Placeholder>{}
+ );
+ return Task(node);
+}
+
// Function: emplace
template <typename... C, std::enable_if_t<(sizeof...(C)>1), void>*>
auto FlowBuilder::emplace(C&&... cs) {
return;
}
- task.for_each_dependent([&] (Task dependent) {
- auto& S = dependent._node->_successors;
- if(auto I = std::find(S.begin(), S.end(), task._node); I != S.end()) {
- S.erase(I);
- }
- });
+ // remove task from its successors' predecessor list
+ for(size_t i=0; i<task._node->_num_successors; ++i) {
+ task._node->_edges[i]->_remove_predecessors(task._node);
+ }
- task.for_each_successor([&] (Task dependent) {
- auto& D = dependent._node->_dependents;
- if(auto I = std::find(D.begin(), D.end(), task._node); I != D.end()) {
- D.erase(I);
- }
- });
+  // remove task from its predecessors' successor list
+ for(size_t i=task._node->_num_successors; i<task._node->_edges.size(); ++i) {
+ task._node->_edges[i]->_remove_successors(task._node);
+ }
_graph._erase(task._node);
}
-// Function: composed_of
-template <typename T>
-Task FlowBuilder::composed_of(T& object) {
- auto node = _graph._emplace_back("", nullptr, nullptr, 0,
- std::in_place_type_t<Node::Module>{}, object
- );
- return Task(node);
-}
-
-// Function: placeholder
-inline Task FlowBuilder::placeholder() {
- auto node = _graph._emplace_back("", nullptr, nullptr, 0,
- std::in_place_type_t<Node::Placeholder>{}
- );
- return Task(node);
-}
// Procedure: _linearize
template <typename L>
tf::Subflow is spawned from the execution of a task to dynamically manage a
child graph that may depend on runtime variables.
-You can explicitly join or detach a subflow by calling tf::Subflow::join
-or tf::Subflow::detach, respectively.
+You can explicitly join a subflow by calling tf::Subflow::join.
By default, the %Taskflow runtime implicitly joins a subflow if it is joinable.
The following example creates a taskflow graph that spawns a subflow from
*/
void join();
- /**
- @brief enables the subflow to detach from its parent task
-
- Performs an immediate action to detach the subflow. Once the subflow is detached,
- it is considered finished and you may not modify the subflow anymore.
-
- @code{.cpp}
- taskflow.emplace([](tf::Subflow& sf){
- sf.emplace([](){});
- sf.detach();
- });
- @endcode
-
- Only the worker that spawns this subflow can detach it.
- */
- void detach();
-
/**
@brief queries if the subflow is joinable
This member function queries if the subflow is joinable.
- When a subflow is joined or detached, it becomes not joinable.
+  Once a subflow is joined, it is no longer joinable.
@code{.cpp}
taskflow.emplace([](tf::Subflow& sf){
@brief acquires the associated graph
*/
Graph& graph() { return _graph; }
+
+ /**
+ @brief specifies whether to keep the subflow after it is joined
+
+ @param flag `true` to retain the subflow after it is joined; `false` to discard it
+
+ By default, the runtime automatically clears a spawned subflow once it is joined.
+ Setting this flag to `true` allows the application to retain the subflow's structure
+ for post-execution analysis like visualization.
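+
+  A minimal usage sketch (assuming an executor and a taskflow have been set up as usual;
+  the final dump is only for illustration):
+
+  @code{.cpp}
+  tf::Task A = taskflow.emplace([](tf::Subflow& sf){
+    tf::Task B = sf.emplace([](){}).name("B");
+    tf::Task C = sf.emplace([](){}).name("C");
+    B.precede(C);
+    sf.retain(true);  // keep the spawned graph after the implicit join
+  }).name("A");
+  executor.run(taskflow).wait();
+  taskflow.dump(std::cout);  // the retained subflow structure remains visible
+  @endcode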
+ */
+ void retain(bool flag) noexcept;
+
+ /**
+ @brief queries if the subflow will be retained after it is joined
+ @return `true` if the subflow will be retained after it is joined; `false` otherwise
+ */
+ bool retain() const;
private:
- // with only the most significant bit set: 1000...000
- constexpr static size_t JOINED_BIT = (~size_t(0)) ^ ((~size_t(0)) >> 1);
-
Subflow(Executor&, Worker&, Node*, Graph&);
Subflow() = delete;
Executor& _executor;
Worker& _worker;
Node* _parent;
-
- size_t _tag;
};
// Constructor
FlowBuilder {graph},
_executor {executor},
_worker {worker},
- _parent {parent},
- _tag {graph.size()} {
-
- // assert(_parent != nullptr);
- // clear undetached nodes in reversed order
- for(auto i = graph.rbegin(); i != graph.rend(); ++i) {
- if((i->get()->_nstate & NSTATE::DETACHED) == 0) {
- --_tag;
- }
- else {
- break;
- }
- }
- graph.resize(_tag);
+ _parent {parent} {
+
+  // need to reset these states since the parent task may run again under iterative control flow
+ _parent->_nstate &= ~(NSTATE::JOINED_SUBFLOW | NSTATE::RETAIN_SUBFLOW);
+
+ // clear the graph
+ graph.clear();
}
// Function: joinable
inline bool Subflow::joinable() const noexcept {
- return (_tag & JOINED_BIT) == 0;
+ return !(_parent->_nstate & NSTATE::JOINED_SUBFLOW);
}
// Function: executor
return _executor;
}
+// Function: retain
+inline void Subflow::retain(bool flag) noexcept {
+ // default value is not to retain
+ if TF_LIKELY(flag == true) {
+ _parent->_nstate |= NSTATE::RETAIN_SUBFLOW;
+ }
+ else {
+ _parent->_nstate &= ~NSTATE::RETAIN_SUBFLOW;
+ }
+
+ //_parent->_nstate = (_parent->_nstate & ~NSTATE::RETAIN_SUBFLOW) |
+ // (-static_cast<int>(flag) & NSTATE::RETAIN_SUBFLOW);
+}
+
+// Function: retain
+inline bool Subflow::retain() const {
+ return _parent->_nstate & NSTATE::RETAIN_SUBFLOW;
+}
+
} // end of namespace tf. ---------------------------------------------------
template <typename T>
class Freelist {
- struct Head {
- std::mutex mutex;
- UnboundedTaskQueue<T> queue;
- };
+ friend class Executor;
public:
-
- Freelist(size_t N) : _heads(N) {}
-
- void push(size_t w, T item) {
- std::scoped_lock lock(_heads[w].mutex);
- _heads[w].queue.push(item);
+ struct Bucket {
+ std::mutex mutex;
+ UnboundedTaskQueue<T> queue;
+ };
+
+  // We don't create one task queue per worker (N queues) in the freelist, as that would
+  // make the work-stealing loop spend a lot of time stealing across too many queues.
+  // Experimentally, we found floor_log2(N) buckets to work best.
+ TF_FORCE_INLINE Freelist(size_t N) : _buckets(N < 4 ? 1 : floor_log2(N)) {}
+
+  // Pointers are aligned to 8 bytes, so we apply a simple shift-based hash to avoid the
+  // contention that would arise from many items mapping to the same bucket.
+ TF_FORCE_INLINE void push(T item) {
+ //auto b = reinterpret_cast<uintptr_t>(item) % _buckets.size();
+ auto b = (reinterpret_cast<uintptr_t>(item) >> 16) % _buckets.size();
+ std::scoped_lock lock(_buckets[b].mutex);
+ _buckets[b].queue.push(item);
}
- void push(T item) {
- push(reinterpret_cast<uintptr_t>(item) % _heads.size(), item);
+ TF_FORCE_INLINE T steal(size_t w) {
+ return _buckets[w].queue.steal();
}
-
- T steal(size_t w) {
- return _heads[w].queue.steal();
+
+ TF_FORCE_INLINE T steal_with_hint(size_t w, size_t& num_empty_steals) {
+ return _buckets[w].queue.steal_with_hint(num_empty_steals);
}
-
- bool empty() const {
- for(const auto& q : _heads) {
- if(!q.queue.empty()) {
- return false;
- }
- }
- return true;
+ TF_FORCE_INLINE size_t size() const {
+ return _buckets.size();
}
private:
- std::vector<Head> _heads;
+ std::vector<Bucket> _buckets;
};
#include "../utility/math.hpp"
#include "../utility/small_vector.hpp"
#include "../utility/serializer.hpp"
-#include "../utility/latch.hpp"
-#include "../utility/mpmc.hpp"
+#include "../utility/lazy_string.hpp"
#include "error.hpp"
#include "declarations.hpp"
#include "semaphore.hpp"
#include "topology.hpp"
#include "tsq.hpp"
+
/**
@file graph.hpp
@brief graph include file
Node() = default;
- template <typename... Args>
- Node(const std::string&, Topology*, Node*, size_t, Args&&...);
-
- template <typename... Args>
- Node(nstate_t, estate_t, const std::string&, Topology*, Node*, size_t, Args&&...);
-
- template <typename... Args>
- Node(const TaskParams&, Topology*, Node*, size_t, Args&&...);
-
template <typename... Args>
Node(nstate_t, estate_t, const TaskParams&, Topology*, Node*, size_t, Args&&...);
- template <typename... Args>
- Node(const DefaultTaskParams&, Topology*, Node*, size_t, Args&&...);
-
template <typename... Args>
Node(nstate_t, estate_t, const DefaultTaskParams&, Topology*, Node*, size_t, Args&&...);
- //~Node();
-
size_t num_successors() const;
- size_t num_dependents() const;
- size_t num_strong_dependents() const;
- size_t num_weak_dependents() const;
+ size_t num_predecessors() const;
+ size_t num_strong_dependencies() const;
+ size_t num_weak_dependencies() const;
const std::string& name() const;
Topology* _topology {nullptr};
Node* _parent {nullptr};
- SmallVector<Node*> _successors;
- SmallVector<Node*> _dependents;
+ size_t _num_successors {0};
+ SmallVector<Node*, 4> _edges;
std::atomic<size_t> _join_counter {0};
std::exception_ptr _exception_ptr {nullptr};
- // free list
- //Node* _freelist_next{nullptr};
-
bool _is_cancelled() const;
bool _is_conditioner() const;
bool _is_preempted() const;
void _precede(Node*);
void _set_up_join_counter();
void _rethrow_exception();
+ void _remove_successors(Node*);
+ void _remove_predecessors(Node*);
};
// ----------------------------------------------------------------------------
// Definition for Node
// ----------------------------------------------------------------------------
-// Constructor
-template <typename... Args>
-Node::Node(
- const std::string& name,
- Topology* topology,
- Node* parent,
- size_t join_counter,
- Args&&... args
-) :
- _name {name},
- _topology {topology},
- _parent {parent},
- _join_counter {join_counter},
- _handle {std::forward<Args>(args)...} {
-}
-
-// Constructor
-template <typename... Args>
-Node::Node(
- nstate_t nstate,
- estate_t estate,
- const std::string& name,
- Topology* topology,
- Node* parent,
- size_t join_counter,
- Args&&... args
-) :
- _nstate {nstate},
- _estate {estate},
- _name {name},
- _topology {topology},
- _parent {parent},
- _join_counter {join_counter},
- _handle {std::forward<Args>(args)...} {
-}
-
-// Constructor
-template <typename... Args>
-Node::Node(
- const TaskParams& params,
- Topology* topology,
- Node* parent,
- size_t join_counter,
- Args&&... args
-) :
- _name {params.name},
- _data {params.data},
- _topology {topology},
- _parent {parent},
- _join_counter {join_counter},
- _handle {std::forward<Args>(args)...} {
-}
-
// Constructor
template <typename... Args>
Node::Node(
_handle {std::forward<Args>(args)...} {
}
-// Constructor
-template <typename... Args>
-Node::Node(
- const DefaultTaskParams&,
- Topology* topology,
- Node* parent,
- size_t join_counter,
- Args&&... args
-) :
- _topology {topology},
- _parent {parent},
- _join_counter {join_counter},
- _handle {std::forward<Args>(args)...} {
-}
-
// Constructor
template <typename... Args>
Node::Node(
_handle {std::forward<Args>(args)...} {
}
-// Destructor
-//inline Node::~Node() {
-// // this is to avoid stack overflow
-// if(_handle.index() == SUBFLOW) {
-// auto& subgraph = std::get_if<Subflow>(&_handle)->subgraph;
-// std::vector<Node*> nodes;
-// nodes.reserve(subgraph.size());
-//
-// std::move(
-// subgraph._nodes.begin(), subgraph._nodes.end(), std::back_inserter(nodes)
-// );
-// subgraph._nodes.clear();
-//
-// size_t i = 0;
-//
-// while(i < nodes.size()) {
-//
-// if(nodes[i]->_handle.index() == SUBFLOW) {
-// auto& sbg = std::get_if<Subflow>(&(nodes[i]->_handle))->subgraph;
-// std::move(
-// sbg._nodes.begin(), sbg._nodes.end(), std::back_inserter(nodes)
-// );
-// sbg._nodes.clear();
-// }
-//
-// ++i;
-// }
-//
-// //auto& np = Graph::_node_pool();
-// for(i=0; i<nodes.size(); ++i) {
-// recycle(nodes[i]);
-// }
-// }
-//}
-
// Procedure: _precede
+/*
+u successor layout: s1, s2, s3, p1, p2 (num_successors = 3)
+v predecessor layout: s1, p1, p2
+
+add a new successor edge u->v:
+u successor layout:
+  s1, s2, s3, p1, p2, v (push_back v)
+  s1, s2, s3, v, p2, p1 (swap _edges[num_successors] with _edges[size-1]; num_successors becomes 4)
+v predecessor layout:
+  s1, p1, p2, u (push_back u)
+*/
inline void Node::_precede(Node* v) {
- _successors.push_back(v);
- v->_dependents.push_back(this);
+ _edges.push_back(v);
+ std::swap(_edges[_num_successors++], _edges[_edges.size() - 1]);
+ v->_edges.push_back(this);
+}
+
+// Function: _remove_successors
+inline void Node::_remove_successors(Node* node) {
+ auto sit = std::remove(_edges.begin(), _edges.begin() + _num_successors, node);
+ size_t new_num_successors = std::distance(_edges.begin(), sit);
+ std::move(_edges.begin() + _num_successors, _edges.end(), sit);
+ _edges.resize(_edges.size() - (_num_successors - new_num_successors));
+ _num_successors = new_num_successors;
+}
+
+// Function: _remove_predecessors
+inline void Node::_remove_predecessors(Node* node) {
+ _edges.erase(
+ std::remove(_edges.begin() + _num_successors, _edges.end(), node), _edges.end()
+ );
}
// Function: num_successors
inline size_t Node::num_successors() const {
- return _successors.size();
+ return _num_successors;
}
-// Function: dependents
-inline size_t Node::num_dependents() const {
- return _dependents.size();
+// Function: predecessors
+inline size_t Node::num_predecessors() const {
+ return _edges.size() - _num_successors;
}
-// Function: num_weak_dependents
-inline size_t Node::num_weak_dependents() const {
+// Function: num_weak_dependencies
+inline size_t Node::num_weak_dependencies() const {
size_t n = 0;
- for(size_t i=0; i<_dependents.size(); i++) {
- if(_dependents[i]->_is_conditioner()) {
- n++;
- }
+ for(size_t i=_num_successors; i<_edges.size(); i++) {
+ n += _edges[i]->_is_conditioner();
}
return n;
}
-// Function: num_strong_dependents
-inline size_t Node::num_strong_dependents() const {
+// Function: num_strong_dependencies
+inline size_t Node::num_strong_dependencies() const {
size_t n = 0;
- for(size_t i=0; i<_dependents.size(); i++) {
- if(!_dependents[i]->_is_conditioner()) {
- n++;
- }
+ for(size_t i=_num_successors; i<_edges.size(); i++) {
+ n += !_edges[i]->_is_conditioner();
}
return n;
}
// Procedure: _set_up_join_counter
inline void Node::_set_up_join_counter() {
size_t c = 0;
- for(auto p : _dependents) {
- if(p->_is_conditioner()) {
- //_nstate |= NSTATE::CONDITIONED;
- _nstate = (_nstate + 1) | NSTATE::CONDITIONED;
- }
- else {
- c++;
- }
+ //for(auto p : _predecessors) {
+ for(size_t i=_num_successors; i<_edges.size(); i++) {
+ bool is_cond = _edges[i]->_is_conditioner();
+ _nstate = (_nstate + is_cond) | (is_cond * NSTATE::CONDITIONED); // weak dependency
+ c += !is_cond; // strong dependency
}
_join_counter.store(c, std::memory_order_relaxed);
}
class NonblockingNotifierV2 {
friend class Executor;
+
+ // State_ layout:
+  // - low kWaiterBits is a stack of waiters that have committed to wait
+ // (indexes in _waiters array are used as stack elements,
+ // kStackMask means empty stack).
+ // - next kWaiterBits is count of waiters in prewait state.
+ // - next kWaiterBits is count of pending signals.
+ // - remaining bits are ABA counter for the stack.
+ // (stored in Waiter node and incremented on push).
+ static const uint64_t kWaiterBits = 14;
+ static const uint64_t kStackMask = (1ull << kWaiterBits) - 1;
+ static const uint64_t kWaiterShift = kWaiterBits;
+ static const uint64_t kWaiterMask = ((1ull << kWaiterBits) - 1) << kWaiterShift;
+ static const uint64_t kWaiterInc = 1ull << kWaiterShift;
+ static const uint64_t kSignalShift = 2 * kWaiterBits;
+ static const uint64_t kSignalMask = ((1ull << kWaiterBits) - 1) << kSignalShift;
+ static const uint64_t kSignalInc = 1ull << kSignalShift;
+ static const uint64_t kEpochShift = 3 * kWaiterBits;
+ static const uint64_t kEpochBits = 64 - kEpochShift;
+ static const uint64_t kEpochMask = ((1ull << kEpochBits) - 1) << kEpochShift;
+ static const uint64_t kEpochInc = 1ull << kEpochShift;
+
+ static_assert(kEpochBits >= 20, "not enough bits to prevent ABA problem");
public:
struct Waiter {
- alignas (2*TF_CACHELINE_SIZE) std::atomic<uint64_t> next;
- uint64_t epoch;
+ alignas (2*TF_CACHELINE_SIZE) std::atomic<uint64_t> next{kStackMask};
+ uint64_t epoch{0};
enum : unsigned {
kNotSignaled = 0,
kWaiting,
};
#if __cplusplus >= TF_CPP20
- std::atomic<unsigned> state {0};
+ std::atomic<unsigned> state {kNotSignaled};
#else
std::mutex mu;
std::condition_variable cv;
- unsigned state;
+ unsigned state {kNotSignaled};
#endif
};
// prepare_wait prepares for waiting.
// After calling prepare_wait, the thread must re-check the wait predicate
// and then call either cancel_wait or commit_wait.
+ //void prepare_wait(Waiter*) {
+ // uint64_t state = _state.load(std::memory_order_relaxed);
+ // for (;;) {
+ // //_check_state(state);
+ // uint64_t newstate = state + kWaiterInc;
+ // //_check_state(newstate);
+ // if (_state.compare_exchange_weak(state, newstate, std::memory_order_seq_cst)) return;
+ // }
+ //}
+
void prepare_wait(Waiter*) {
- uint64_t state = _state.load(std::memory_order_relaxed);
- for (;;) {
- //_check_state(state);
- uint64_t newstate = state + kWaiterInc;
- //_check_state(newstate);
- if (_state.compare_exchange_weak(state, newstate, std::memory_order_seq_cst)) return;
- }
+ _state.fetch_add(kWaiterInc, std::memory_order_relaxed);
+ std::atomic_thread_fence(std::memory_order_seq_cst);
}
// commit_wait commits waiting after prepare_wait.
}
private:
- // State_ layout:
- // - low kWaiterBits is a stack of waiters committed wait
- // (indexes in _waiters array are used as stack elements,
- // kStackMask means empty stack).
- // - next kWaiterBits is count of waiters in prewait state.
- // - next kWaiterBits is count of pending signals.
- // - remaining bits are ABA counter for the stack.
- // (stored in Waiter node and incremented on push).
- static const uint64_t kWaiterBits = 14;
- static const uint64_t kStackMask = (1ull << kWaiterBits) - 1;
- static const uint64_t kWaiterShift = kWaiterBits;
- static const uint64_t kWaiterMask = ((1ull << kWaiterBits) - 1) << kWaiterShift;
- static const uint64_t kWaiterInc = 1ull << kWaiterShift;
- static const uint64_t kSignalShift = 2 * kWaiterBits;
- static const uint64_t kSignalMask = ((1ull << kWaiterBits) - 1) << kSignalShift;
- static const uint64_t kSignalInc = 1ull << kSignalShift;
- static const uint64_t kEpochShift = 3 * kWaiterBits;
- static const uint64_t kEpochBits = 64 - kEpochShift;
- static const uint64_t kEpochMask = ((1ull << kEpochBits) - 1) << kEpochShift;
- static const uint64_t kEpochInc = 1ull << kEpochShift;
-
- static_assert(kEpochBits >= 20, "not enough bits to prevent ABA problem");
+
std::atomic<uint64_t> _state;
std::vector<Waiter> _waiters;
std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){
if(i.count == 0) return;
- count_w = std::max(count_w, std::to_string(i.count).size());
+ count_w = (std::max)(count_w, std::to_string(i.count).size());
});
std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){
if(i.count == 0) return;
- time_w = std::max(time_w, std::to_string(i.total_span).size());
+ time_w = (std::max)(time_w, std::to_string(i.total_span).size());
});
std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){
if(i.count == 0) return;
- avg_w = std::max(time_w, std::to_string(i.avg_span()).size());
+ avg_w = (std::max)(time_w, std::to_string(i.avg_span()).size());
});
std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){
if(i.count == 0) return;
- min_w = std::max(min_w, std::to_string(i.min_span).size());
+ min_w = (std::max)(min_w, std::to_string(i.min_span).size());
});
std::for_each(tsum.begin(), tsum.end(), [&](const auto& i){
if(i.count == 0) return;
- max_w = std::max(max_w, std::to_string(i.max_span).size());
+ max_w = (std::max)(max_w, std::to_string(i.max_span).size());
});
os << std::setw(type_w) << "-Task-"
std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){
if(i.count == 0) return;
- l_w = std::max(l_w, std::to_string(i.level).size());
+ l_w = (std::max)(l_w, std::to_string(i.level).size());
});
std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){
if(i.count == 0) return;
- c_w = std::max(c_w, std::to_string(i.count).size());
+ c_w = (std::max)(c_w, std::to_string(i.count).size());
});
std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){
if(i.count == 0) return;
- d_w = std::max(d_w, std::to_string(i.total_span).size());
+ d_w = (std::max)(d_w, std::to_string(i.total_span).size());
});
std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){
if(i.count == 0) return;
- avg_w = std::max(avg_w, std::to_string(i.avg_span()).size());
+ avg_w = (std::max)(avg_w, std::to_string(i.avg_span()).size());
});
std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){
if(i.count == 0) return;
- min_w = std::max(min_w, std::to_string(i.min_span).size());
+ min_w = (std::max)(min_w, std::to_string(i.min_span).size());
});
std::for_each(wsum.begin(), wsum.end(), [&](const auto& i){
if(i.count == 0) return;
- max_w = std::max(max_w, std::to_string(i.max_span).size());
+ max_w = (std::max)(max_w, std::to_string(i.max_span).size());
});
os << std::setw(w_w) << "-Worker-"
// update the entire span
auto& s = _timeline.segments[w][l][i];
- view_beg = view_beg ? std::min(*view_beg, s.beg) : s.beg;
- view_end = view_end ? std::max(*view_end, s.end) : s.end;
+ view_beg = view_beg ? (std::min)(*view_beg, s.beg) : s.beg;
+ view_end = view_end ? (std::max)(*view_end, s.end) : s.end;
// update the task summary
size_t t = duration_cast<microseconds>(s.end - s.beg).count();
auto& x = summary.tsum[static_cast<int>(s.type)];
x.count += 1;
x.total_span += t;
- x.min_span = (x.count == 1) ? t : std::min(t, x.min_span);
- x.max_span = (x.count == 1) ? t : std::max(t, x.max_span);
+ x.min_span = (x.count == 1) ? t : (std::min)(t, x.min_span);
+ x.max_span = (x.count == 1) ? t : (std::max)(t, x.max_span);
// update the worker summary
ws.total_span += t;
- ws.min_span = (i == 0) ? t : std::min(t, ws.min_span);
- ws.max_span = (i == 0) ? t : std::max(t, ws.max_span);
+ ws.min_span = (i == 0) ? t : (std::min)(t, ws.min_span);
+ ws.max_span = (i == 0) ? t : (std::max)(t, ws.max_span);
auto&y = ws.tsum[static_cast<int>(s.type)];
y.count += 1;
y.total_span += t;
- y.min_span = (y.count == 1) ? t : std::min(t, y.min_span);
- y.max_span = (y.count == 1) ? t : std::max(t, y.max_span);
+ y.min_span = (y.count == 1) ? t : (std::min)(t, y.min_span);
+ y.max_span = (y.count == 1) ? t : (std::max)(t, y.max_span);
// update the delay
//if(i) {
A runtime object allows users to interact with the
scheduling runtime inside a task, such as scheduling an active task,
-spawning a subflow, and so on.
+spawning an asynchronous task, corunning a graph target, and so on.
@code{.cpp}
tf::Task A, B, C, D;
friend class PreemptionGuard;
friend class Algorithm;
- #define TF_RUNTIME_CHECK_CALLER(msg) \
- if(pt::this_worker == nullptr || pt::this_worker->_executor != &_executor) { \
- TF_THROW(msg); \
+ #define TF_RUNTIME_CHECK_CALLER(msg) \
+ if(pt::this_worker != &_worker) { \
+ TF_THROW(msg); \
}
public:
going through the normal taskflow graph scheduling process.
At this moment, task @c C is active because its parent taskflow is running.
When the taskflow finishes, we will see both @c B and @c C in the output.
+
+ @attention
+  This method can only be called by the parent worker of this runtime;
+  otherwise, the behavior is undefined.
*/
void schedule(Task task);
function on the given arguments.
The difference from tf::Executor::async is that the created asynchronous task
pertains to the runtime object.
- Applications can explicitly issue tf::Runtime::corun_all
+ Applications can explicitly issue tf::Runtime::corun
to wait for all spawned asynchronous tasks to finish.
For example:
// spawn 100 asynchronous tasks from the worker of the runtime
for(int i=0; i<100; i++) {
- rt.async([&](){ counter++; });
+ rt.silent_async([&](){ counter++; });
}
// wait for the 100 asynchronous tasks to finish
- rt.corun_all();
+ rt.corun();
assert(counter == 102);
});
@endcode
}
// wait for the 200 asynchronous tasks to finish
- rt.corun_all();
+ rt.corun();
assert(counter == 200);
});
@endcode
for(int i=0; i<100; i++) {
rt.silent_async([&](){ counter++; });
}
- rt.corun_all();
+ rt.corun();
assert(counter == 100);
});
@endcode
@code{.cpp}
taskflow.emplace([&](tf::Runtime& rt){
rt.silent_async("my task", [](){});
- rt.corun_all();
+ rt.corun();
});
@endcode
*/
and returns when all tasks in the target complete.
@attention
- The method is not thread-safe as it modifies the anchor state of the node for exception handling.
+  This method can only be called by the parent worker of this runtime;
+  otherwise, the behavior is undefined.
*/
template <typename T>
void corun(T&& target);
/**
- @brief corun all asynchronous tasks spawned by this runtime with other workers
+ @brief corun all tasks spawned by this runtime with other workers
- Coruns all asynchronous tasks (tf::Runtime::async,
- tf::Runtime::silent_async) with other workers until all those
- asynchronous tasks finish.
+ Coruns all tasks spawned by this runtime with other workers until all these tasks finish.
@code{.cpp}
std::atomic<size_t> counter{0};
for(int i=0; i<100; i++) {
rt.silent_async([&](){ counter++; });
}
- rt.corun_all();
+ rt.corun();
assert(counter == 100);
// spawn another 100 async tasks and wait
for(int i=0; i<100; i++) {
rt.silent_async([&](){ counter++; });
}
- rt.corun_all();
+ rt.corun();
assert(counter == 200);
});
@endcode
@attention
- The method is not thread-safe as it modifies the anchor state of the node for exception handling.
+  This method can only be called by the parent worker of this runtime;
+  otherwise, the behavior is undefined.
*/
- inline void corun_all();
+ void corun();
- protected:
-
+ /**
+  @brief equivalent to tf::Runtime::corun; kept as an alias for backward compatibility
+ */
+ void corun_all();
+
+ /**
+  @brief queries whether the underlying task has been cancelled
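+
+  A possible polling pattern inside a long-running runtime task (a sketch; cancellation
+  is requested elsewhere, e.g., through the future returned by tf::Executor::run, and
+  `has_more_work` and `do_one_chunk` are hypothetical helpers):
+
+  @code{.cpp}
+  taskflow.emplace([](tf::Runtime& rt){
+    while(has_more_work()) {
+      if(rt.is_cancelled()) {
+        return;  // stop early once the enclosing run has been cancelled
+      }
+      do_one_chunk();
+    }
+  });
+  @endcode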
+ */
+ bool is_cancelled();
+
+protected:
/**
@private
*/
// Procedure: schedule
inline void Runtime::schedule(Task task) {
- TF_RUNTIME_CHECK_CALLER("schedule must be called by a worker of runtime's executor");
-
auto node = task._node;
// need to keep the invariant: when scheduling a task, the task must have
// zero dependency (join counter is 0)
// Procedure: corun
template <typename T>
void Runtime::corun(T&& target) {
-
static_assert(has_graph_v<T>, "target must define a member function 'Graph& graph()'");
-
- TF_RUNTIME_CHECK_CALLER("corun must be called by a worker of runtime's executor");
_executor._corun_graph(*pt::this_worker, _parent, target.graph().begin(), target.graph().end());
}
-// Function: corun_all
-inline void Runtime::corun_all() {
- TF_RUNTIME_CHECK_CALLER("corun_all must be called by a worker of runtime's executor");
+// Function: corun
+inline void Runtime::corun() {
{
AnchorGuard anchor(_parent);
_executor._corun_until(_worker, [this] () -> bool {
_parent->_rethrow_exception();
}
+// Function: corun_all
+inline void Runtime::corun_all() {
+ corun();
+}
+
+inline bool Runtime::is_cancelled() { return _parent->_is_cancelled(); }
+
// ------------------------------------
// Runtime::silent_async series
// ------------------------------------
}
~PreemptionGuard() {
+    // If I am the last to join, then there is no need to preempt the runtime
if(_runtime._parent->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1) {
_runtime._preempted = false;
_runtime._parent->_nstate &= ~NSTATE::PREEMPTED;
_observer_epilogue(worker, node);
// here, we cannot check the state from node->_nstate due to data race
+  // Ex: if preempted, another task may finish really quickly and insert this parent task
+  // again into the scheduling queue. When running this parent task, it would jump to the
+  // else branch below and modify the nstate, thus incurring a data race.
if(rt._preempted) {
return true;
}
}
-// ----------------------------------------------------------------------------
-// Executor Members that Depend on Runtime
-// ----------------------------------------------------------------------------
-
-template <typename P, typename F>
-auto Executor::_async(P&& params, F&& f, Topology* tpg, Node* parent) {
-
- // async task with runtime: [] (tf::Runtime&) { ... }
- if constexpr (is_runtime_task_v<F>) {
- std::promise<void> p;
- auto fu{p.get_future()};
-
- _schedule_async_task(animate(
- NSTATE::NONE, ESTATE::ANCHORED, std::forward<P>(params), tpg, parent, 0,
- std::in_place_type_t<Node::Async>{},
- [p=MoC{std::move(p)}, f=std::forward<F>(f)](Runtime& rt, bool reentered) mutable {
- if(!reentered) {
- f(rt);
- }
- else {
- auto& eptr = rt._parent->_exception_ptr;
- eptr ? p.object.set_exception(eptr) : p.object.set_value();
- }
- }
- ));
- return fu;
- }
- // async task with closure: [] () { ... }
- else if constexpr (std::is_invocable_v<F>){
- using R = std::invoke_result_t<F>;
- std::packaged_task<R()> p(std::forward<F>(f));
- auto fu{p.get_future()};
- _schedule_async_task(animate(
- std::forward<P>(params), tpg, parent, 0,
- std::in_place_type_t<Node::Async>{},
- [p=make_moc(std::move(p))]() mutable { p.object(); }
- ));
- return fu;
- }
- else {
- static_assert(dependent_false_v<F>,
- "invalid async target - must be one of the following types:\n\
- (1) [] (tf::Runtime&) -> void {}\n\
- (2) [] () -> auto { ... return ... }\n"
- );
- }
-
-}
-
-// Function: _silent_async
-template <typename P, typename F>
-void Executor::_silent_async(P&& params, F&& f, Topology* tpg, Node* parent) {
- // silent task
- if constexpr (is_runtime_task_v<F> || std::is_invocable_v<F>) {
- _schedule_async_task(animate(
- std::forward<P>(params), tpg, parent, 0,
- std::in_place_type_t<Node::Async>{}, std::forward<F>(f)
- ));
- }
- // invalid silent async target
- else {
- static_assert(dependent_false_v<F>,
- "invalid silent_async target - must be one of the following types:\n\
- (1) [] (tf::Runtime&) -> void {}\n\
- (2) [] () -> auto { ... return ... }\n"
- );
- }
-}
} // end of namespace tf -----------------------------------------------------
}
// ----------------------------------------------------------------------------
-// Task Traits
+// Static Task Trait
// ----------------------------------------------------------------------------
/**
-@brief determines if a callable is a dynamic task
+@private
+*/
+template <typename C, typename = void>
+struct is_static_task : std::false_type {};
-A dynamic task is a callable object constructible from std::function<void(Subflow&)>.
+/**
+@private
*/
template <typename C>
-constexpr bool is_subflow_task_v = std::is_invocable_r_v<void, C, Subflow&>;
+struct is_static_task<C, std::enable_if_t<std::is_invocable_v<C>>>
+ : std::is_same<std::invoke_result_t<C>, void> {};
/**
-@brief determines if a callable is a condition task
+@brief determines if a callable is a static task
-A condition task is a callable object constructible from std::function<int()>.
+A static task is a callable object constructible from std::function<void()>.
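+
+For illustration (a brief sketch; the lambdas are placeholders):
+
+@code{.cpp}
+auto f = [](){};            // void() -> a static task
+auto g = [](){ return 1; }; // int()  -> not a static task
+static_assert(tf::is_static_task_v<decltype(f)>, "f should be a static task");
+static_assert(!tf::is_static_task_v<decltype(g)>, "g should not be a static task");
+@endcode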
*/
template <typename C>
-constexpr bool is_condition_task_v = std::is_invocable_r_v<int, C>;
+constexpr bool is_static_task_v = is_static_task<C>::value;
+
+// ----------------------------------------------------------------------------
+// Subflow Task Trait
+// ----------------------------------------------------------------------------
/**
-@brief determines if a callable is a multi-condition task
+@private
+*/
+template <typename C, typename = void>
+struct is_subflow_task : std::false_type {};
-A multi-condition task is a callable object constructible from
-std::function<tf::SmallVector<int>()>.
+/**
+@private
*/
template <typename C>
-constexpr bool is_multi_condition_task_v = std::is_invocable_r_v<SmallVector<int>, C>;
+struct is_subflow_task<C, std::enable_if_t<std::is_invocable_v<C, tf::Subflow&>>>
+ : std::is_same<std::invoke_result_t<C, tf::Subflow&>, void> {};
/**
-@brief determines if a callable is a static task
+@brief determines if a callable is a subflow task
-A static task is a callable object constructible from std::function<void()>.
+A subflow task is a callable object constructible from std::function<void(Subflow&)>.
+*/
+template <typename C>
+constexpr bool is_subflow_task_v = is_subflow_task<C>::value;
+
+// ----------------------------------------------------------------------------
+// Runtime Task Trait
+// ----------------------------------------------------------------------------
+
+/**
+@private
+*/
+template <typename C, typename = void>
+struct is_runtime_task : std::false_type {};
+
+/**
+@private
*/
template <typename C>
-constexpr bool is_static_task_v = std::is_invocable_r_v<void, C> &&
- !is_condition_task_v<C> &&
- !is_multi_condition_task_v<C>;
+struct is_runtime_task<C, std::enable_if_t<std::is_invocable_v<C, tf::Runtime&>>>
+ : std::is_same<std::invoke_result_t<C, tf::Runtime&>, void> {};
/**
@brief determines if a callable is a runtime task
-A runtime task is a callable object constructible from std::function<void(tf::Runtime&)>.
+A runtime task is a callable object constructible from std::function<void(Runtime&)>.
+*/
+template <typename C>
+constexpr bool is_runtime_task_v = is_runtime_task<C>::value;
+
+
+// ----------------------------------------------------------------------------
+// Condition Task Trait
+// ----------------------------------------------------------------------------
+
+/**
+@brief determines if a callable is a condition task
+
+A condition task is a callable object constructible from std::function<int()>.
+*/
+template <typename C>
+constexpr bool is_condition_task_v = std::is_invocable_r_v<int, C>;
+
+/**
+@brief determines if a callable is a multi-condition task
+
+A multi-condition task is a callable object constructible from
+std::function<tf::SmallVector<int>()>.
*/
template <typename C>
-constexpr bool is_runtime_task_v = std::is_invocable_r_v<void, C, Runtime&>;
+constexpr bool is_multi_condition_task_v = std::is_invocable_r_v<SmallVector<int>, C>;
+
// ----------------------------------------------------------------------------
// Task
/**
@brief queries the number of predecessors of the task
*/
- size_t num_dependents() const;
+ size_t num_predecessors() const;
/**
@brief queries the number of strong dependencies of the task
*/
- size_t num_strong_dependents() const;
+ size_t num_strong_dependencies() const;
/**
@brief queries the number of weak dependencies of the task
*/
- size_t num_weak_dependents() const;
+ size_t num_weak_dependencies() const;
/**
@brief assigns a name to the task
@brief applies a visitor callable to each predecessor of the task
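+
+A small traversal sketch (assuming `task` is a valid tf::Task in a constructed graph):
+
+@code{.cpp}
+task.for_each_predecessor([](tf::Task pred){
+  std::cout << "predecessor: " << pred.name() << '\n';
+});
+@endcode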
*/
template <typename V>
- void for_each_dependent(V&& visitor) const;
+ void for_each_predecessor(V&& visitor) const;
/**
@brief obtains a hash value of the underlying node
return _node->_name;
}
-// Function: num_dependents
-inline size_t Task::num_dependents() const {
- return _node->num_dependents();
+// Function: num_predecessors
+inline size_t Task::num_predecessors() const {
+ return _node->num_predecessors();
}
-// Function: num_strong_dependents
-inline size_t Task::num_strong_dependents() const {
- return _node->num_strong_dependents();
+// Function: num_strong_dependencies
+inline size_t Task::num_strong_dependencies() const {
+ return _node->num_strong_dependencies();
}
-// Function: num_weak_dependents
-inline size_t Task::num_weak_dependents() const {
- return _node->num_weak_dependents();
+// Function: num_weak_dependencies
+inline size_t Task::num_weak_dependencies() const {
+ return _node->num_weak_dependencies();
}
// Function: num_successors
// Function: for_each_successor
template <typename V>
void Task::for_each_successor(V&& visitor) const {
- for(size_t i=0; i<_node->_successors.size(); ++i) {
- visitor(Task(_node->_successors[i]));
+ for(size_t i=0; i<_node->_num_successors; ++i) {
+ visitor(Task(_node->_edges[i]));
}
}
-// Function: for_each_dependent
+// Function: for_each_predecessor
template <typename V>
-void Task::for_each_dependent(V&& visitor) const {
- for(size_t i=0; i<_node->_dependents.size(); ++i) {
- visitor(Task(_node->_dependents[i]));
+void Task::for_each_predecessor(V&& visitor) const {
+ for(size_t i=_node->_num_successors; i<_node->_edges.size(); ++i) {
+ visitor(Task(_node->_edges[i]));
}
}
/**
@brief queries the number of predecessors of the task
*/
- size_t num_dependents() const;
+ size_t num_predecessors() const;
/**
@brief queries the number of strong dependencies of the task
*/
- size_t num_strong_dependents() const;
+ size_t num_strong_dependencies() const;
/**
@brief queries the number of weak dependencies of the task
*/
- size_t num_weak_dependents() const;
+ size_t num_weak_dependencies() const;
/**
@brief applies a visitor callable to each successor of the task
@brief applies a visitor callable to each predecessor of the task
*/
template <typename V>
- void for_each_dependent(V&& visitor) const;
+ void for_each_predecessor(V&& visitor) const;
/**
@brief queries the task type
return _node._name;
}
-// Function: num_dependents
-inline size_t TaskView::num_dependents() const {
- return _node.num_dependents();
+// Function: num_predecessors
+inline size_t TaskView::num_predecessors() const {
+ return _node.num_predecessors();
}
-// Function: num_strong_dependents
-inline size_t TaskView::num_strong_dependents() const {
- return _node.num_strong_dependents();
+// Function: num_strong_dependencies
+inline size_t TaskView::num_strong_dependencies() const {
+ return _node.num_strong_dependencies();
}
-// Function: num_weak_dependents
-inline size_t TaskView::num_weak_dependents() const {
- return _node.num_weak_dependents();
+// Function: num_weak_dependencies
+inline size_t TaskView::num_weak_dependencies() const {
+ return _node.num_weak_dependencies();
}
// Function: num_successors
// Function: for_each_successor
template <typename V>
void TaskView::for_each_successor(V&& visitor) const {
- for(size_t i=0; i<_node._successors.size(); ++i) {
- visitor(TaskView(*_node._successors[i]));
+ for(size_t i=0; i<_node._num_successors; ++i) {
+ visitor(TaskView(*_node._edges[i]));
}
+ //for(size_t i=0; i<_node._successors.size(); ++i) {
+ // visitor(TaskView(*_node._successors[i]));
+ //}
}
-// Function: for_each_dependent
+// Function: for_each_predecessor
template <typename V>
-void TaskView::for_each_dependent(V&& visitor) const {
- for(size_t i=0; i<_node._dependents.size(); ++i) {
- visitor(TaskView(*_node._dependents[i]));
+void TaskView::for_each_predecessor(V&& visitor) const {
+ for(size_t i=_node._num_successors; i<_node._edges.size(); ++i) {
+ visitor(TaskView(*_node._edges[i]));
}
+ //for(size_t i=0; i<_node._predecessors.size(); ++i) {
+ // visitor(TaskView(*_node._predecessors[i]));
+ //}
}
} // end of namespace tf. ----------------------------------------------------
a.precede(b, c, d);
assert(a.num_successors() == 3);
- assert(b.num_dependents() == 1);
- assert(c.num_dependents() == 1);
- assert(d.num_dependents() == 1);
+ assert(b.num_predecessors() == 1);
+ assert(c.num_predecessors() == 1);
+ assert(d.num_predecessors() == 1);
taskflow.remove_dependency(a, b);
assert(a.num_successors() == 2);
- assert(b.num_dependents() == 0);
+ assert(b.num_predecessors() == 0);
@endcode
*/
inline void remove_dependency(Task from, Task to);
// Procedure: remove_dependency
inline void Taskflow::remove_dependency(Task from, Task to) {
- from._node->_successors.erase(std::remove_if(
- from._node->_successors.begin(), from._node->_successors.end(), [&](Node* i){
- return i == to._node;
- }
- ), from._node->_successors.end());
-
- to._node->_dependents.erase(std::remove_if(
- to._node->_dependents.begin(), to._node->_dependents.end(), [&](Node* i){
- return i == from._node;
- }
- ), to._node->_dependents.end());
+ // remove "to" from the succcessor list of "from"
+ from._node->_remove_successors(to._node);
+
+ // remove "from" from the predecessor list of "to"
+ to._node->_remove_predecessors(from._node);
}
// Procedure: dump
os << "];\n";
- for(size_t s=0; s<node->_successors.size(); ++s) {
+ for(size_t s=0; s<node->_num_successors; ++s) {
if(node->_is_conditioner()) {
// case edge is dashed
- os << 'p' << node << " -> p" << node->_successors[s]
+ os << 'p' << node << " -> p" << node->_edges[s]
<< " [style=dashed label=\"" << s << "\"];\n";
} else {
- os << 'p' << node << " -> p" << node->_successors[s] << ";\n";
+ os << 'p' << node << " -> p" << node->_edges[s] << ";\n";
}
}
// subflow join node
if(node->_parent && node->_parent->_handle.index() == Node::SUBFLOW &&
- node->_successors.size() == 0
+ node->_num_successors == 0
) {
os << 'p' << node << " -> p" << node->_parent << " [style=dashed color=blue];\n";
}
os << " [m" << dumper.visited[module] << "]\"];\n";
- for(const auto s : n->_successors) {
- os << 'p' << n << "->" << 'p' << s << ";\n";
+ //for(const auto s : n->_successors) {
+ for(size_t i=0; i<n->_num_successors; ++i) {
+ os << 'p' << n << "->" << 'p' << n->_edges[i] << ";\n";
}
}
}
*/
T steal();
+ /**
+ @brief attempts to steal a task with a hint mechanism
+
+ @param num_empty_steals a reference to a counter tracking consecutive empty steal attempts
+
+ This function tries to steal a task from the queue. If the steal attempt
+ is successful, the stolen task is returned.
+ Additionally, if the queue is empty, the provided counter `num_empty_steals` is incremented;
+ otherwise, `num_empty_steals` is reset to zero.
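+
+  A possible caller-side pattern (a sketch; `queue` is an unbounded task queue of task
+  pointers, and `kMaxEmptySteals` and `run` are hypothetical names used for illustration):
+
+  @code{.cpp}
+  size_t num_empty_steals = 0;
+  while(num_empty_steals < kMaxEmptySteals) {
+    if(auto task = queue.steal_with_hint(num_empty_steals); task != nullptr) {
+      run(task);  // execute the stolen task
+    }
+  }
+  @endcode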
+
+ */
+ T steal_with_hint(size_t& num_empty_steals);
+
private:
Array* resize_array(Array* a, int64_t b, int64_t t);
// Function: empty
template <typename T>
bool UnboundedTaskQueue<T>::empty() const noexcept {
- int64_t b = _bottom.load(std::memory_order_relaxed);
int64_t t = _top.load(std::memory_order_relaxed);
+ int64_t b = _bottom.load(std::memory_order_relaxed);
return (b <= t);
}
// Function: size
template <typename T>
size_t UnboundedTaskQueue<T>::size() const noexcept {
- int64_t b = _bottom.load(std::memory_order_relaxed);
int64_t t = _top.load(std::memory_order_relaxed);
+ int64_t b = _bottom.load(std::memory_order_relaxed);
return static_cast<size_t>(b >= t ? b - t : 0);
}
int64_t t = _top.load(std::memory_order_acquire);
Array* a = _array.load(std::memory_order_relaxed);
- // queue is full
+  // resize if the queue cannot hold one more item (b - t + 1 would exceed capacity)
if TF_UNLIKELY(a->capacity() - 1 < (b - t)) {
a = resize_array(a, b, t);
}
return item;
}
+// Function: steal_with_hint
+template <typename T>
+T UnboundedTaskQueue<T>::steal_with_hint(size_t& num_empty_steals) {
+
+ int64_t t = _top.load(std::memory_order_acquire);
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+ int64_t b = _bottom.load(std::memory_order_acquire);
+
+ T item {nullptr};
+
+ if(t < b) {
+ num_empty_steals = 0;
+ Array* a = _array.load(std::memory_order_consume);
+ item = a->pop(t);
+ if(!_top.compare_exchange_strong(t, t+1,
+ std::memory_order_seq_cst,
+ std::memory_order_relaxed)) {
+ return nullptr;
+ }
+ }
+ else {
+ ++num_empty_steals;
+ }
+ return item;
+}
+
// Function: capacity
template <typename T>
int64_t UnboundedTaskQueue<T>::capacity() const noexcept {
typename UnboundedTaskQueue<T>::Array*
UnboundedTaskQueue<T>::resize_array(Array* a, int64_t b, int64_t t) {
+ //Array* tmp = a->resize(b, t);
+ //_garbage.push_back(a);
+ //std::swap(a, tmp);
+ //_array.store(a, std::memory_order_release);
+ //// Note: the original paper using relaxed causes t-san to complain
+ ////_array.store(a, std::memory_order_relaxed);
+ //return a;
+
+
Array* tmp = a->resize(b, t);
_garbage.push_back(a);
- std::swap(a, tmp);
- _array.store(a, std::memory_order_release);
+ _array.store(tmp, std::memory_order_release);
// Note: the original paper using relaxed causes t-san to complain
//_array.store(a, std::memory_order_relaxed);
- return a;
+ return tmp;
}
// ----------------------------------------------------------------------------
@tparam O data type
@tparam C callable type
@param item the item to perfect-forward to the queue
- @param on_full callable to invoke when the queue is faull (insertion fails)
+ @param on_full callable to invoke when the queue is full (insertion fails)
Only the owner thread can insert an item to the queue.
@brief pops out an item from the queue
Only the owner thread can pop out an item from the queue.
- The return can be a @std_nullopt if this operation failed (empty queue).
+  The return can be `nullptr` if this operation failed (empty queue).
*/
T pop();
@brief steals an item from the queue
Any threads can try to steal an item from the queue.
- The return can be a @std_nullopt if this operation failed (not necessary empty).
+  The return can be `nullptr` if this operation failed (the queue is not necessarily empty).
*/
T steal();
+
+ /**
+ @brief attempts to steal a task with a hint mechanism
+
+ @param num_empty_steals a reference to a counter tracking consecutive empty steal attempts
+
+ This function tries to steal a task from the queue. If the steal attempt
+ is successful, the stolen task is returned.
+ Additionally, if the queue is empty, the provided counter `num_empty_steals` is incremented;
+ otherwise, `num_empty_steals` is reset to zero.
+ */
+ T steal_with_hint(size_t& num_empty_steals);
};
// Function: empty
template <typename T, size_t LogSize>
bool BoundedTaskQueue<T, LogSize>::empty() const noexcept {
- int64_t b = _bottom.load(std::memory_order_relaxed);
int64_t t = _top.load(std::memory_order_relaxed);
+ int64_t b = _bottom.load(std::memory_order_relaxed);
return b <= t;
}
// Function: size
template <typename T, size_t LogSize>
size_t BoundedTaskQueue<T, LogSize>::size() const noexcept {
- int64_t b = _bottom.load(std::memory_order_relaxed);
int64_t t = _top.load(std::memory_order_relaxed);
+ int64_t b = _bottom.load(std::memory_order_relaxed);
return static_cast<size_t>(b >= t ? b - t : 0);
}
int64_t b = _bottom.load(std::memory_order_relaxed);
int64_t t = _top.load(std::memory_order_acquire);
- // queue is full
- if TF_UNLIKELY((b - t) >= BufferSize - 1) {
+  // the queue cannot hold one more item (b - t + 1 would exceed BufferSize)
+ if TF_UNLIKELY((b - t) > BufferSize - 1) {
return false;
}
int64_t b = _bottom.load(std::memory_order_relaxed);
int64_t t = _top.load(std::memory_order_acquire);
- // queue is full
- if TF_UNLIKELY((b - t) >= BufferSize - 1) {
+  // the queue cannot hold one more item (b - t + 1 would exceed BufferSize)
+ if TF_UNLIKELY((b - t) > BufferSize - 1) {
on_full();
return;
}
return item;
}
+// Function: steal_with_hint
+template <typename T, size_t LogSize>
+T BoundedTaskQueue<T, LogSize>::steal_with_hint(size_t& num_empty_steals) {
+ int64_t t = _top.load(std::memory_order_acquire);
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+ int64_t b = _bottom.load(std::memory_order_acquire);
+
+ T item {nullptr};
+
+ if(t < b) {
+ num_empty_steals = 0;
+ item = _buffer[t & BufferMask].load(std::memory_order_relaxed);
+ if(!_top.compare_exchange_strong(t, t+1,
+ std::memory_order_seq_cst,
+ std::memory_order_relaxed)) {
+ return nullptr;
+ }
+ }
+ else {
+ ++num_empty_steals;
+ }
+ return item;
+}
+
// Function: capacity
template <typename T, size_t LogSize>
constexpr size_t BoundedTaskQueue<T, LogSize>::capacity() const {
- return static_cast<size_t>(BufferSize - 1);
+ return static_cast<size_t>(BufferSize);
}
} // end of namespace tf -----------------------------------------------------
+
// Default Notifier
// ----------------------------------------------------------------------------
+
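+// The notifier implementation can be selected at build time by defining one of the
+// macros below (e.g., -DTF_ENABLE_NONBLOCKING_NOTIFIER_V2). Without any of them, the
+// default is AtomicNotifier under C++20 and NonblockingNotifierV2 otherwise.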
/**
@private
*/
#ifdef TF_ENABLE_ATOMIC_NOTIFIER
- using DefaultNotifier = AtomicNotifierV2;
-#else
- //using DefaultNotifier = AtomicNotifierV2;
+ using DefaultNotifier = AtomicNotifier;
+#elif TF_ENABLE_NONBLOCKING_NOTIFIER_V1
+ using DefaultNotifier = NonblockingNotifierV1;
+#elif TF_ENABLE_NONBLOCKING_NOTIFIER_V2
using DefaultNotifier = NonblockingNotifierV2;
+#else
+ #if __cplusplus >= TF_CPP20
+ using DefaultNotifier = AtomicNotifier;
+ #else
+ using DefaultNotifier = NonblockingNotifierV2;
+ #endif
#endif
// ----------------------------------------------------------------------------
std::thread& thread() { return _thread; }
private:
+
+ #if __cplusplus >= TF_CPP20
+ std::atomic_flag _done = ATOMIC_FLAG_INIT;
+ #else
+ std::atomic<bool> _done {false};
+ #endif
size_t _id;
size_t _vtm;
Executor* _executor {nullptr};
DefaultNotifier::Waiter* _waiter;
- std::default_random_engine _rdgen;
- std::uniform_int_distribution<size_t> _rdvtm;
std::thread _thread;
+
+ std::default_random_engine _rdgen;
+ //std::uniform_int_distribution<size_t> _udist;
+
BoundedTaskQueue<Node*> _wsq;
+
+ //TF_FORCE_INLINE size_t _rdvtm() {
+ // auto r = _udist(_rdgen);
+ // return r + (r >= _id);
+ //}
+
};
/**
@private
*/
-template <size_t nt, size_t vt, typename I, typename C>
+template <typename I, typename C, typename E>
__global__ void cuda_for_each_kernel(I first, unsigned count, C c) {
- auto tid = threadIdx.x;
- auto bid = blockIdx.x;
- auto tile = cuda_get_tile(bid, nt*vt, count);
- cuda_strided_iterate<nt, vt>(
+ auto tid = threadIdx.x;
+ auto bid = blockIdx.x;
+ auto tile = cuda_get_tile(bid, E::nv, count);
+ cuda_strided_iterate<E::nt, E::vt>(
[=](auto, auto j) {
c(*(first + tile.begin + j));
},
}
/** @private */
-template <size_t nt, size_t vt, typename I, typename C>
+template <typename I, typename C, typename E>
__global__ void cuda_for_each_index_kernel(I first, I inc, unsigned count, C c) {
auto tid = threadIdx.x;
auto bid = blockIdx.x;
- auto tile = cuda_get_tile(bid, nt*vt, count);
- cuda_strided_iterate<nt, vt>(
+ auto tile = cuda_get_tile(bid, E::nv, count);
+ cuda_strided_iterate<E::nt, E::vt>(
[=]__device__(auto, auto j) {
c(first + inc*(tile.begin+j));
},
} // end of namespace detail -------------------------------------------------
-// ----------------------------------------------------------------------------
-// cuda standard algorithms: single_task/for_each/for_each_index
-// ----------------------------------------------------------------------------
-
-/**
-@brief runs a callable asynchronously using one kernel thread
-
-@tparam P execution policy type
-@tparam C closure type
-
-@param p execution policy
-@param c closure to run by one kernel thread
-
-The function launches a single kernel thread to run the given callable
-through the stream in the execution policy object.
-*/
-template <typename P, typename C>
-void cuda_single_task(P&& p, C c) {
- cuda_kernel<<<1, 1, 0, p.stream()>>>(
- [=]__device__(auto, auto) mutable { c(); }
- );
-}
-
-/**
-@brief performs asynchronous parallel iterations over a range of items
-
-@tparam P execution policy type
-@tparam I input iterator type
-@tparam C unary operator type
-
-@param p execution policy object
-@param first iterator to the beginning of the range
-@param last iterator to the end of the range
-@param c unary operator to apply to each dereferenced iterator
-
-This function is equivalent to a parallel execution of the following loop
-on a GPU:
-
-@code{.cpp}
-for(auto itr = first; itr != last; itr++) {
- c(*itr);
-}
-@endcode
-*/
-template <typename P, typename I, typename C>
-void cuda_for_each(P&& p, I first, I last, C c) {
-
- using E = std::decay_t<P>;
-
- unsigned count = std::distance(first, last);
-
- if(count == 0) {
- return;
- }
-
- detail::cuda_for_each_kernel<E::nt, E::vt, I, C><<<E::num_blocks(count), E::nt, 0, p.stream()>>>(
- first, count, c
- );
-}
-
-/**
-@brief performs asynchronous parallel iterations over
- an index-based range of items
-
-@tparam P execution policy type
-@tparam I input index type
-@tparam C unary operator type
-
-@param p execution policy object
-@param first index to the beginning of the range
-@param last index to the end of the range
-@param inc step size between successive iterations
-@param c unary operator to apply to each index
-
-This function is equivalent to a parallel execution of
-the following loop on a GPU:
-
-@code{.cpp}
-// step is positive [first, last)
-for(auto i=first; i<last; i+=step) {
- c(i);
-}
-
-// step is negative [first, last)
-for(auto i=first; i>last; i+=step) {
- c(i);
-}
-@endcode
-*/
-template <typename P, typename I, typename C>
-void cuda_for_each_index(P&& p, I first, I last, I inc, C c) {
-
- using E = std::decay_t<P>;
-
- unsigned count = distance(first, last, inc);
-
- if(count == 0) {
- return;
- }
-
- detail::cuda_for_each_index_kernel<E::nt, E::vt, I, C><<<E::num_blocks(count), E::nt, 0, p.stream()>>>(
- first, inc, count, c
- );
-}
-
-// ----------------------------------------------------------------------------
-// single_task
-// ----------------------------------------------------------------------------
-
-/** @private */
-template <typename C>
-__global__ void cuda_single_task(C callable) {
- callable();
-}
-
-// Function: single_task
-template <typename C>
-cudaTask cudaFlow::single_task(C c) {
- return kernel(1, 1, 0, cuda_single_task<C>, c);
-}
-
-// Function: single_task
-template <typename C>
-void cudaFlow::single_task(cudaTask task, C c) {
- return kernel(task, 1, 1, 0, cuda_single_task<C>, c);
-}
-
-// Function: single_task
-template <typename C>
-cudaTask cudaFlowCapturer::single_task(C callable) {
- return on([=] (cudaStream_t stream) mutable {
- cuda_single_task(cudaDefaultExecutionPolicy(stream), callable);
- });
-}
-
-// Function: single_task
-template <typename C>
-void cudaFlowCapturer::single_task(cudaTask task, C callable) {
- on(task, [=] (cudaStream_t stream) mutable {
- cuda_single_task(cudaDefaultExecutionPolicy(stream), callable);
- });
-}
-
// ----------------------------------------------------------------------------
// cudaFlow: for_each, for_each_index
// ----------------------------------------------------------------------------
// Function: for_each
-template <typename I, typename C>
-cudaTask cudaFlow::for_each(I first, I last, C c) {
+template <typename Creator, typename Deleter>
+template <typename I, typename C, typename E>
+cudaTask cudaGraphBase<Creator, Deleter>::for_each(I first, I last, C c) {
- using E = cudaDefaultExecutionPolicy;
-
unsigned count = std::distance(first, last);
- // TODO:
- //if(count == 0) {
- // return;
- //}
-
return kernel(
E::num_blocks(count), E::nt, 0,
- detail::cuda_for_each_kernel<E::nt, E::vt, I, C>, first, count, c
+ detail::cuda_for_each_kernel<I, C, E>, first, count, c
);
}
// Function: for_each
-template <typename I, typename C>
-void cudaFlow::for_each(cudaTask task, I first, I last, C c) {
-
- using E = cudaDefaultExecutionPolicy;
+template <typename Creator, typename Deleter>
+template <typename I, typename C, typename E>
+void cudaGraphExecBase<Creator, Deleter>::for_each(cudaTask task, I first, I last, C c) {
unsigned count = std::distance(first, last);
- // TODO:
- //if(count == 0) {
- // return;
- //}
-
kernel(task,
E::num_blocks(count), E::nt, 0,
- detail::cuda_for_each_kernel<E::nt, E::vt, I, C>, first, count, c
+ detail::cuda_for_each_kernel<I, C, E>, first, count, c
);
}
// Function: for_each_index
-template <typename I, typename C>
-cudaTask cudaFlow::for_each_index(I first, I last, I inc, C c) {
-
- using E = cudaDefaultExecutionPolicy;
+template <typename Creator, typename Deleter>
+template <typename I, typename C, typename E>
+cudaTask cudaGraphBase<Creator, Deleter>::for_each_index(I first, I last, I inc, C c) {
unsigned count = distance(first, last, inc);
- // TODO:
- //if(count == 0) {
- // return;
- //}
-
return kernel(
E::num_blocks(count), E::nt, 0,
- detail::cuda_for_each_index_kernel<E::nt, E::vt, I, C>, first, inc, count, c
+ detail::cuda_for_each_index_kernel<I, C, E>, first, inc, count, c
);
}
// Function: for_each_index
-template <typename I, typename C>
-void cudaFlow::for_each_index(cudaTask task, I first, I last, I inc, C c) {
+template <typename Creator, typename Deleter>
+template <typename I, typename C, typename E>
+void cudaGraphExecBase<Creator, Deleter>::for_each_index(cudaTask task, I first, I last, I inc, C c) {
- using E = cudaDefaultExecutionPolicy;
-
unsigned count = distance(first, last, inc);
-
- // TODO:
- //if(count == 0) {
- // return;
- //}
return kernel(task,
E::num_blocks(count), E::nt, 0,
- detail::cuda_for_each_index_kernel<E::nt, E::vt, I, C>, first, inc, count, c
+ detail::cuda_for_each_index_kernel<I, C, E>, first, inc, count, c
);
}
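
A minimal usage sketch of the graph-based `for_each` and `for_each_index` interfaces shown above, assuming the `tf::cudaGraph`/`tf::cudaGraphExec` aliases of `cudaGraphBase`/`cudaGraphExecBase`, the `tf::cudaStream` wrapper, and a device array `gpu_data` of `n` (`unsigned`) integers:

```cpp
tf::cudaGraph cg;

// set every element of gpu_data to 1
tf::cudaTask init = cg.for_each(
  gpu_data, gpu_data + n, [] __device__ (unsigned& x) { x = 1; }
);

// then overwrite gpu_data[i] with 2*i for i in [0, n) with step size 1
tf::cudaTask index = cg.for_each_index(
  0u, n, 1u, [gpu_data] __device__ (unsigned i) { gpu_data[i] = 2 * i; }
);
init.precede(index);

// instantiate an executable graph and run it through a stream
tf::cudaGraphExec exec(cg);
tf::cudaStream stream;
stream.run(exec).synchronize();
```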
-// ----------------------------------------------------------------------------
-// cudaFlowCapturer: for_each, for_each_index
-// ----------------------------------------------------------------------------
-
-// Function: for_each
-template <typename I, typename C>
-cudaTask cudaFlowCapturer::for_each(I first, I last, C c) {
- return on([=](cudaStream_t stream) mutable {
- cuda_for_each(cudaDefaultExecutionPolicy(stream), first, last, c);
- });
-}
-
-// Function: for_each_index
-template <typename I, typename C>
-cudaTask cudaFlowCapturer::for_each_index(I beg, I end, I inc, C c) {
- return on([=] (cudaStream_t stream) mutable {
- cuda_for_each_index(cudaDefaultExecutionPolicy(stream), beg, end, inc, c);
- });
-}
-
-// Function: for_each
-template <typename I, typename C>
-void cudaFlowCapturer::for_each(cudaTask task, I first, I last, C c) {
- on(task, [=](cudaStream_t stream) mutable {
- cuda_for_each(cudaDefaultExecutionPolicy(stream), first, last, c);
- });
-}
-
-// Function: for_each_index
-template <typename I, typename C>
-void cudaFlowCapturer::for_each_index(
- cudaTask task, I beg, I end, I inc, C c
-) {
- on(task, [=] (cudaStream_t stream) mutable {
- cuda_for_each_index(cudaDefaultExecutionPolicy(stream), beg, end, inc, c);
- });
-}
-
-
} // end of namespace tf -----------------------------------------------------
template<unsigned nt, typename T>
struct cudaBlockReduce {
- static const unsigned group_size = std::min(nt, CUDA_WARP_SIZE);
- static const unsigned num_passes = log2(group_size);
- static const unsigned num_items = nt / group_size;
+ static constexpr unsigned group_size = (std::min)(nt, CUDA_WARP_SIZE);
+ static constexpr unsigned num_passes = static_floor_log2<group_size>();
+ static constexpr unsigned num_items = nt / group_size;
static_assert(
nt && (0 == nt % CUDA_WARP_SIZE),
template<unsigned nt, typename T>
struct cudaBlockScan {
- const static unsigned num_warps = nt / CUDA_WARP_SIZE;
- const static unsigned num_passes = log2(nt);
- const static unsigned capacity = nt + num_warps;
+ static constexpr unsigned num_warps = nt / CUDA_WARP_SIZE;
+ static constexpr unsigned num_passes = static_floor_log2<nt>();
+ static constexpr unsigned capacity = nt + num_warps;
/** @private */
union storage_t {
--- /dev/null
+#pragma once
+
+/**
+@file taskflow/cuda/algorithm/single_task.hpp
+@brief cuda single-task algorithms include file
+*/
+
+namespace tf {
+
+/** @private */
+template <typename C>
+__global__ void cuda_single_task(C callable) {
+ callable();
+}
+
+// Function: single_task
+template <typename Creator, typename Deleter>
+template <typename C>
+cudaTask cudaGraphBase<Creator, Deleter>::single_task(C c) {
+ return kernel(1, 1, 0, cuda_single_task<C>, c);
+}
+
+// Function: single_task
+template <typename Creator, typename Deleter>
+template <typename C>
+void cudaGraphExecBase<Creator, Deleter>::single_task(cudaTask task, C c) {
+ return kernel(task, 1, 1, 0, cuda_single_task<C>, c);
+}
+
+} // end of namespace tf -----------------------------------------------------
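
A minimal usage sketch of `single_task` on the graph side, assuming the `tf::cudaGraph` alias and a device pointer `result` to a single `int`:

```cpp
tf::cudaGraph cg;

// run exactly one kernel thread that writes 42 into *result
cg.single_task([result] __device__ () { *result = 42; });
```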
+
+
+
+
+
+
struct cudaBlockSort {
static constexpr bool has_values = !std::is_same<V, cudaEmpty>::value;
- static constexpr unsigned num_passes = log2(nt);
+ static constexpr unsigned num_passes = static_floor_log2<nt>();
/** @private */
union Storage {
/**
@private
*/
-template <size_t nt, size_t vt, typename I, typename O, typename C>
+template <typename I, typename O, typename C, typename E>
__global__ void cuda_transform_kernel(I first, unsigned count, O output, C op) {
auto tid = threadIdx.x;
auto bid = blockIdx.x;
- auto tile = cuda_get_tile(bid, nt*vt, count);
- cuda_strided_iterate<nt, vt>(
+ auto tile = cuda_get_tile(bid, E::nv, count);
+ cuda_strided_iterate<E::nt, E::vt>(
[=]__device__(auto, auto j) {
auto offset = j + tile.begin;
*(output + offset) = op(*(first+offset));
/**
@private
*/
-template <size_t nt, size_t vt, typename I1, typename I2, typename O, typename C>
+template <typename I1, typename I2, typename O, typename C, typename E>
__global__ void cuda_transform_kernel(
I1 first1, I2 first2, unsigned count, O output, C op
) {
auto tid = threadIdx.x;
auto bid = blockIdx.x;
- auto tile = cuda_get_tile(bid, nt*vt, count);
- cuda_strided_iterate<nt, vt>(
+ auto tile = cuda_get_tile(bid, E::nv, count);
+ cuda_strided_iterate<E::nt, E::vt>(
[=]__device__(auto, auto j) {
auto offset = j + tile.begin;
*(output + offset) = op(*(first1+offset), *(first2+offset));
} // end of namespace detail -------------------------------------------------
-// ----------------------------------------------------------------------------
-// CUDA standard algorithms: transform
-// ----------------------------------------------------------------------------
-
-/**
-@brief performs asynchronous parallel transforms over a range of items
-
-@tparam P execution policy type
-@tparam I input iterator type
-@tparam O output iterator type
-@tparam C unary operator type
-
-@param p execution policy
-@param first iterator to the beginning of the range
-@param last iterator to the end of the range
-@param output iterator to the beginning of the output range
-@param op unary operator to apply to transform each item
-
-This method is equivalent to the parallel execution of the following loop on a GPU:
-
-@code{.cpp}
-while (first != last) {
- *output++ = op(*first++);
-}
-@endcode
-
-*/
-template <typename P, typename I, typename O, typename C>
-void cuda_transform(P&& p, I first, I last, O output, C op) {
-
- using E = std::decay_t<P>;
-
- unsigned count = std::distance(first, last);
-
- if(count == 0) {
- return;
- }
-
- detail::cuda_transform_kernel<E::nt, E::vt, I, O, C>
- <<<E::num_blocks(count), E::nt, 0, p.stream()>>> (
- first, count, output, op
- );
-}
-
-/**
-@brief performs asynchronous parallel transforms over two ranges of items
-
-@tparam P execution policy type
-@tparam I1 first input iterator type
-@tparam I2 second input iterator type
-@tparam O output iterator type
-@tparam C binary operator type
-
-@param p execution policy
-@param first1 iterator to the beginning of the first range
-@param last1 iterator to the end of the first range
-@param first2 iterator to the beginning of the second range
-@param output iterator to the beginning of the output range
-@param op binary operator to apply to transform each pair of items
-
-This method is equivalent to the parallel execution of the following loop on a GPU:
-
-@code{.cpp}
-while (first1 != last1) {
- *output++ = op(*first1++, *first2++);
-}
-@endcode
-*/
-template <typename P, typename I1, typename I2, typename O, typename C>
-void cuda_transform(
- P&& p, I1 first1, I1 last1, I2 first2, O output, C op
-) {
-
- using E = std::decay_t<P>;
-
- unsigned count = std::distance(first1, last1);
-
- if(count == 0) {
- return;
- }
-
- detail::cuda_transform_kernel<E::nt, E::vt, I1, I2, O, C>
- <<<E::num_blocks(count), E::nt, 0, p.stream()>>> (
- first1, first2, count, output, op
- );
-}
-
// ----------------------------------------------------------------------------
// cudaFlow
// ----------------------------------------------------------------------------
// Function: transform
-template <typename I, typename O, typename C>
-cudaTask cudaFlow::transform(I first, I last, O output, C c) {
+template <typename Creator, typename Deleter>
+template <typename I, typename O, typename C, typename E>
+cudaTask cudaGraphBase<Creator, Deleter>::transform(I first, I last, O output, C c) {
- using E = cudaDefaultExecutionPolicy;
-
unsigned count = std::distance(first, last);
- // TODO:
- //if(count == 0) {
- // return;
- //}
-
return kernel(
E::num_blocks(count), E::nt, 0,
- detail::cuda_transform_kernel<E::nt, E::vt, I, O, C>,
+ detail::cuda_transform_kernel<I, O, C, E>,
first, count, output, c
);
}
// Function: transform
-template <typename I1, typename I2, typename O, typename C>
-cudaTask cudaFlow::transform(I1 first1, I1 last1, I2 first2, O output, C c) {
+template <typename Creator, typename Deleter>
+template <typename I1, typename I2, typename O, typename C, typename E>
+cudaTask cudaGraphBase<Creator, Deleter>::transform(I1 first1, I1 last1, I2 first2, O output, C c) {
- using E = cudaDefaultExecutionPolicy;
-
unsigned count = std::distance(first1, last1);
- // TODO:
- //if(count == 0) {
- // return;
- //}
-
return kernel(
E::num_blocks(count), E::nt, 0,
- detail::cuda_transform_kernel<E::nt, E::vt, I1, I2, O, C>,
+ detail::cuda_transform_kernel<I1, I2, O, C, E>,
first1, first2, count, output, c
);
}
+
// Function: update transform
-template <typename I, typename O, typename C>
-void cudaFlow::transform(cudaTask task, I first, I last, O output, C c) {
+template <typename Creator, typename Deleter>
+template <typename I, typename O, typename C, typename E>
+void cudaGraphExecBase<Creator, Deleter>::transform(cudaTask task, I first, I last, O output, C c) {
- using E = cudaDefaultExecutionPolicy;
-
unsigned count = std::distance(first, last);
- // TODO:
- //if(count == 0) {
- // return;
- //}
-
kernel(task,
E::num_blocks(count), E::nt, 0,
- detail::cuda_transform_kernel<E::nt, E::vt, I, O, C>,
+ detail::cuda_transform_kernel<I, O, C, E>,
first, count, output, c
);
}
// Function: update transform
-template <typename I1, typename I2, typename O, typename C>
-void cudaFlow::transform(
+template <typename Creator, typename Deleter>
+template <typename I1, typename I2, typename O, typename C, typename E>
+void cudaGraphExecBase<Creator, Deleter>::transform(
cudaTask task, I1 first1, I1 last1, I2 first2, O output, C c
) {
- using E = cudaDefaultExecutionPolicy;
-
unsigned count = std::distance(first1, last1);
-
- // TODO:
- //if(count == 0) {
- // return;
- //}
kernel(task,
E::num_blocks(count), E::nt, 0,
- detail::cuda_transform_kernel<E::nt, E::vt, I1, I2, O, C>,
+ detail::cuda_transform_kernel<I1, I2, O, C, E>,
first1, first2, count, output, c
);
}
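
A minimal usage sketch of the unary and binary `transform` tasks, assuming the `tf::cudaGraph` alias and device arrays `dx`, `dy`, and `dz` of `n` floats:

```cpp
tf::cudaGraph cg;

// unary transform: dy[i] = dx[i] + 1
tf::cudaTask t1 = cg.transform(
  dx, dx + n, dy, [] __device__ (float v) { return v + 1.0f; }
);

// binary transform: dz[i] = dx[i] * dy[i]
tf::cudaTask t2 = cg.transform(
  dx, dx + n, dy, dz, [] __device__ (float a, float b) { return a * b; }
);

// order the two transforms since the second reads dy
t1.precede(t2);
```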
-// ----------------------------------------------------------------------------
-// cudaFlowCapturer
-// ----------------------------------------------------------------------------
-
-// Function: transform
-template <typename I, typename O, typename C>
-cudaTask cudaFlowCapturer::transform(I first, I last, O output, C op) {
- return on([=](cudaStream_t stream) mutable {
- cudaDefaultExecutionPolicy p(stream);
- cuda_transform(p, first, last, output, op);
- });
-}
-
-// Function: transform
-template <typename I1, typename I2, typename O, typename C>
-cudaTask cudaFlowCapturer::transform(
- I1 first1, I1 last1, I2 first2, O output, C op
-) {
- return on([=](cudaStream_t stream) mutable {
- cudaDefaultExecutionPolicy p(stream);
- cuda_transform(p, first1, last1, first2, output, op);
- });
-}
-
-// Function: transform
-template <typename I, typename O, typename C>
-void cudaFlowCapturer::transform(
- cudaTask task, I first, I last, O output, C op
-) {
- on(task, [=] (cudaStream_t stream) mutable {
- cudaDefaultExecutionPolicy p(stream);
- cuda_transform(p, first, last, output, op);
- });
-}
-
-// Function: transform
-template <typename I1, typename I2, typename O, typename C>
-void cudaFlowCapturer::transform(
- cudaTask task, I1 first1, I1 last1, I2 first2, O output, C op
-) {
- on(task, [=] (cudaStream_t stream) mutable {
- cudaDefaultExecutionPolicy p(stream);
- cuda_transform(p, first1, last1, first2, output, op);
- });
-}
-
} // end of namespace tf -----------------------------------------------------
#pragma once
-#include "cuda_task.hpp"
#include "cuda_optimizer.hpp"
/**
a native CUDA graph.
*/
template <typename OPT, typename... ArgsT>
- OPT& make_optimizer(ArgsT&&... args);
+ void make_optimizer(ArgsT&&... args);
/**
@brief captures the cudaFlow and turns it into a CUDA Graph
cudaGraph_t native_graph();
/**
- @brief acquires a reference to the underlying CUDA graph executable
+ @brief instantiates an executable graph from this cudaflow capturer
*/
- cudaGraphExec_t native_executable();
+ cudaGraphExec instantiate();
private:
cudaFlowGraph _cfg;
Optimizer _optimizer;
-
- cudaGraphExec _exe {nullptr};
};
// Function: empty
// Procedure: clear
inline void cudaFlowCapturer::clear() {
- _exe.clear();
_cfg.clear();
}
return on([](cudaStream_t){});
}
-// Function: noop
-inline void cudaFlowCapturer::noop(cudaTask task) {
- on(task, [](cudaStream_t){});
-}
// Function: memcpy
inline cudaTask cudaFlowCapturer::memcpy(
});
}
+// Function: make_optimizer
+template <typename OPT, typename ...ArgsT>
+void cudaFlowCapturer::make_optimizer(ArgsT&&... args) {
+  _optimizer.emplace<OPT>(std::forward<ArgsT>(args)...);
+}
+
// Function: capture
inline cudaGraph_t cudaFlowCapturer::capture() {
return std::visit(
);
}
-// Procedure: run
-inline void cudaFlowCapturer::run(cudaStream_t stream) {
-
- // If the topology got changed, we need to destroy the executable
- // and create a new one
- if(_cfg._state & cudaFlowGraph::CHANGED) {
- _cfg._native_handle.reset(capture());
- _exe.instantiate(_cfg._native_handle);
- }
- // if the graph is just updated (i.e., topology does not change),
- // we can skip part of the optimization and just update the executable
- // with the new captured graph
- else if(_cfg._state & cudaFlowGraph::UPDATED) {
- // TODO: skip part of the optimization (e.g., levelization)
- _cfg._native_handle.reset(capture());
- if(_exe.update(_cfg._native_handle) != cudaGraphExecUpdateSuccess) {
- _exe.instantiate(_cfg._native_handle);
- }
- }
+// Function: instantiate
+inline cudaGraphExec cudaFlowCapturer::instantiate() {
+
+ _cfg._native_handle.reset(capture());
- // run the executable (should exist)
- _exe.launch(stream);
+ cudaGraphExec_t exec;
+ TF_CHECK_CUDA(
+ cudaGraphInstantiate(&exec, _cfg._native_handle, nullptr, nullptr, 0),
+ "failed to create an executable graph"
+ );
- _cfg._state = cudaFlowGraph::OFFLOADED;
+ return cudaGraphExec(exec);
}
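
With `run` removed, a capturer is now used by capturing stream-ordered work, instantiating an executable graph, and launching it explicitly. A minimal sketch, assuming the `tf::cudaStream` wrapper and a device buffer `dx` of `n` floats:

```cpp
tf::cudaFlowCapturer capturer;

// capture any stream-ordered CUDA call
capturer.on([=] (cudaStream_t s) {
  cudaMemsetAsync(dx, 0, n * sizeof(float), s);
});

// turn the captured work into an executable CUDA graph and run it
tf::cudaGraphExec exec = capturer.instantiate();
tf::cudaStream stream;
stream.run(exec).synchronize();
```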
+//// Procedure: run
+//inline void cudaFlowCapturer::run(cudaStream_t stream) {
+//
+// // If the topology got changed, we need to destroy the executable
+// // and create a new one
+// if(_cfg._state & cudaFlowGraph::CHANGED) {
+// _cfg._native_handle.reset(capture());
+// _exe.instantiate(_cfg._native_handle);
+// }
+// // if the graph is just updated (i.e., topology does not change),
+// // we can skip part of the optimization and just update the executable
+// // with the new captured graph
+// else if(_cfg._state & cudaFlowGraph::UPDATED) {
+// // TODO: skip part of the optimization (e.g., levelization)
+// _cfg._native_handle.reset(capture());
+// if(_exe.update(_cfg._native_handle) != cudaGraphExecUpdateSuccess) {
+// _exe.instantiate(_cfg._native_handle);
+// }
+// }
+//
+// // run the executable (should exist)
+// _exe.run(stream);
+//
+// _cfg._state = cudaFlowGraph::OFFLOADED;
+//}
+
// Function: native_graph
inline cudaGraph_t cudaFlowCapturer::native_graph() {
return _cfg._native_handle;
}
-// Function: native_executable
-inline cudaGraphExec_t cudaFlowCapturer::native_executable() {
- return _exe;
-}
-
-// Function: on
-template <typename C, std::enable_if_t<
- std::is_invocable_r_v<void, C, cudaStream_t>, void>*
->
-void cudaFlowCapturer::on(cudaTask task, C&& callable) {
-
- if(task.type() != cudaTaskType::CAPTURE) {
- TF_THROW("invalid cudaTask type (must be CAPTURE)");
- }
-
- _cfg._state |= cudaFlowGraph::UPDATED;
-
- std::get_if<cudaFlowNode::Capture>(&task._node->_handle)->work =
- std::forward<C>(callable);
-}
-
-// Function: memcpy
-inline void cudaFlowCapturer::memcpy(
- cudaTask task, void* dst, const void* src, size_t count
-) {
- on(task, [dst, src, count](cudaStream_t stream) mutable {
- TF_CHECK_CUDA(
- cudaMemcpyAsync(dst, src, count, cudaMemcpyDefault, stream),
- "failed to capture memcpy"
- );
- });
-}
+//// Function: on
+//template <typename C, std::enable_if_t<
+// std::is_invocable_r_v<void, C, cudaStream_t>, void>*
+//>
+//void cudaFlowCapturer::on(cudaTask task, C&& callable) {
+//
+// if(task.type() != cudaTaskType::CAPTURE) {
+// TF_THROW("invalid cudaTask type (must be CAPTURE)");
+// }
+//
+// _cfg._state |= cudaFlowGraph::UPDATED;
+//
+// std::get_if<cudaFlowNode::Capture>(&task._node->_handle)->work =
+// std::forward<C>(callable);
+//}
+//
+//// Function: noop
+//inline void cudaFlowCapturer::noop(cudaTask task) {
+// on(task, [](cudaStream_t){});
+//}
+////
+//// Function: memcpy
+//inline void cudaFlowCapturer::memcpy(
+// cudaTask task, void* dst, const void* src, size_t count
+//) {
+// on(task, [dst, src, count](cudaStream_t stream) mutable {
+// TF_CHECK_CUDA(
+// cudaMemcpyAsync(dst, src, count, cudaMemcpyDefault, stream),
+// "failed to capture memcpy"
+// );
+// });
+//}
+//
+//// Function: copy
+//template <typename T,
+// std::enable_if_t<!std::is_same_v<T, void>, void>*
+//>
+//void cudaFlowCapturer::copy(
+// cudaTask task, T* tgt, const T* src, size_t num
+//) {
+// on(task, [tgt, src, num] (cudaStream_t stream) mutable {
+// TF_CHECK_CUDA(
+// cudaMemcpyAsync(tgt, src, sizeof(T)*num, cudaMemcpyDefault, stream),
+// "failed to capture copy"
+// );
+// });
+//}
+//
+//// Function: memset
+//inline void cudaFlowCapturer::memset(
+// cudaTask task, void* ptr, int v, size_t n
+//) {
+// on(task, [ptr, v, n] (cudaStream_t stream) mutable {
+// TF_CHECK_CUDA(
+// cudaMemsetAsync(ptr, v, n, stream), "failed to capture memset"
+// );
+// });
+//}
+//
+//// Function: kernel
+//template <typename F, typename... ArgsT>
+//void cudaFlowCapturer::kernel(
+// cudaTask task, dim3 g, dim3 b, size_t s, F f, ArgsT&&... args
+//) {
+// on(task, [g, b, s, f, args...] (cudaStream_t stream) mutable {
+// f<<<g, b, s, stream>>>(args...);
+// });
+//}
+//
-// Function: copy
-template <typename T,
- std::enable_if_t<!std::is_same_v<T, void>, void>*
->
-void cudaFlowCapturer::copy(
- cudaTask task, T* tgt, const T* src, size_t num
-) {
- on(task, [tgt, src, num] (cudaStream_t stream) mutable {
- TF_CHECK_CUDA(
- cudaMemcpyAsync(tgt, src, sizeof(T)*num, cudaMemcpyDefault, stream),
- "failed to capture copy"
- );
- });
-}
-
-// Function: memset
-inline void cudaFlowCapturer::memset(
- cudaTask task, void* ptr, int v, size_t n
-) {
- on(task, [ptr, v, n] (cudaStream_t stream) mutable {
- TF_CHECK_CUDA(
- cudaMemsetAsync(ptr, v, n, stream), "failed to capture memset"
- );
- });
-}
-
-// Function: kernel
-template <typename F, typename... ArgsT>
-void cudaFlowCapturer::kernel(
- cudaTask task, dim3 g, dim3 b, size_t s, F f, ArgsT&&... args
-) {
- on(task, [g, b, s, f, args...] (cudaStream_t stream) mutable {
- f<<<g, b, s, stream>>>(args...);
- });
-}
-
-// Function: make_optimizer
-template <typename OPT, typename ...ArgsT>
-OPT& cudaFlowCapturer::make_optimizer(ArgsT&&... args) {
- return _optimizer.emplace<OPT>(std::forward<ArgsT>(args)...);
-}
} // end of namespace tf -----------------------------------------------------
const static unsigned nv = NT*VT;
/**
- @brief constructs an execution policy object with default stream
+ @brief constructs an execution policy object
*/
cudaExecutionPolicy() = default;
- /**
- @brief constructs an execution policy object with the given stream
- */
- explicit cudaExecutionPolicy(cudaStream_t s) : _stream{s} {}
-
- /**
- @brief queries the associated stream
- */
- cudaStream_t stream() noexcept { return _stream; };
-
- /**
- @brief assigns a stream
- */
- void stream(cudaStream_t stream) noexcept { _stream = stream; }
-
/**
@brief queries the number of blocks to accommodate N elements
*/
tf::cuda_merge and tf::cuda_merge_by_key.
*/
inline static unsigned merge_bufsz(unsigned a_count, unsigned b_count);
-
- private:
-
- cudaStream_t _stream {0};
};
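
Since the policy no longer carries a stream, it acts purely as a compile-time launch configuration. A short sketch with the default policy (`my_kernel` is a placeholder):

```cpp
using E = tf::cudaDefaultExecutionPolicy;

unsigned count  = 1000000;
unsigned blocks = E::num_blocks(count);  // number of blocks for E::nt threads each

// kernels are then launched explicitly on a user-managed stream
// my_kernel<<<blocks, E::nt, 0, stream>>>(...);
```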
/**
#pragma once
+#include <filesystem>
+#include <fstream>
+#include <random>
+
#include "cuda_memory.hpp"
#include "cuda_stream.hpp"
#include "cuda_meta.hpp"
return num_edges;
}
+
+
/**
@brief acquires the nodes in a native CUDA graph
*/
return type;
}
+// ----------------------------------------------------------------------------
+// cudaTask Types
+// ----------------------------------------------------------------------------
+
/**
-@brief convert the type of a native CUDA graph node to a readable string
+@brief converts a CUDA graph node type to a human-readable string
*/
-inline const char* cuda_graph_node_type_to_string(cudaGraphNodeType type) {
- switch(type) {
- case cudaGraphNodeTypeKernel : return "kernel";
- case cudaGraphNodeTypeMemcpy : return "memcpy";
- case cudaGraphNodeTypeMemset : return "memset";
- case cudaGraphNodeTypeHost : return "host";
- case cudaGraphNodeTypeGraph : return "graph";
- case cudaGraphNodeTypeEmpty : return "empty";
- case cudaGraphNodeTypeWaitEvent : return "event_wait";
- case cudaGraphNodeTypeEventRecord : return "event_record";
- default : return "undefined";
+constexpr const char* to_string(cudaGraphNodeType type) {
+ switch (type) {
+ case cudaGraphNodeTypeKernel: return "Kernel";
+ case cudaGraphNodeTypeMemcpy: return "Memcpy";
+ case cudaGraphNodeTypeMemset: return "Memset";
+ case cudaGraphNodeTypeHost: return "Host";
+ case cudaGraphNodeTypeGraph: return "Graph";
+ case cudaGraphNodeTypeEmpty: return "Empty";
+ case cudaGraphNodeTypeWaitEvent: return "WaitEvent";
+ case cudaGraphNodeTypeEventRecord: return "EventRecord";
+ case cudaGraphNodeTypeExtSemaphoreSignal: return "ExtSemaphoreSignal";
+ case cudaGraphNodeTypeExtSemaphoreWait: return "ExtSemaphoreWait";
+ case cudaGraphNodeTypeMemAlloc: return "MemAlloc";
+ case cudaGraphNodeTypeMemFree: return "MemFree";
+ case cudaGraphNodeTypeConditional: return "Conditional";
+ default: return "undefined";
}
}
+// ----------------------------------------------------------------------------
+// cudaTask
+// ----------------------------------------------------------------------------
+
/**
-@brief dumps a native CUDA graph and all associated child graphs to a DOT format
+@class cudaTask
-@tparam T output stream target
-@param os target output stream
-@param graph native CUDA graph
+@brief class to create a task handle of a CUDA Graph node
*/
-template <typename T>
-void cuda_dump_graph(T& os, cudaGraph_t g) {
-
- os << "digraph cudaGraph {\n";
-
- std::stack<std::tuple<cudaGraph_t, cudaGraphNode_t, int>> stack;
- stack.push(std::make_tuple(g, nullptr, 1));
-
- int pl = 0;
-
- while(stack.empty() == false) {
-
- auto [graph, parent, l] = stack.top();
- stack.pop();
-
- for(int i=0; i<pl-l+1; i++) {
- os << "}\n";
- }
-
- os << "subgraph cluster_p" << graph << " {\n"
- << "label=\"cudaGraph-L" << l << "\";\n"
- << "color=\"purple\";\n";
-
- auto nodes = cuda_graph_get_nodes(graph);
- auto edges = cuda_graph_get_edges(graph);
-
- for(auto& [from, to] : edges) {
- os << 'p' << from << " -> " << 'p' << to << ";\n";
- }
-
- for(auto& node : nodes) {
- auto type = cuda_get_graph_node_type(node);
- if(type == cudaGraphNodeTypeGraph) {
-
- cudaGraph_t child_graph;
- TF_CHECK_CUDA(cudaGraphChildGraphNodeGetGraph(node, &child_graph), "");
- stack.push(std::make_tuple(child_graph, node, l+1));
-
- os << 'p' << node << "["
- << "shape=folder, style=filled, fontcolor=white, fillcolor=purple, "
- << "label=\"cudaGraph-L" << l+1
- << "\"];\n";
- }
- else {
- os << 'p' << node << "[label=\""
- << cuda_graph_node_type_to_string(type)
- << "\"];\n";
- }
- }
-
- // precede to parent
- if(parent != nullptr) {
- std::unordered_set<cudaGraphNode_t> successors;
- for(const auto& p : edges) {
- successors.insert(p.first);
- }
- for(auto node : nodes) {
- if(successors.find(node) == successors.end()) {
- os << 'p' << node << " -> " << 'p' << parent << ";\n";
- }
- }
- }
-
- // set the previous level
- pl = l;
- }
+class cudaTask {
- for(int i=0; i<=pl; i++) {
- os << "}\n";
- }
+ template <typename Creator, typename Deleter>
+ friend class cudaGraphBase;
+
+ template <typename Creator, typename Deleter>
+ friend class cudaGraphExecBase;
+
+ friend class cudaFlow;
+ friend class cudaFlowCapturer;
+ friend class cudaFlowCapturerBase;
+
+ friend std::ostream& operator << (std::ostream&, const cudaTask&);
+
+ public:
+
+ /**
+ @brief constructs an empty cudaTask
+ */
+ cudaTask() = default;
+
+ /**
+ @brief copy-constructs a cudaTask
+ */
+ cudaTask(const cudaTask&) = default;
+
+ /**
+ @brief copy-assigns a cudaTask
+ */
+ cudaTask& operator = (const cudaTask&) = default;
+
+ /**
+ @brief adds precedence links from this to other tasks
+
+ @tparam Ts parameter pack
+
+ @param tasks one or multiple tasks
+
+ @return @c *this
+ */
+ template <typename... Ts>
+ cudaTask& precede(Ts&&... tasks);
+
+ /**
+ @brief adds precedence links from other tasks to this
+
+ @tparam Ts parameter pack
+
+ @param tasks one or multiple tasks
+
+ @return @c *this
+ */
+ template <typename... Ts>
+ cudaTask& succeed(Ts&&... tasks);
+
+ /**
+ @brief queries the number of successors
+ */
+ size_t num_successors() const;
+
+ /**
+ @brief queries the number of dependents
+ */
+ size_t num_predecessors() const;
+
+ /**
+ @brief queries the type of this task
+ */
+ auto type() const;
+
+ /**
+ @brief dumps the task through an output stream
+
+ @param os an output stream target
+ */
+ void dump(std::ostream& os) const;
+
+ private:
+
+ cudaTask(cudaGraph_t, cudaGraphNode_t);
+
+ cudaGraph_t _native_graph {nullptr};
+ cudaGraphNode_t _native_node {nullptr};
+};
+
+// Constructor
+inline cudaTask::cudaTask(cudaGraph_t native_graph, cudaGraphNode_t native_node) :
+ _native_graph {native_graph}, _native_node {native_node} {
+}
+
+// Function: precede
+template <typename... Ts>
+cudaTask& cudaTask::precede(Ts&&... tasks) {
+ (
+ cudaGraphAddDependencies(
+ _native_graph, &_native_node, &(tasks._native_node), 1
+ ), ...
+ );
+ return *this;
+}
+
+// Function: succeed
+template <typename... Ts>
+cudaTask& cudaTask::succeed(Ts&&... tasks) {
+ (tasks.precede(*this), ...);
+ return *this;
+}
+
+// Function: num_predecessors
+inline size_t cudaTask::num_predecessors() const {
+ size_t num_predecessors {0};
+ cudaGraphNodeGetDependencies(_native_node, nullptr, &num_predecessors);
+ return num_predecessors;
+}
+
+// Function: num_successors
+inline size_t cudaTask::num_successors() const {
+ size_t num_successors {0};
+ cudaGraphNodeGetDependentNodes(_native_node, nullptr, &num_successors);
+ return num_successors;
+}
+
+// Function: type
+inline auto cudaTask::type() const {
+ cudaGraphNodeType type;
+ cudaGraphNodeGetType(_native_node, &type);
+ return type;
+}
+
+// Function: dump
+inline void cudaTask::dump(std::ostream& os) const {
+ os << "cudaTask [type=" << to_string(type()) << ']';
+}
+
+/**
+@brief overload of ostream inserter operator for cudaTask
+*/
+inline std::ostream& operator << (std::ostream& os, const cudaTask& ct) {
+ ct.dump(os);
+ return os;
}
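
A short sketch of how the new `cudaTask` handle wires dependencies directly through the native CUDA graph, assuming the `tf::cudaGraph` alias:

```cpp
tf::cudaGraph cg;

tf::cudaTask a = cg.noop();
tf::cudaTask b = cg.noop();
tf::cudaTask c = cg.noop();

a.precede(b, c);   // a runs before b and c
c.succeed(b);      // c additionally runs after b

std::cout << a << '\n';   // prints "cudaTask [type=Empty]"
// a.num_successors() == 2, c.num_predecessors() == 2
```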
// ----------------------------------------------------------------------------
// cudaGraph
// ----------------------------------------------------------------------------
-
+
/**
-@private
-*/
+ * @struct cudaGraphCreator
+ * @brief a functor for creating a CUDA graph
+ *
+ * This structure provides an overloaded function call operator to create a
+ * new CUDA graph using `cudaGraphCreate`.
+ *
+ */
struct cudaGraphCreator {
- cudaGraph_t operator () () const {
+
+ /**
+ * @brief creates a new CUDA graph
+ *
+ * Calls `cudaGraphCreate` to generate a CUDA native graph and returns it.
+ * If the graph creation fails, an error is reported.
+ *
+ * @return A newly created `cudaGraph_t` instance.
+   * @throws an exception if the CUDA graph creation fails
+ */
+ cudaGraph_t operator () () const {
cudaGraph_t g;
TF_CHECK_CUDA(cudaGraphCreate(&g, 0), "failed to create a CUDA native graph");
- return g;
+ return g;
+ }
+
+ /**
+ @brief return the given CUDA graph
+ */
+ cudaGraph_t operator () (cudaGraph_t graph) const {
+ return graph;
}
+
};
/**
-@private
-*/
+ * @struct cudaGraphDeleter
+ * @brief a functor for deleting a CUDA graph
+ *
+ * This structure provides an overloaded function call operator to safely
+ * destroy a CUDA graph using `cudaGraphDestroy`.
+ *
+ */
struct cudaGraphDeleter {
+
+ /**
+ * @brief deletes a CUDA graph
+ *
+ * Calls `cudaGraphDestroy` to release the CUDA graph resource if it is valid.
+ *
+ * @param g the CUDA graph to be destroyed
+ */
void operator () (cudaGraph_t g) const {
- if(g) {
- cudaGraphDestroy(g);
- }
+ cudaGraphDestroy(g);
}
};
+
/**
-@class cudaGraph
+@class cudaGraphBase
-@brief class to create an RAII-styled wrapper over a CUDA executable graph
+@brief class to create a CUDA graph managed by C++ smart pointer
-A cudaGraph object is an RAII-styled wrapper over
-a native CUDA graph (@c cudaGraph_t).
-A cudaGraph object is move-only.
+@tparam Creator functor to create the CUDA graph (used in constructor)
+@tparam Deleter functor to delete the CUDA graph (used in destructor)
+
+This class wraps a `cudaGraph_t` handle with std::unique_ptr to ensure proper
+resource management and automatic cleanup.
*/
-class cudaGraph :
- public cudaObject<cudaGraph_t, cudaGraphCreator, cudaGraphDeleter> {
+template <typename Creator, typename Deleter>
+class cudaGraphBase : public std::unique_ptr<std::remove_pointer_t<cudaGraph_t>, Deleter> {
+
+ static_assert(std::is_pointer_v<cudaGraph_t>, "cudaGraph_t is not a pointer type");
public:
+
+ /**
+ @brief base std::unique_ptr type
+ */
+ using base_type = std::unique_ptr<std::remove_pointer_t<cudaGraph_t>, Deleter>;
/**
- @brief constructs an RAII-styled object from the given CUDA exec
+  @brief constructs a `cudaGraph` object by passing the given arguments to the CUDA graph creator
-  Constructs a cudaGraph object from the given CUDA graph @c native.
+  Constructs a `cudaGraph` object by passing the given arguments to the CUDA graph creator.
+
+  @param args arguments to pass to the CUDA graph creator
*/
- explicit cudaGraph(cudaGraph_t native) : cudaObject(native) { }
+ template <typename... ArgsT>
+ explicit cudaGraphBase(ArgsT&& ... args) : base_type(
+ Creator{}(std::forward<ArgsT>(args)...), Deleter()
+ ) {
+ }
/**
- @brief constructs a cudaGraph object with a new CUDA graph
+ @brief constructs a `cudaGraph` from the given rhs using move semantics
*/
- cudaGraph() = default;
-};
+ cudaGraphBase(cudaGraphBase&&) = default;
-// ----------------------------------------------------------------------------
-// cudaGraphExec
-// ----------------------------------------------------------------------------
+ /**
+ @brief assign the rhs to `*this` using move semantics
+ */
+ cudaGraphBase& operator = (cudaGraphBase&&) = default;
+
+ /**
+ @brief queries the number of nodes in a native CUDA graph
+ */
+ size_t num_nodes() const;
-/**
-@private
-*/
-struct cudaGraphExecCreator {
- cudaGraphExec_t operator () () const { return nullptr; }
-};
+ /**
+ @brief queries the number of edges in a native CUDA graph
+ */
+ size_t num_edges() const;
-/**
-@private
-*/
-struct cudaGraphExecDeleter {
- void operator () (cudaGraphExec_t executable) const {
- if(executable) {
- cudaGraphExecDestroy(executable);
- }
- }
-};
+ /**
+ @brief queries if the graph is empty
+ */
+ bool empty() const;
-/**
-@class cudaGraphExec
+ /**
+ @brief dumps the CUDA graph to a DOT format through the given output stream
+
+ @param os target output stream
+ */
+ void dump(std::ostream& os);
-@brief class to create an RAII-styled wrapper over a CUDA executable graph
+ // ------------------------------------------------------------------------
+ // Graph building routines
+ // ------------------------------------------------------------------------
-A cudaGraphExec object is an RAII-styled wrapper over
-a native CUDA executable graph (@c cudaGraphExec_t).
-A cudaGraphExec object is move-only.
-*/
-class cudaGraphExec :
- public cudaObject<cudaGraphExec_t, cudaGraphExecCreator, cudaGraphExecDeleter> {
+ /**
+ @brief creates a no-operation task
- public:
+ @return a tf::cudaTask handle
+
+ An empty node performs no operation during execution,
+ but can be used for transitive ordering.
+ For example, a phased execution graph with 2 groups of @c n nodes
+ with a barrier between them can be represented using an empty node
+ and @c 2*n dependency edges,
+ rather than no empty node and @c n^2 dependency edges.
+ */
+ cudaTask noop();
/**
- @brief constructs an RAII-styled object from the given CUDA exec
+ @brief creates a host task that runs a callable on the host
+
+ @tparam C callable type
+
+  @param callable a callable convertible to @c cudaHostFn_t
+  (i.e., a function of signature <tt>void(void*)</tt> that receives @c user_data)
+ @param user_data a pointer to the user data
+
+ @return a tf::cudaTask handle
- Constructs a cudaGraphExec object which owns @c exec.
+ A host task can only execute CPU-specific functions and cannot do any CUDA calls
+ (e.g., @c cudaMalloc).
*/
- explicit cudaGraphExec(cudaGraphExec_t exec) : cudaObject(exec) { }
-
+ template <typename C>
+ cudaTask host(C&& callable, void* user_data);
+
/**
- @brief default constructor
+ @brief creates a kernel task
+
+ @tparam F kernel function type
+ @tparam ArgsT kernel function parameters type
+
+ @param g configured grid
+ @param b configured block
+ @param s configured shared memory size in bytes
+ @param f kernel function
+ @param args arguments to forward to the kernel function by copy
+
+ @return a tf::cudaTask handle
*/
- cudaGraphExec() = default;
-
+ template <typename F, typename... ArgsT>
+ cudaTask kernel(dim3 g, dim3 b, size_t s, F f, ArgsT... args);
+
/**
- @brief instantiates the executable from the given CUDA graph
+ @brief creates a memset task that fills untyped data with a byte value
+
+ @param dst pointer to the destination device memory area
+ @param v value to set for each byte of specified memory
+ @param count size in bytes to set
+
+ @return a tf::cudaTask handle
+
+ A memset task fills the first @c count bytes of device memory area
+ pointed by @c dst with the byte value @c v.
*/
- void instantiate(cudaGraph_t graph) {
- cudaGraphExecDeleter {} (object);
- TF_CHECK_CUDA(
- cudaGraphInstantiate(&object, graph, nullptr, nullptr, 0),
- "failed to create an executable graph"
- );
- }
-
+ cudaTask memset(void* dst, int v, size_t count);
+
/**
- @brief updates the executable from the given CUDA graph
+ @brief creates a memcpy task that copies untyped data in bytes
+
+ @param tgt pointer to the target memory block
+ @param src pointer to the source memory block
+ @param bytes bytes to copy
+
+ @return a tf::cudaTask handle
+
+ A memcpy task transfers @c bytes of data from a source location
+ to a target location. Direction can be arbitrary among CPUs and GPUs.
*/
- cudaGraphExecUpdateResult update(cudaGraph_t graph) {
- cudaGraphNode_t error_node;
- cudaGraphExecUpdateResult error_result;
- cudaGraphExecUpdate(object, graph, &error_node, &error_result);
- return error_result;
- }
-
+ cudaTask memcpy(void* tgt, const void* src, size_t bytes);
+
/**
- @brief launches the executable graph via the given stream
- */
- void launch(cudaStream_t stream) {
- TF_CHECK_CUDA(
- cudaGraphLaunch(object, stream), "failed to launch a CUDA executable graph"
- );
- }
-};
+ @brief creates a memset task that sets a typed memory block to zero
-// ----------------------------------------------------------------------------
-// cudaFlowGraph class
-// ----------------------------------------------------------------------------
+ @tparam T element type (size of @c T must be either 1, 2, or 4)
+ @param dst pointer to the destination device memory area
+ @param count number of elements
-// class: cudaFlowGraph
-class cudaFlowGraph {
+ @return a tf::cudaTask handle
- friend class cudaFlowNode;
- friend class cudaTask;
- friend class cudaFlowCapturer;
- friend class cudaFlow;
- friend class cudaFlowOptimizerBase;
- friend class cudaFlowSequentialOptimizer;
- friend class cudaFlowLinearOptimizer;
- friend class cudaFlowRoundRobinOptimizer;
- friend class Taskflow;
- friend class Executor;
+ A zero task zeroes the first @c count elements of type @c T
+ in a device memory area pointed by @c dst.
+ */
+ template <typename T, std::enable_if_t<
+ is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr
+ >
+ cudaTask zero(T* dst, size_t count);
- constexpr static int OFFLOADED = 0x01;
- constexpr static int CHANGED = 0x02;
- constexpr static int UPDATED = 0x04;
+ /**
+ @brief creates a memset task that fills a typed memory block with a value
- public:
+ @tparam T element type (size of @c T must be either 1, 2, or 4)
- cudaFlowGraph() = default;
- ~cudaFlowGraph() = default;
+ @param dst pointer to the destination device memory area
+ @param value value to fill for each element of type @c T
+ @param count number of elements
- cudaFlowGraph(const cudaFlowGraph&) = delete;
- cudaFlowGraph(cudaFlowGraph&&) = default;
+ @return a tf::cudaTask handle
- cudaFlowGraph& operator = (const cudaFlowGraph&) = delete;
- cudaFlowGraph& operator = (cudaFlowGraph&&) = default;
+ A fill task fills the first @c count elements of type @c T with @c value
+ in a device memory area pointed by @c dst.
+ The value to fill is interpreted in type @c T rather than byte.
+ */
+ template <typename T, std::enable_if_t<
+ is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr
+ >
+ cudaTask fill(T* dst, T value, size_t count);
- template <typename... ArgsT>
- cudaFlowNode* emplace_back(ArgsT&&...);
+ /**
+ @brief creates a memcopy task that copies typed data
- bool empty() const;
+ @tparam T element type (non-void)
- void clear();
- void dump(std::ostream&, const void*, const std::string&) const ;
+ @param tgt pointer to the target memory block
+ @param src pointer to the source memory block
+ @param num number of elements to copy
- private:
+ @return a tf::cudaTask handle
- int _state{CHANGED};
- cudaGraph _native_handle {nullptr};
- std::vector<std::unique_ptr<cudaFlowNode>> _nodes;
-};
+ A copy task transfers <tt>num*sizeof(T)</tt> bytes of data from a source location
+ to a target location. Direction can be arbitrary among CPUs and GPUs.
+ */
+ template <typename T,
+ std::enable_if_t<!std::is_same_v<T, void>, void>* = nullptr
+ >
+ cudaTask copy(T* tgt, const T* src, size_t num);
+
+ // ------------------------------------------------------------------------
+ // generic algorithms
+ // ------------------------------------------------------------------------
-// ----------------------------------------------------------------------------
-// cudaFlowNode class
-// ----------------------------------------------------------------------------
+ /**
+ @brief runs a callable with only a single kernel thread
-/**
-@private
-@class: cudaFlowNode
-*/
-class cudaFlowNode {
+ @tparam C callable type
- friend class cudaFlowGraph;
- friend class cudaTask;
- friend class cudaFlow;
- friend class cudaFlowCapturer;
- friend class cudaFlowOptimizerBase;
- friend class cudaFlowSequentialOptimizer;
- friend class cudaFlowLinearOptimizer;
- friend class cudaFlowRoundRobinOptimizer;
- friend class Taskflow;
- friend class Executor;
+ @param c callable to run by a single kernel thread
- // Empty handle
- struct Empty {
- };
+ @return a tf::cudaTask handle
+ */
+ template <typename C>
+ cudaTask single_task(C c);
+
+ /**
+ @brief applies a callable to each dereferenced element of the data array
- // Host handle
- struct Host {
+ @tparam I iterator type
+ @tparam C callable type
+  @tparam E execution policy (default tf::cudaDefaultExecutionPolicy)
- template <typename C>
- Host(C&&);
+ @param first iterator to the beginning (inclusive)
+ @param last iterator to the end (exclusive)
+ @param callable a callable object to apply to the dereferenced iterator
- std::function<void()> func;
+ @return a tf::cudaTask handle
- static void callback(void*);
- };
+ This method is equivalent to the parallel execution of the following loop on a GPU:
- // Memset handle
- struct Memset {
- };
+ @code{.cpp}
+ for(auto itr = first; itr != last; itr++) {
+ callable(*itr);
+ }
+ @endcode
+ */
+ template <typename I, typename C, typename E = cudaDefaultExecutionPolicy>
+ cudaTask for_each(I first, I last, C callable);
+
+ /**
+ @brief applies a callable to each index in the range with the step size
- // Memcpy handle
- struct Memcpy {
- };
+ @tparam I index type
+ @tparam C callable type
+  @tparam E execution policy (default tf::cudaDefaultExecutionPolicy)
- // Kernel handle
- struct Kernel {
+ @param first beginning index
+ @param last last index
+ @param step step size
+ @param callable the callable to apply to each element in the data array
- template <typename F>
- Kernel(F&& f);
+ @return a tf::cudaTask handle
- void* func {nullptr};
- };
+ This method is equivalent to the parallel execution of the following loop on a GPU:
- // Subflow handle
- struct Subflow {
- cudaFlowGraph cfg;
- };
+ @code{.cpp}
+ // step is positive [first, last)
+ for(auto i=first; i<last; i+=step) {
+ callable(i);
+ }
- // Capture
- struct Capture {
+ // step is negative [first, last)
+ for(auto i=first; i>last; i+=step) {
+ callable(i);
+ }
+ @endcode
+ */
+ template <typename I, typename C, typename E = cudaDefaultExecutionPolicy>
+ cudaTask for_each_index(I first, I last, I step, C callable);
+
+ /**
+ @brief applies a callable to a source range and stores the result in a target range
- template <typename C>
- Capture(C&&);
+ @tparam I input iterator type
+ @tparam O output iterator type
+ @tparam C unary operator type
+  @tparam E execution policy (default tf::cudaDefaultExecutionPolicy)
- std::function<void(cudaStream_t)> work;
+ @param first iterator to the beginning of the input range
+ @param last iterator to the end of the input range
+ @param output iterator to the beginning of the output range
+ @param op the operator to apply to transform each element in the range
- cudaEvent_t event;
- size_t level;
- size_t lid;
- size_t idx;
- };
+ @return a tf::cudaTask handle
- using handle_t = std::variant<
- Empty,
- Host,
- Memset,
- Memcpy,
- Kernel,
- Subflow,
- Capture
- >;
+ This method is equivalent to the parallel execution of the following loop on a GPU:
- public:
+ @code{.cpp}
+ while (first != last) {
+  *output++ = op(*first++);
+ }
+ @endcode
+ */
+ template <typename I, typename O, typename C, typename E = cudaDefaultExecutionPolicy>
+ cudaTask transform(I first, I last, O output, C op);
+
+ /**
+ @brief creates a task to perform parallel transforms over two ranges of items
+
+ @tparam I1 first input iterator type
+ @tparam I2 second input iterator type
+ @tparam O output iterator type
+  @tparam C binary operator type
+  @tparam E execution policy (default tf::cudaDefaultExecutionPolicy)
+
+ @param first1 iterator to the beginning of the input range
+ @param last1 iterator to the end of the input range
+  @param first2 iterator to the beginning of the second input range
+ @param output iterator to the beginning of the output range
+ @param op binary operator to apply to transform each pair of items in the
+ two input ranges
- // variant index
- constexpr static auto EMPTY = get_index_v<Empty, handle_t>;
- constexpr static auto HOST = get_index_v<Host, handle_t>;
- constexpr static auto MEMSET = get_index_v<Memset, handle_t>;
- constexpr static auto MEMCPY = get_index_v<Memcpy, handle_t>;
- constexpr static auto KERNEL = get_index_v<Kernel, handle_t>;
- constexpr static auto SUBFLOW = get_index_v<Subflow, handle_t>;
- constexpr static auto CAPTURE = get_index_v<Capture, handle_t>;
+ @return cudaTask handle
- cudaFlowNode() = delete;
+ This method is equivalent to the parallel execution of the following loop on a GPU:
- template <typename... ArgsT>
- cudaFlowNode(cudaFlowGraph&, ArgsT&&...);
+ @code{.cpp}
+ while (first1 != last1) {
+ *output++ = op(*first1++, *first2++);
+ }
+ @endcode
+ */
+ template <typename I1, typename I2, typename O, typename C, typename E = cudaDefaultExecutionPolicy>
+ cudaTask transform(I1 first1, I1 last1, I2 first2, O output, C op);
private:
- cudaFlowGraph& _cfg;
+ cudaGraphBase(const cudaGraphBase&) = delete;
+ cudaGraphBase& operator = (const cudaGraphBase&) = delete;
+};
- std::string _name;
+// query the number of nodes
+template <typename Creator, typename Deleter>
+size_t cudaGraphBase<Creator, Deleter>::num_nodes() const {
+ size_t n;
+ TF_CHECK_CUDA(
+ cudaGraphGetNodes(this->get(), nullptr, &n),
+ "failed to get native graph nodes"
+ );
+ return n;
+}
- handle_t _handle;
+// query the emptiness
+template <typename Creator, typename Deleter>
+bool cudaGraphBase<Creator, Deleter>::empty() const {
+ return num_nodes() == 0;
+}
- cudaGraphNode_t _native_handle {nullptr};
+// query the number of edges
+template <typename Creator, typename Deleter>
+size_t cudaGraphBase<Creator, Deleter>::num_edges() const {
+ size_t num_edges;
+ TF_CHECK_CUDA(
+ cudaGraphGetEdges(this->get(), nullptr, nullptr, &num_edges),
+ "failed to get native graph edges"
+ );
+ return num_edges;
+}
- SmallVector<cudaFlowNode*> _successors;
- SmallVector<cudaFlowNode*> _dependents;
+//// dump the graph
+//inline void cudaGraph::dump(std::ostream& os) {
+//
+// // acquire the native handle
+// auto g = this->get();
+//
+// os << "digraph cudaGraph {\n";
+//
+// std::stack<std::tuple<cudaGraph_t, cudaGraphNode_t, int>> stack;
+// stack.push(std::make_tuple(g, nullptr, 1));
+//
+// int pl = 0;
+//
+// while(stack.empty() == false) {
+//
+// auto [graph, parent, l] = stack.top();
+// stack.pop();
+//
+// for(int i=0; i<pl-l+1; i++) {
+// os << "}\n";
+// }
+//
+// os << "subgraph cluster_p" << graph << " {\n"
+// << "label=\"cudaGraph-L" << l << "\";\n"
+// << "color=\"purple\";\n";
+//
+// auto nodes = cuda_graph_get_nodes(graph);
+// auto edges = cuda_graph_get_edges(graph);
+//
+// for(auto& [from, to] : edges) {
+// os << 'p' << from << " -> " << 'p' << to << ";\n";
+// }
+//
+// for(auto& node : nodes) {
+// auto type = cuda_get_graph_node_type(node);
+// if(type == cudaGraphNodeTypeGraph) {
+//
+// cudaGraph_t child_graph;
+// TF_CHECK_CUDA(cudaGraphChildGraphNodeGetGraph(node, &child_graph), "");
+// stack.push(std::make_tuple(child_graph, node, l+1));
+//
+// os << 'p' << node << "["
+// << "shape=folder, style=filled, fontcolor=white, fillcolor=purple, "
+// << "label=\"cudaGraph-L" << l+1
+// << "\"];\n";
+// }
+// else {
+// os << 'p' << node << "[label=\""
+// << to_string(type)
+// << "\"];\n";
+// }
+// }
+//
+// // precede to parent
+// if(parent != nullptr) {
+// std::unordered_set<cudaGraphNode_t> successors;
+// for(const auto& p : edges) {
+// successors.insert(p.first);
+// }
+// for(auto node : nodes) {
+// if(successors.find(node) == successors.end()) {
+// os << 'p' << node << " -> " << 'p' << parent << ";\n";
+// }
+// }
+// }
+//
+// // set the previous level
+// pl = l;
+// }
+//
+// for(int i=0; i<=pl; i++) {
+// os << "}\n";
+// }
+//}
+
+// dump the graph
+template <typename Creator, typename Deleter>
+void cudaGraphBase<Creator, Deleter>::dump(std::ostream& os) {
+
+ // Generate a unique temporary filename in the system's temp directory using filesystem
+ auto temp_path = std::filesystem::temp_directory_path() / "graph_";
+ std::random_device rd;
+ std::uniform_int_distribution<int> dist(100000, 999999); // Generates a random number
+ temp_path += std::to_string(dist(rd)) + ".dot";
+
+ // Call the original function with the temporary file
+ TF_CHECK_CUDA(cudaGraphDebugDotPrint(this->get(), temp_path.string().c_str(), 0), "");
+
+ // Read the file and write to the output stream
+ std::ifstream file(temp_path);
+ if (file) {
+ os << file.rdbuf(); // Copy file contents to the stream
+ file.close();
+ std::filesystem::remove(temp_path); // Clean up the temporary file
+ } else {
+ TF_THROW("failed to open ", temp_path, " for dumping the CUDA graph");
+ }
+}
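
`dump` round-trips through `cudaGraphDebugDotPrint`, so the resulting DOT text can be piped straight into GraphViz (e.g., `dot -Tsvg`). A minimal sketch, assuming the `tf::cudaGraph` alias:

```cpp
tf::cudaGraph cg;
cg.noop();

cg.dump(std::cout);   // writes the graph in DOT format to std::cout
```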
- void _precede(cudaFlowNode*);
-};
+// Function: noop
+template <typename Creator, typename Deleter>
+cudaTask cudaGraphBase<Creator, Deleter>::noop() {
-// ----------------------------------------------------------------------------
-// cudaFlowNode definitions
-// ----------------------------------------------------------------------------
+ cudaGraphNode_t node;
-// Host handle constructor
+ TF_CHECK_CUDA(
+ cudaGraphAddEmptyNode(&node, this->get(), nullptr, 0),
+ "failed to create a no-operation (empty) node"
+ );
+
+ return cudaTask(this->get(), node);
+}
+
+// Function: host
+template <typename Creator, typename Deleter>
template <typename C>
-cudaFlowNode::Host::Host(C&& c) : func {std::forward<C>(c)} {
+cudaTask cudaGraphBase<Creator, Deleter>::host(C&& callable, void* user_data) {
+
+ cudaGraphNode_t node;
+ cudaHostNodeParams p {callable, user_data};
+
+ TF_CHECK_CUDA(
+ cudaGraphAddHostNode(&node, this->get(), nullptr, 0, &p),
+ "failed to create a host node"
+ );
+
+ return cudaTask(this->get(), node);
}
-// Host callback
-inline void cudaFlowNode::Host::callback(void* data) {
- static_cast<Host*>(data)->func();
-};
+// Function: kernel
+template <typename Creator, typename Deleter>
+template <typename F, typename... ArgsT>
+cudaTask cudaGraphBase<Creator, Deleter>::kernel(
+ dim3 g, dim3 b, size_t s, F f, ArgsT... args
+) {
-// Kernel handle constructor
-template <typename F>
-cudaFlowNode::Kernel::Kernel(F&& f) :
- func {std::forward<F>(f)} {
+ cudaGraphNode_t node;
+ cudaKernelNodeParams p;
+
+ void* arguments[sizeof...(ArgsT)] = { (void*)(&args)... };
+
+ p.func = (void*)f;
+ p.gridDim = g;
+ p.blockDim = b;
+ p.sharedMemBytes = s;
+ p.kernelParams = arguments;
+ p.extra = nullptr;
+
+ TF_CHECK_CUDA(
+ cudaGraphAddKernelNode(&node, this->get(), nullptr, 0, &p),
+ "failed to create a kernel task"
+ );
+
+ return cudaTask(this->get(), node);
}
-// Capture handle constructor
-template <typename C>
-cudaFlowNode::Capture::Capture(C&& c) :
- work {std::forward<C>(c)} {
+// Function: zero
+template <typename Creator, typename Deleter>
+template <typename T, std::enable_if_t<
+ is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>*
+>
+cudaTask cudaGraphBase<Creator, Deleter>::zero(T* dst, size_t count) {
+
+ cudaGraphNode_t node;
+ auto p = cuda_get_zero_parms(dst, count);
+
+ TF_CHECK_CUDA(
+ cudaGraphAddMemsetNode(&node, this->get(), nullptr, 0, &p),
+ "failed to create a memset (zero) task"
+ );
+
+ return cudaTask(this->get(), node);
}
-// Constructor
-template <typename... ArgsT>
-cudaFlowNode::cudaFlowNode(cudaFlowGraph& graph, ArgsT&&... args) :
- _cfg {graph},
- _handle {std::forward<ArgsT>(args)...} {
+// Function: fill
+template <typename Creator, typename Deleter>
+template <typename T, std::enable_if_t<
+ is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>*
+>
+cudaTask cudaGraphBase<Creator, Deleter>::fill(T* dst, T value, size_t count) {
+
+ cudaGraphNode_t node;
+ auto p = cuda_get_fill_parms(dst, value, count);
+ TF_CHECK_CUDA(
+ cudaGraphAddMemsetNode(&node, this->get(), nullptr, 0, &p),
+ "failed to create a memset (fill) task"
+ );
+
+ return cudaTask(this->get(), node);
}
-// Procedure: _precede
-inline void cudaFlowNode::_precede(cudaFlowNode* v) {
+// Function: copy
+template <typename Creator, typename Deleter>
+template <
+ typename T,
+ std::enable_if_t<!std::is_same_v<T, void>, void>*
+>
+cudaTask cudaGraphBase<Creator, Deleter>::copy(T* tgt, const T* src, size_t num) {
- _cfg._state |= cudaFlowGraph::CHANGED;
+ cudaGraphNode_t node;
+ auto p = cuda_get_copy_parms(tgt, src, num);
- _successors.push_back(v);
- v->_dependents.push_back(this);
+ TF_CHECK_CUDA(
+ cudaGraphAddMemcpyNode(&node, this->get(), nullptr, 0, &p),
+ "failed to create a memcpy (copy) task"
+ );
- // capture node doesn't have the native graph yet
- if(_handle.index() != cudaFlowNode::CAPTURE) {
- TF_CHECK_CUDA(
- cudaGraphAddDependencies(
- _cfg._native_handle, &_native_handle, &v->_native_handle, 1
- ),
- "failed to add a preceding link ", this, "->", v
- );
- }
+ return cudaTask(this->get(), node);
}
-// ----------------------------------------------------------------------------
-// cudaGraph definitions
-// ----------------------------------------------------------------------------
+// Function: memset
+template <typename Creator, typename Deleter>
+cudaTask cudaGraphBase<Creator, Deleter>::memset(void* dst, int ch, size_t count) {
-// Function: empty
-inline bool cudaFlowGraph::empty() const {
- return _nodes.empty();
-}
-
-// Procedure: clear
-inline void cudaFlowGraph::clear() {
- _state |= cudaFlowGraph::CHANGED;
- _nodes.clear();
- _native_handle.clear();
-}
-
-// Function: emplace_back
-template <typename... ArgsT>
-cudaFlowNode* cudaFlowGraph::emplace_back(ArgsT&&... args) {
-
- _state |= cudaFlowGraph::CHANGED;
-
- auto node = std::make_unique<cudaFlowNode>(std::forward<ArgsT>(args)...);
- _nodes.emplace_back(std::move(node));
- return _nodes.back().get();
-
- // TODO: use object pool to save memory
- //auto node = new cudaFlowNode(std::forward<ArgsT>(args)...);
- //_nodes.push_back(node);
- //return node;
-}
-
-// Procedure: dump the graph to a DOT format
-inline void cudaFlowGraph::dump(
- std::ostream& os, const void* root, const std::string& root_name
-) const {
-
- // recursive dump with stack
- std::stack<std::tuple<const cudaFlowGraph*, const cudaFlowNode*, int>> stack;
- stack.push(std::make_tuple(this, nullptr, 1));
-
- int pl = 0;
-
- while(!stack.empty()) {
-
- auto [graph, parent, l] = stack.top();
- stack.pop();
-
- for(int i=0; i<pl-l+1; i++) {
- os << "}\n";
- }
-
- if(parent == nullptr) {
- if(root) {
- os << "subgraph cluster_p" << root << " {\nlabel=\"cudaFlow: ";
- if(root_name.empty()) os << 'p' << root;
- else os << root_name;
- os << "\";\n" << "color=\"purple\"\n";
- }
- else {
- os << "digraph cudaFlow {\n";
- }
- }
- else {
- os << "subgraph cluster_p" << parent << " {\nlabel=\"cudaSubflow: ";
- if(parent->_name.empty()) os << 'p' << parent;
- else os << parent->_name;
- os << "\";\n" << "color=\"purple\"\n";
- }
-
- for(auto& node : graph->_nodes) {
-
- auto v = node.get();
-
- os << 'p' << v << "[label=\"";
- if(v->_name.empty()) {
- os << 'p' << v << "\"";
- }
- else {
- os << v->_name << "\"";
- }
-
- switch(v->_handle.index()) {
- case cudaFlowNode::KERNEL:
- os << " style=\"filled\""
- << " color=\"white\" fillcolor=\"black\""
- << " fontcolor=\"white\""
- << " shape=\"box3d\"";
- break;
-
- case cudaFlowNode::SUBFLOW:
- stack.push(std::make_tuple(
- &(std::get_if<cudaFlowNode::Subflow>(&v->_handle)->cfg), v, l+1)
- );
- os << " style=\"filled\""
- << " color=\"black\" fillcolor=\"purple\""
- << " fontcolor=\"white\""
- << " shape=\"folder\"";
- break;
-
- default:
- break;
- }
-
- os << "];\n";
-
- for(const auto s : v->_successors) {
- os << 'p' << v << " -> " << 'p' << s << ";\n";
- }
-
- if(v->_successors.size() == 0) {
- if(parent == nullptr) {
- if(root) {
- os << 'p' << v << " -> p" << root << ";\n";
- }
- }
- else {
- os << 'p' << v << " -> p" << parent << ";\n";
- }
- }
- }
-
- // set the previous level
- pl = l;
- }
+ cudaGraphNode_t node;
+ auto p = cuda_get_memset_parms(dst, ch, count);
- for(int i=0; i<pl; i++) {
- os << "}\n";
- }
+ TF_CHECK_CUDA(
+ cudaGraphAddMemsetNode(&node, this->get(), nullptr, 0, &p),
+ "failed to create a memset task"
+ );
+ return cudaTask(this->get(), node);
}
+// Function: memcpy
+template <typename Creator, typename Deleter>
+cudaTask cudaGraphBase<Creator, Deleter>::memcpy(void* tgt, const void* src, size_t bytes) {
+
+ cudaGraphNode_t node;
+ auto p = cuda_get_memcpy_parms(tgt, src, bytes);
+
+ TF_CHECK_CUDA(
+ cudaGraphAddMemcpyNode(&node, this->get(), nullptr, 0, &p),
+ "failed to create a memcpy task"
+ );
+
+ return cudaTask(this->get(), node);
+}
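+
+// Example (sketch): given a tf::cudaGraph g, a device pointer dst, and a
+// std::vector<float> src (all assumed to exist), a typed copy takes an element
+// count while the untyped memcpy takes a byte count:
+//
+//   g.copy(dst, src.data(), src.size());                    // element count
+//   g.memcpy(dst, src.data(), src.size() * sizeof(float));  // byte count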
+
+
+
+
} // end of namespace tf -----------------------------------------------------
--- /dev/null
+#pragma once
+
+#include "cuda_graph.hpp"
+
+
+namespace tf {
+
+// ----------------------------------------------------------------------------
+// cudaGraphExec
+// ----------------------------------------------------------------------------
+
+/**
+@struct cudaGraphExecCreator
+@brief a functor for creating an executable CUDA graph
+
+This structure provides an overloaded function call operator to create an
+executable CUDA graph, either by returning a given `cudaGraphExec_t` or by
+instantiating a given CUDA graph through `cudaGraphInstantiate`.
+*/
+struct cudaGraphExecCreator {
+ /**
+ @brief returns a null executable CUDA graph
+ */
+ cudaGraphExec_t operator () () const {
+ return nullptr;
+ }
+
+ /**
+ @brief returns the given executable graph
+ */
+ cudaGraphExec_t operator () (cudaGraphExec_t exec) const {
+ return exec;
+ }
+
+ /**
+ @brief returns a newly instantiated executable graph from the given CUDA graph
+ */
+ cudaGraphExec_t operator () (cudaGraph_t graph) const {
+ cudaGraphExec_t exec;
+ TF_CHECK_CUDA(
+ cudaGraphInstantiate(&exec, graph, nullptr, nullptr, 0),
+ "failed to create an executable graph"
+ );
+ return exec;
+ }
+
+ /**
+ @brief returns a newly instantiated executable graph from the given CUDA graph
+ */
+ template <typename C, typename D>
+ cudaGraphExec_t operator () (const cudaGraphBase<C, D>& graph) const {
+ return this->operator()(graph.get());
+ }
+};
+
+/**
+@struct cudaGraphExecDeleter
+@brief a functor for deleting an executable CUDA graph
+
+This structure provides an overloaded function call operator to safely
+destroy an executable CUDA graph using `cudaGraphExecDestroy`.
+*/
+struct cudaGraphExecDeleter {
+ /**
+ * @brief deletes an executable CUDA graph
+ *
+ * Calls `cudaGraphExecDestroy` to release the executable CUDA graph resource.
+ *
+ * @param executable the executable CUDA graph to be destroyed
+ */
+ void operator () (cudaGraphExec_t executable) const {
+ cudaGraphExecDestroy(executable);
+ }
+};
+
+/**
+@class cudaGraphExecBase
+
+@brief class to create an executable CUDA graph managed by C++ smart pointer
+
+@tparam Creator functor to create the executable CUDA graph (used in constructor)
+@tparam Deleter functor to delete the executable CUDA graph (used in destructor)
+
+This class wraps a `cudaGraphExec_t` handle with `std::unique_ptr` to ensure proper
+resource management and automatic cleanup.
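+
+A minimal usage sketch (assuming a `__global__` kernel `my_kernel` and a device
+buffer `data` of `N` elements already exist):
+
+@code{.cpp}
+// my_kernel, data, and N are assumed to be defined elsewhere
+tf::cudaGraph cg;
+tf::cudaTask task = cg.kernel((N+255)/256, 256, 0, my_kernel, data, N);
+
+// instantiate an executable CUDA graph and launch it through a stream
+tf::cudaGraphExec exec(cg);
+tf::cudaStream stream;
+stream.run(exec).synchronize();
+
+// update the kernel parameters in place and launch the same executable again
+exec.kernel(task, (N+255)/256, 256, 0, my_kernel, data, N);
+stream.run(exec).synchronize();
+@endcode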
+*/
+template <typename Creator, typename Deleter>
+class cudaGraphExecBase : public std::unique_ptr<std::remove_pointer_t<cudaGraphExec_t>, Deleter> {
+
+ static_assert(std::is_pointer_v<cudaGraphExec_t>, "cudaGraphExec_t is not a pointer type");
+
+ public:
+
+ /**
+ @brief base std::unique_ptr type
+ */
+ using base_type = std::unique_ptr<std::remove_pointer_t<cudaGraphExec_t>, Deleter>;
+
+ /**
+ @brief constructs a `cudaGraphExec` object by passing the given arguments to the executable CUDA graph creator
+
+ Constructs a `cudaGraphExec` object by passing the given arguments to the executable CUDA graph creator
+
+ @param args arguments to pass to the executable CUDA graph creator
+ */
+ template <typename... ArgsT>
+ explicit cudaGraphExecBase(ArgsT&& ... args) : base_type(
+ Creator{}(std::forward<ArgsT>(args)...), Deleter()
+ ) {}
+
+ /**
+ @brief constructs a `cudaGraphExec` from the given rhs using move semantics
+ */
+ cudaGraphExecBase(cudaGraphExecBase&&) = default;
+
+ /**
+ @brief assign the rhs to `*this` using move semantics
+ */
+ cudaGraphExecBase& operator = (cudaGraphExecBase&&) = default;
+
+ // ----------------------------------------------------------------------------------------------
+ // Update Methods
+ // ----------------------------------------------------------------------------------------------
+
+ /**
+ @brief updates parameters of a host task
+
+ This method updates the parameters (callable and user data) of the given host task.
+ */
+ template <typename C>
+ void host(cudaTask task, C&& callable, void* user_data);
+
+ /**
+ @brief updates parameters of a kernel task
+
+ The method is similar to tf::cudaGraph::kernel but operates on an existing task
+ of type tf::cudaTaskType::KERNEL.
+ The kernel function name must NOT change.
+ */
+ template <typename F, typename... ArgsT>
+ void kernel(
+ cudaTask task, dim3 g, dim3 b, size_t shm, F f, ArgsT... args
+ );
+
+ /**
+ @brief updates parameters of a memset task
+
+ The method is similar to tf::cudaGraph::memset but operates on an existing task
+ of type tf::cudaTaskType::MEMSET.
+ The source/destination memory may have different address values but
+ must be allocated from the same contexts as the original
+ source/destination memory.
+ */
+ void memset(cudaTask task, void* dst, int ch, size_t count);
+
+ /**
+ @brief updates parameters of a memcpy task
+
+ The method is similar to tf::cudaGraph::memcpy but operates on an existing task
+ of type tf::cudaTaskType::MEMCPY.
+ The source/destination memory may have different address values but
+ must be allocated from the same contexts as the original
+ source/destination memory.
+ */
+ void memcpy(cudaTask task, void* tgt, const void* src, size_t bytes);
+
+ /**
+ @brief updates parameters of a memset task to a zero task
+
+ The method operates on an existing task of type tf::cudaTaskType::MEMSET
+ and updates it to zero the given typed memory area.
+
+ The source/destination memory may have different address values but
+ must be allocated from the same contexts as the original
+ source/destination memory.
+ */
+ template <typename T, std::enable_if_t<
+ is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr
+ >
+ void zero(cudaTask task, T* dst, size_t count);
+
+ /**
+ @brief updates parameters of a memset task to a fill task
+
+ The method operates on an existing task of type tf::cudaTaskType::MEMSET
+ and updates it to fill the given typed memory area with the given value.
+
+ The source/destination memory may have different address values but
+ must be allocated from the same contexts as the original
+ source/destination memory.
+ */
+ template <typename T, std::enable_if_t<
+ is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr
+ >
+ void fill(cudaTask task, T* dst, T value, size_t count);
+
+ /**
+ @brief updates parameters of a memcpy task to a copy task
+
+ The method is similar to tf::cudaGraph::copy but operates on an existing task
+ of type tf::cudaTaskType::MEMCPY.
+ The source/destination memory may have different address values but
+ must be allocated from the same contexts as the original
+ source/destination memory.
+ */
+ template <typename T,
+ std::enable_if_t<!std::is_same_v<T, void>, void>* = nullptr
+ >
+ void copy(cudaTask task, T* tgt, const T* src, size_t num);
+
+ //---------------------------------------------------------------------------
+ // Algorithm Primitives
+ //---------------------------------------------------------------------------
+
+ /**
+ @brief updates a single-threaded kernel task
+
+ This method updates the callable of an existing single-threaded kernel task
+ created from the CUDA graph of `*this`.
+ */
+ template <typename C>
+ void single_task(cudaTask task, C c);
+
+ /**
+ @brief updates parameters of a `for_each` kernel task created from the CUDA graph of `*this`
+ */
+ template <typename I, typename C, typename E = cudaDefaultExecutionPolicy>
+ void for_each(cudaTask task, I first, I last, C callable);
+
+ /**
+ @brief updates parameters of a `for_each_index` kernel task created from the CUDA graph of `*this`
+ */
+ template <typename I, typename C, typename E = cudaDefaultExecutionPolicy>
+ void for_each_index(cudaTask task, I first, I last, I step, C callable);
+
+ /**
+ @brief updates parameters of a `transform` kernel task created from the CUDA graph of `*this`
+ */
+ template <typename I, typename O, typename C, typename E = cudaDefaultExecutionPolicy>
+ void transform(cudaTask task, I first, I last, O output, C c);
+
+ /**
+ @brief updates parameters of a `transform` kernel task created from the CUDA graph of `*this`
+ */
+ template <typename I1, typename I2, typename O, typename C, typename E = cudaDefaultExecutionPolicy>
+ void transform(cudaTask task, I1 first1, I1 last1, I2 first2, O output, C c);
+
+
+ private:
+
+ cudaGraphExecBase(const cudaGraphExecBase&) = delete;
+
+ cudaGraphExecBase& operator = (const cudaGraphExecBase&) = delete;
+};
+
+// ------------------------------------------------------------------------------------------------
+// update methods
+// ------------------------------------------------------------------------------------------------
+
+// Function: host
+template <typename Creator, typename Deleter>
+template <typename C>
+void cudaGraphExecBase<Creator, Deleter>::host(cudaTask task, C&& func, void* user_data) {
+ cudaHostNodeParams p {func, user_data};
+ TF_CHECK_CUDA(
+ cudaGraphExecHostNodeSetParams(this->get(), task._native_node, &p),
+ "failed to update kernel parameters on ", task
+ );
+}
+
+// Function: update kernel parameters
+template <typename Creator, typename Deleter>
+template <typename F, typename... ArgsT>
+void cudaGraphExecBase<Creator, Deleter>::kernel(
+ cudaTask task, dim3 g, dim3 b, size_t s, F f, ArgsT... args
+) {
+ cudaKernelNodeParams p;
+
+ void* arguments[sizeof...(ArgsT)] = { (void*)(&args)... };
+ p.func = (void*)f;
+ p.gridDim = g;
+ p.blockDim = b;
+ p.sharedMemBytes = s;
+ p.kernelParams = arguments;
+ p.extra = nullptr;
+
+ TF_CHECK_CUDA(
+ cudaGraphExecKernelNodeSetParams(this->get(), task._native_node, &p),
+ "failed to update kernel parameters on ", task
+ );
+}
+
+// Function: update copy parameters
+template <typename Creator, typename Deleter>
+template <typename T, std::enable_if_t<!std::is_same_v<T, void>, void>*>
+void cudaGraphExecBase<Creator, Deleter>::copy(cudaTask task, T* tgt, const T* src, size_t num) {
+ auto p = cuda_get_copy_parms(tgt, src, num);
+ TF_CHECK_CUDA(
+ cudaGraphExecMemcpyNodeSetParams(this->get(), task._native_node, &p),
+ "failed to update memcpy parameters on ", task
+ );
+}
+
+// Function: update memcpy parameters
+template <typename Creator, typename Deleter>
+void cudaGraphExecBase<Creator, Deleter>::memcpy(
+ cudaTask task, void* tgt, const void* src, size_t bytes
+) {
+ auto p = cuda_get_memcpy_parms(tgt, src, bytes);
+
+ TF_CHECK_CUDA(
+ cudaGraphExecMemcpyNodeSetParams(this->get(), task._native_node, &p),
+ "failed to update memcpy parameters on ", task
+ );
+}
+
+// Procedure: memset
+template <typename Creator, typename Deleter>
+void cudaGraphExecBase<Creator, Deleter>::memset(cudaTask task, void* dst, int ch, size_t count) {
+ auto p = cuda_get_memset_parms(dst, ch, count);
+ TF_CHECK_CUDA(
+ cudaGraphExecMemsetNodeSetParams(this->get(), task._native_node, &p),
+ "failed to update memset parameters on ", task
+ );
+}
+
+// Procedure: fill
+template <typename Creator, typename Deleter>
+template <typename T, std::enable_if_t<
+ is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>*
+>
+void cudaGraphExecBase<Creator, Deleter>::fill(cudaTask task, T* dst, T value, size_t count) {
+ auto p = cuda_get_fill_parms(dst, value, count);
+ TF_CHECK_CUDA(
+ cudaGraphExecMemsetNodeSetParams(this->get(), task._native_node, &p),
+ "failed to update memset parameters on ", task
+ );
+}
+
+// Procedure: zero
+template <typename Creator, typename Deleter>
+template <typename T, std::enable_if_t<
+ is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>*
+>
+void cudaGraphExecBase<Creator, Deleter>::zero(cudaTask task, T* dst, size_t count) {
+ auto p = cuda_get_zero_parms(dst, count);
+ TF_CHECK_CUDA(
+ cudaGraphExecMemsetNodeSetParams(this->get(), task._native_node, &p),
+ "failed to update memset parameters on ", task
+ );
+}
+
+//-------------------------------------------------------------------------------------------------
+// definitions of cudaStreamBase::run (declared in cuda_stream.hpp)
+//-------------------------------------------------------------------------------------------------
+
+/**
+@private
+*/
+template <typename SC, typename SD>
+cudaStreamBase<SC, SD>& cudaStreamBase<SC, SD>::run(cudaGraphExec_t exec) {
+ TF_CHECK_CUDA(
+ cudaGraphLaunch(exec, this->get()), "failed to launch a CUDA executable graph"
+ );
+ return *this;
+}
+
+/**
+@private
+*/
+template <typename SC, typename SD>
+template <typename EC, typename ED>
+cudaStreamBase<SC, SD>& cudaStreamBase<SC, SD>::run(const cudaGraphExecBase<EC, ED>& exec) {
+ return run(exec.get());
+}
+
+
+
+} // end of namespace tf -------------------------------------------------------------------------
#pragma once
-#include "cuda_object.hpp"
+#include "cuda_error.hpp"
/**
@file cuda_stream.hpp
namespace tf {
+// ----------------------------------------------------------------------------
+// cudaEventBase
+// ----------------------------------------------------------------------------
+
+/**
+@struct cudaEventCreator
+
+@brief functor to create a `cudaEvent_t` object
+*/
+struct cudaEventCreator {
+
+ /**
+ @brief creates a new `cudaEvent_t` object using `cudaEventCreate`
+ */
+ cudaEvent_t operator () () const {
+ cudaEvent_t event;
+ TF_CHECK_CUDA(cudaEventCreate(&event), "failed to create a CUDA event");
+ return event;
+ }
+
+ /**
+ @brief creates a new `cudaEvent_t` object using `cudaEventCreate` with the given `flag`
+ */
+ cudaEvent_t operator () (unsigned int flag) const {
+ cudaEvent_t event;
+ TF_CHECK_CUDA(
+ cudaEventCreateWithFlags(&event, flag),
+ "failed to create a CUDA event with flag=", flag
+ );
+ return event;
+ }
+
+ /**
+ @brief returns the given `cudaEvent_t` object
+ */
+ cudaEvent_t operator () (cudaEvent_t event) const {
+ return event;
+ }
+};
+
+/**
+@struct cudaEventDeleter
+
+@brief functor to delete a `cudaEvent_t` object
+*/
+struct cudaEventDeleter {
+
+ /**
+ @brief deletes the given `cudaEvent_t` object using `cudaEventDestroy`
+ */
+ void operator () (cudaEvent_t event) const {
+ cudaEventDestroy(event);
+ }
+};
+
+/**
+@class cudaEventBase
+
+@brief class to create a smart pointer wrapper for managing `cudaEvent_t`
+
+@tparam Creator functor to create the event (used in constructor)
+@tparam Deleter functor to delete the event (used in destructor)
+
+The `cudaEventBase` class encapsulates a `cudaEvent_t` using `std::unique_ptr`, ensuring that
+CUDA events are properly created and destroyed with unique ownership.
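+
+A minimal sketch of cross-stream synchronization (the asynchronous work enqueued
+on the streams is omitted and assumed to exist):
+
+@code{.cpp}
+tf::cudaStream s1, s2;
+tf::cudaEvent event;
+
+// ... enqueue asynchronous work on s1.get() ...
+s1.record(event.get());   // record the event on s1 after its pending work
+s2.wait(event.get());     // future work on s2 waits until the event completes
+// ... enqueue asynchronous work on s2.get() that depends on s1 ...
+@endcode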
+*/
+template <typename Creator, typename Deleter>
+class cudaEventBase : public std::unique_ptr<std::remove_pointer_t<cudaEvent_t>, Deleter> {
+
+ static_assert(std::is_pointer_v<cudaEvent_t>, "cudaEvent_t is not a pointer type");
+
+ public:
+
+ /**
+ @brief base type for the underlying unique pointer
+
+ This alias provides a shorthand for the underlying `std::unique_ptr` type that manages
+ CUDA event resources with an associated deleter.
+ */
+ using base_type = std::unique_ptr<std::remove_pointer_t<cudaEvent_t>, Deleter>;
+
+ /**
+ @brief constructs a `cudaEvent` object by passing the given arguments to the event creator
+
+ Constructs a `cudaEvent` object by passing the given arguments to the event creator
+
+ @param args arguments to pass to the event creator
+ */
+ template <typename... ArgsT>
+ explicit cudaEventBase(ArgsT&& ... args) : base_type(
+ Creator{}(std::forward<ArgsT>(args)...), Deleter()
+ ) {
+ }
+
+ /**
+ @brief constructs a `cudaEvent` from the given rhs using move semantics
+ */
+ cudaEventBase(cudaEventBase&&) = default;
+
+ /**
+ @brief assign the rhs to `*this` using move semantics
+ */
+ cudaEventBase& operator = (cudaEventBase&&) = default;
+
+ private:
+
+ cudaEventBase(const cudaEventBase&) = delete;
+ cudaEventBase& operator = (const cudaEventBase&) = delete;
+};
+
+/**
+@brief default smart pointer type to manage a `cudaEvent_t` object with unique ownership
+*/
+using cudaEvent = cudaEventBase<cudaEventCreator, cudaEventDeleter>;
// ----------------------------------------------------------------------------
// cudaStream
// ----------------------------------------------------------------------------
/**
-@private
+@struct cudaStreamCreator
+
+@brief functor to create a `cudaStream_t` object
*/
struct cudaStreamCreator {
+
+ /**
+ @brief constructs a new `cudaStream_t` object using `cudaStreamCreate`
+ */
cudaStream_t operator () () const {
cudaStream_t stream;
TF_CHECK_CUDA(cudaStreamCreate(&stream), "failed to create a CUDA stream");
return stream;
}
+
+ /**
+ @brief returns the given `cudaStream_t` object
+ */
+ cudaStream_t operator () (cudaStream_t stream) const {
+ return stream;
+ }
};
/**
-@private
+@struct cudaStreamDeleter
+
+@brief functor to delete a `cudaStream_t` object
*/
struct cudaStreamDeleter {
+
+ /**
+ @brief deletes the given `cudaStream_t` object
+ */
void operator () (cudaStream_t stream) const {
- if(stream) {
- cudaStreamDestroy(stream);
- }
+ cudaStreamDestroy(stream);
}
};
/**
-@class cudaStream
+@class cudaStreamBase
-@brief class to create an RAII-styled wrapper over a native CUDA stream
+@brief class to create a smart pointer wrapper for managing `cudaStream_t`
-A cudaStream object is an RAII-styled wrapper over a native CUDA stream
-(@c cudaStream_t).
-A cudaStream object is move-only.
+@tparam Creator functor to create the stream (used in constructor)
+@tparam Deleter functor to delete the stream (used in destructor)
+
+The `cudaStreamBase` class encapsulates a `cudaStream_t` using `std::unique_ptr`, ensuring that
+CUDA streams are properly created and destroyed with unique ownership.
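+
+A minimal capture sketch (the work enqueued during capture is omitted and assumed
+to exist; error handling of the captured graph is also omitted):
+
+@code{.cpp}
+tf::cudaStream stream;
+
+stream.begin_capture();
+// ... enqueue asynchronous work on stream.get() to be captured ...
+cudaGraph_t captured = stream.end_capture();
+
+// instantiate and launch the captured graph, then release it
+tf::cudaGraphExec exec(captured);
+stream.run(exec).synchronize();
+cudaGraphDestroy(captured);
+@endcode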
*/
-class cudaStream :
+template <typename Creator, typename Deleter>
+class cudaStreamBase : public std::unique_ptr<std::remove_pointer_t<cudaStream_t>, Deleter> {
- public cudaObject <cudaStream_t, cudaStreamCreator, cudaStreamDeleter> {
+ static_assert(std::is_pointer_v<cudaStream_t>, "cudaStream_t is not a pointer type");
public:
-
- /**
- @brief constructs an RAII-styled object from the given CUDA stream
-
- Constructs a cudaStream object which owns @c stream.
- */
- explicit cudaStream(cudaStream_t stream) : cudaObject(stream) {
- }
-
- /**
- @brief default constructor
- */
- cudaStream() = default;
-
- /**
- @brief synchronizes the associated stream
-
- Equivalently calling @c cudaStreamSynchronize to block
- until this stream has completed all operations.
- */
- void synchronize() const {
- TF_CHECK_CUDA(
- cudaStreamSynchronize(object), "failed to synchronize a CUDA stream"
- );
- }
-
- /**
- @brief begins graph capturing on the stream
-
- When a stream is in capture mode, all operations pushed into the stream
- will not be executed, but will instead be captured into a graph,
- which will be returned via cudaStream::end_capture.
-
- A thread's mode can be one of the following:
- + @c cudaStreamCaptureModeGlobal: This is the default mode.
- If the local thread has an ongoing capture sequence that was not initiated
- with @c cudaStreamCaptureModeRelaxed at @c cuStreamBeginCapture,
- or if any other thread has a concurrent capture sequence initiated with
- @c cudaStreamCaptureModeGlobal, this thread is prohibited from potentially
- unsafe API calls.
-
- + @c cudaStreamCaptureModeThreadLocal: If the local thread has an ongoing capture
- sequence not initiated with @c cudaStreamCaptureModeRelaxed,
- it is prohibited from potentially unsafe API calls.
- Concurrent capture sequences in other threads are ignored.
-
- + @c cudaStreamCaptureModeRelaxed: The local thread is not prohibited
- from potentially unsafe API calls. Note that the thread is still prohibited
- from API calls which necessarily conflict with stream capture, for example,
- attempting @c cudaEventQuery on an event that was last recorded
- inside a capture sequence.
- */
- void begin_capture(cudaStreamCaptureMode m = cudaStreamCaptureModeGlobal) const {
- TF_CHECK_CUDA(
- cudaStreamBeginCapture(object, m),
- "failed to begin capture on stream ", object, " with thread mode ", m
- );
- }
-
- /**
- @brief ends graph capturing on the stream
-
- Equivalently calling @c cudaStreamEndCapture to
- end capture on stream and returning the captured graph.
- Capture must have been initiated on stream via a call to cudaStream::begin_capture.
- If capture was invalidated, due to a violation of the rules of stream capture,
- then a NULL graph will be returned.
- */
- cudaGraph_t end_capture() const {
- cudaGraph_t native_g;
- TF_CHECK_CUDA(
- cudaStreamEndCapture(object, &native_g),
- "failed to end capture on stream ", object
- );
- return native_g;
- }
-
- /**
- @brief records an event on the stream
-
- Equivalently calling @c cudaEventRecord to record an event on this stream,
- both of which must be on the same CUDA context.
- */
- void record(cudaEvent_t event) const {
- TF_CHECK_CUDA(
- cudaEventRecord(event, object),
- "failed to record event ", event, " on stream ", object
- );
- }
-
- /**
- @brief waits on an event
-
- Equivalently calling @c cudaStreamWaitEvent to make all future work
- submitted to stream wait for all work captured in event.
- */
- void wait(cudaEvent_t event) const {
- TF_CHECK_CUDA(
- cudaStreamWaitEvent(object, event, 0),
- "failed to wait for event ", event, " on stream ", object
- );
- }
-};
-
-// ----------------------------------------------------------------------------
-// cudaEvent
-// ----------------------------------------------------------------------------
-/**
-@private
-*/
-struct cudaEventCreator {
+ /**
+ @brief base type for the underlying unique pointer
+
+ This alias provides a shorthand for the underlying `std::unique_ptr` type that manages
+ CUDA stream resources with an associated deleter.
+ */
+ using base_type = std::unique_ptr<std::remove_pointer_t<cudaStream_t>, Deleter>;
+
+ /**
+ @brief constructs a `cudaStream` object by passing the given arguments to the stream creator
+
+ Constructs a `cudaStream` object by passing the given arguments to the stream creator
+
+ @param args arguments to pass to the stream creator
+ */
+ template <typename... ArgsT>
+ explicit cudaStreamBase(ArgsT&& ... args) : base_type(
+ Creator{}(std::forward<ArgsT>(args)...), Deleter()
+ ) {
+ }
+
+ /**
+ @brief constructs a `cudaStream` from the given rhs using move semantics
+ */
+ cudaStreamBase(cudaStreamBase&&) = default;
+
+ /**
+ @brief assign the rhs to `*this` using move semantics
+ */
+ cudaStreamBase& operator = (cudaStreamBase&&) = default;
+
+ /**
+ @brief synchronizes the associated stream
- cudaEvent_t operator () () const {
- cudaEvent_t event;
- TF_CHECK_CUDA(cudaEventCreate(&event), "failed to create a CUDA event");
- return event;
+ Equivalently calling @c cudaStreamSynchronize to block
+ until this stream has completed all operations.
+ */
+ cudaStreamBase& synchronize() {
+ TF_CHECK_CUDA(
+ cudaStreamSynchronize(this->get()), "failed to synchronize a CUDA stream"
+ );
+ return *this;
}
- cudaEvent_t operator () (unsigned int flag) const {
- cudaEvent_t event;
+ /**
+ @brief begins graph capturing on the stream
+
+ When a stream is in capture mode, all operations pushed into the stream
+ will not be executed, but will instead be captured into a graph,
+ which will be returned via cudaStream::end_capture.
+
+ A thread's mode can be one of the following:
+ + @c cudaStreamCaptureModeGlobal: This is the default mode.
+ If the local thread has an ongoing capture sequence that was not initiated
+ with @c cudaStreamCaptureModeRelaxed at @c cuStreamBeginCapture,
+ or if any other thread has a concurrent capture sequence initiated with
+ @c cudaStreamCaptureModeGlobal, this thread is prohibited from potentially
+ unsafe API calls.
+
+ + @c cudaStreamCaptureModeThreadLocal: If the local thread has an ongoing capture
+ sequence not initiated with @c cudaStreamCaptureModeRelaxed,
+ it is prohibited from potentially unsafe API calls.
+ Concurrent capture sequences in other threads are ignored.
+
+ + @c cudaStreamCaptureModeRelaxed: The local thread is not prohibited
+ from potentially unsafe API calls. Note that the thread is still prohibited
+ from API calls which necessarily conflict with stream capture, for example,
+ attempting @c cudaEventQuery on an event that was last recorded
+ inside a capture sequence.
+ */
+ void begin_capture(cudaStreamCaptureMode m = cudaStreamCaptureModeGlobal) const {
TF_CHECK_CUDA(
- cudaEventCreateWithFlags(&event, flag),
- "failed to create a CUDA event with flag=", flag
+ cudaStreamBeginCapture(this->get(), m),
+ "failed to begin capture on stream ", this->get(), " with thread mode ", m
);
- return event;
}
-};
-/**
-@private
-*/
-struct cudaEventDeleter {
- void operator () (cudaEvent_t event) const {
- if (event != nullptr) {
- cudaEventDestroy(event);
- }
+ /**
+ @brief ends graph capturing on the stream
+
+ Equivalently calling @c cudaStreamEndCapture to
+ end capture on stream and returning the captured graph.
+ Capture must have been initiated on stream via a call to cudaStream::begin_capture.
+ If capture was invalidated, due to a violation of the rules of stream capture,
+ then a NULL graph will be returned.
+ */
+ cudaGraph_t end_capture() const {
+ cudaGraph_t native_g;
+ TF_CHECK_CUDA(
+ cudaStreamEndCapture(this->get(), &native_g),
+ "failed to end capture on stream ", this->get()
+ );
+ return native_g;
}
-};
+
+ /**
+ @brief records an event on the stream
-/**
-@class cudaEvent
+ Equivalently calling @c cudaEventRecord to record an event on this stream,
+ both of which must be on the same CUDA context.
+ */
+ void record(cudaEvent_t event) const {
+ TF_CHECK_CUDA(
+ cudaEventRecord(event, this->get()),
+ "failed to record event ", event, " on stream ", this->get()
+ );
+ }
-@brief class to create an RAII-styled wrapper over a native CUDA event
+ /**
+ @brief waits on an event
-A cudaEvent object is an RAII-styled wrapper over a native CUDA event
-(@c cudaEvent_t).
-A cudaEvent object is move-only.
-*/
-class cudaEvent :
- public cudaObject<cudaEvent_t, cudaEventCreator, cudaEventDeleter> {
+ Equivalently calling @c cudaStreamWaitEvent to make all future work
+ submitted to stream wait for all work captured in event.
+ */
+ void wait(cudaEvent_t event) const {
+ TF_CHECK_CUDA(
+ cudaStreamWaitEvent(this->get(), event, 0),
+ "failed to wait for event ", event, " on stream ", this->get()
+ );
+ }
- public:
+ /**
+ @brief runs the given executable CUDA graph
- /**
- @brief constructs an RAII-styled CUDA event object from the given CUDA event
- */
- explicit cudaEvent(cudaEvent_t event) : cudaObject(event) { }
+ @param exec the given `cudaGraphExec`
+ */
+ template <typename C, typename D>
+ cudaStreamBase& run(const cudaGraphExecBase<C, D>& exec);
- /**
- @brief constructs an RAII-styled CUDA event object
- */
- cudaEvent() = default;
-
- /**
- @brief constructs an RAII-styled CUDA event object with the given flag
- */
- explicit cudaEvent(unsigned int flag) : cudaObject(cudaEventCreator{}(flag)) { }
+ /**
+ @brief runs the given executable CUDA graph
+
+ @param exec the given `cudaGraphExec_t`
+ */
+ cudaStreamBase& run(cudaGraphExec_t exec);
+
+ private:
+
+ cudaStreamBase(const cudaStreamBase&) = delete;
+ cudaStreamBase& operator = (const cudaStreamBase&) = delete;
};
+/**
+@brief default smart pointer type to manage a `cudaStream_t` object with unique ownership
+*/
+using cudaStream = cudaStreamBase<cudaStreamCreator, cudaStreamDeleter>;
} // end of namespace tf -----------------------------------------------------
#pragma once
#include "../taskflow.hpp"
-#include "cuda_task.hpp"
-#include "cuda_capturer.hpp"
+#include "cuda_graph.hpp"
+#include "cuda_graph_exec.hpp"
+#include "algorithm/single_task.hpp"
/**
@file taskflow/cuda/cudaflow.hpp
namespace tf {
-// ----------------------------------------------------------------------------
-// class definition: cudaFlow
-// ----------------------------------------------------------------------------
-
/**
-@class cudaFlow
-
-@brief class to create a %cudaFlow task dependency graph
-
-A %cudaFlow is a high-level interface over CUDA Graph to perform GPU operations
-using the task dependency graph model.
-The class provides a set of methods for creating and launch different tasks
-on one or multiple CUDA devices,
-for instance, kernel tasks, data transfer tasks, and memory operation tasks.
-The following example creates a %cudaFlow of two kernel tasks, @c task1 and
-@c task2, where @c task1 runs before @c task2.
-
-@code{.cpp}
-tf::Taskflow taskflow;
-tf::Executor executor;
-
-taskflow.emplace([&](tf::cudaFlow& cf){
- // create two kernel tasks
- tf::cudaTask task1 = cf.kernel(grid1, block1, shm_size1, kernel1, args1);
- tf::cudaTask task2 = cf.kernel(grid2, block2, shm_size2, kernel2, args2);
-
- // kernel1 runs before kernel2
- task1.precede(task2);
-});
-
-executor.run(taskflow).wait();
-@endcode
-
-A %cudaFlow is a task (tf::Task) created from tf::Taskflow
-and will be run by @em one worker thread in the executor.
-That is, the callable that describes a %cudaFlow
-will be executed sequentially.
-Inside a %cudaFlow task, different GPU tasks (tf::cudaTask) may run
-in parallel scheduled by the CUDA runtime.
-
-Please refer to @ref GPUTaskingcudaFlow for details.
+@brief default smart pointer type to manage a `cudaGraph_t` object with unique ownership
*/
-class cudaFlow {
-
- public:
-
- /**
- @brief constructs a %cudaFlow
- */
- cudaFlow();
-
- /**
- @brief destroys the %cudaFlow and its associated native CUDA graph
- and executable graph
- */
- ~cudaFlow() = default;
-
- /**
- @brief default move constructor
- */
- cudaFlow(cudaFlow&&) = default;
-
- /**
- @brief default move assignment operator
- */
- cudaFlow& operator = (cudaFlow&&) = default;
-
- /**
- @brief queries the emptiness of the graph
- */
- bool empty() const;
-
- /**
- @brief queries the number of tasks
- */
- size_t num_tasks() const;
-
- /**
- @brief clears the %cudaFlow object
- */
- void clear();
-
- /**
- @brief dumps the %cudaFlow graph into a DOT format through an
- output stream
- */
- void dump(std::ostream& os) const;
-
- /**
- @brief dumps the native CUDA graph into a DOT format through an
- output stream
-
- The native CUDA graph may be different from the upper-level %cudaFlow
- graph when flow capture is involved.
- */
- void dump_native_graph(std::ostream& os) const;
-
- // ------------------------------------------------------------------------
- // Graph building routines
- // ------------------------------------------------------------------------
-
- /**
- @brief creates a no-operation task
-
- @return a tf::cudaTask handle
-
- An empty node performs no operation during execution,
- but can be used for transitive ordering.
- For example, a phased execution graph with 2 groups of @c n nodes
- with a barrier between them can be represented using an empty node
- and @c 2*n dependency edges,
- rather than no empty node and @c n^2 dependency edges.
- */
- cudaTask noop();
-
- /**
- @brief creates a host task that runs a callable on the host
-
- @tparam C callable type
-
- @param callable a callable object with neither arguments nor return
- (i.e., constructible from @c std::function<void()>)
-
- @return a tf::cudaTask handle
-
- A host task can only execute CPU-specific functions and cannot do any CUDA calls
- (e.g., @c cudaMalloc).
- */
- template <typename C>
- cudaTask host(C&& callable);
-
- /**
- @brief updates parameters of a host task
-
- The method is similar to tf::cudaFlow::host but operates on a task
- of type tf::cudaTaskType::HOST.
- */
- template <typename C>
- void host(cudaTask task, C&& callable);
-
- /**
- @brief creates a kernel task
-
- @tparam F kernel function type
- @tparam ArgsT kernel function parameters type
-
- @param g configured grid
- @param b configured block
- @param s configured shared memory size in bytes
- @param f kernel function
- @param args arguments to forward to the kernel function by copy
-
- @return a tf::cudaTask handle
- */
- template <typename F, typename... ArgsT>
- cudaTask kernel(dim3 g, dim3 b, size_t s, F f, ArgsT... args);
-
- /**
- @brief updates parameters of a kernel task
-
- The method is similar to tf::cudaFlow::kernel but operates on a task
- of type tf::cudaTaskType::KERNEL.
- The kernel function name must NOT change.
- */
- template <typename F, typename... ArgsT>
- void kernel(
- cudaTask task, dim3 g, dim3 b, size_t shm, F f, ArgsT... args
- );
-
- /**
- @brief creates a memset task that fills untyped data with a byte value
-
- @param dst pointer to the destination device memory area
- @param v value to set for each byte of specified memory
- @param count size in bytes to set
-
- @return a tf::cudaTask handle
-
- A memset task fills the first @c count bytes of device memory area
- pointed by @c dst with the byte value @c v.
- */
- cudaTask memset(void* dst, int v, size_t count);
-
- /**
- @brief updates parameters of a memset task
-
- The method is similar to tf::cudaFlow::memset but operates on a task
- of type tf::cudaTaskType::MEMSET.
- The source/destination memory may have different address values but
- must be allocated from the same contexts as the original
- source/destination memory.
- */
- void memset(cudaTask task, void* dst, int ch, size_t count);
-
- /**
- @brief creates a memcpy task that copies untyped data in bytes
-
- @param tgt pointer to the target memory block
- @param src pointer to the source memory block
- @param bytes bytes to copy
-
- @return a tf::cudaTask handle
-
- A memcpy task transfers @c bytes of data from a source location
- to a target location. Direction can be arbitrary among CPUs and GPUs.
- */
- cudaTask memcpy(void* tgt, const void* src, size_t bytes);
-
- /**
- @brief updates parameters of a memcpy task
-
- The method is similar to tf::cudaFlow::memcpy but operates on a task
- of type tf::cudaTaskType::MEMCPY.
- The source/destination memory may have different address values but
- must be allocated from the same contexts as the original
- source/destination memory.
- */
- void memcpy(cudaTask task, void* tgt, const void* src, size_t bytes);
-
- /**
- @brief creates a memset task that sets a typed memory block to zero
-
- @tparam T element type (size of @c T must be either 1, 2, or 4)
- @param dst pointer to the destination device memory area
- @param count number of elements
-
- @return a tf::cudaTask handle
-
- A zero task zeroes the first @c count elements of type @c T
- in a device memory area pointed by @c dst.
- */
- template <typename T, std::enable_if_t<
- is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr
- >
- cudaTask zero(T* dst, size_t count);
-
- /**
- @brief updates parameters of a memset task to a zero task
-
- The method is similar to tf::cudaFlow::zero but operates on
- a task of type tf::cudaTaskType::MEMSET.
-
- The source/destination memory may have different address values but
- must be allocated from the same contexts as the original
- source/destination memory.
- */
- template <typename T, std::enable_if_t<
- is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr
- >
- void zero(cudaTask task, T* dst, size_t count);
-
- /**
- @brief creates a memset task that fills a typed memory block with a value
-
- @tparam T element type (size of @c T must be either 1, 2, or 4)
-
- @param dst pointer to the destination device memory area
- @param value value to fill for each element of type @c T
- @param count number of elements
-
- @return a tf::cudaTask handle
-
- A fill task fills the first @c count elements of type @c T with @c value
- in a device memory area pointed by @c dst.
- The value to fill is interpreted in type @c T rather than byte.
- */
- template <typename T, std::enable_if_t<
- is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr
- >
- cudaTask fill(T* dst, T value, size_t count);
-
- /**
- @brief updates parameters of a memset task to a fill task
-
- The method is similar to tf::cudaFlow::fill but operates on a task
- of type tf::cudaTaskType::MEMSET.
-
- The source/destination memory may have different address values but
- must be allocated from the same contexts as the original
- source/destination memory.
- */
- template <typename T, std::enable_if_t<
- is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>* = nullptr
- >
- void fill(cudaTask task, T* dst, T value, size_t count);
-
- /**
- @brief creates a memcopy task that copies typed data
-
- @tparam T element type (non-void)
-
- @param tgt pointer to the target memory block
- @param src pointer to the source memory block
- @param num number of elements to copy
-
- @return a tf::cudaTask handle
-
- A copy task transfers <tt>num*sizeof(T)</tt> bytes of data from a source location
- to a target location. Direction can be arbitrary among CPUs and GPUs.
- */
- template <typename T,
- std::enable_if_t<!std::is_same_v<T, void>, void>* = nullptr
- >
- cudaTask copy(T* tgt, const T* src, size_t num);
-
- /**
- @brief updates parameters of a memcpy task to a copy task
-
- The method is similar to tf::cudaFlow::copy but operates on a task
- of type tf::cudaTaskType::MEMCPY.
- The source/destination memory may have different address values but
- must be allocated from the same contexts as the original
- source/destination memory.
- */
- template <typename T,
- std::enable_if_t<!std::is_same_v<T, void>, void>* = nullptr
- >
- void copy(cudaTask task, T* tgt, const T* src, size_t num);
-
- // ------------------------------------------------------------------------
- // run method
- // ------------------------------------------------------------------------
- /**
- @brief offloads the %cudaFlow onto a GPU asynchronously via a stream
-
- @param stream stream for performing this operation
-
- Offloads the present %cudaFlow onto a GPU asynchronously via
- the given stream.
-
- An offloaded %cudaFlow forces the underlying graph to be instantiated.
- After the instantiation, you should not modify the graph topology
- but update node parameters.
- */
- void run(cudaStream_t stream);
-
- /**
- @brief acquires a reference to the underlying CUDA graph
- */
- cudaGraph_t native_graph();
-
- /**
- @brief acquires a reference to the underlying CUDA graph executable
- */
- cudaGraphExec_t native_executable();
-
- // ------------------------------------------------------------------------
- // generic algorithms
- // ------------------------------------------------------------------------
-
- /**
- @brief runs a callable with only a single kernel thread
-
- @tparam C callable type
-
- @param c callable to run by a single kernel thread
-
- @return a tf::cudaTask handle
- */
- template <typename C>
- cudaTask single_task(C c);
-
- /**
- @brief updates a single-threaded kernel task
-
- This method is similar to cudaFlow::single_task but operates
- on an existing task.
- */
- template <typename C>
- void single_task(cudaTask task, C c);
-
- /**
- @brief applies a callable to each dereferenced element of the data array
-
- @tparam I iterator type
- @tparam C callable type
-
- @param first iterator to the beginning (inclusive)
- @param last iterator to the end (exclusive)
- @param callable a callable object to apply to the dereferenced iterator
-
- @return a tf::cudaTask handle
-
- This method is equivalent to the parallel execution of the following loop on a GPU:
-
- @code{.cpp}
- for(auto itr = first; itr != last; itr++) {
- callable(*itr);
- }
- @endcode
- */
- template <typename I, typename C>
- cudaTask for_each(I first, I last, C callable);
-
- /**
- @brief updates parameters of a kernel task created from
- tf::cudaFlow::for_each
-
- The type of the iterators and the callable must be the same as
- the task created from tf::cudaFlow::for_each.
- */
- template <typename I, typename C>
- void for_each(cudaTask task, I first, I last, C callable);
-
- /**
- @brief applies a callable to each index in the range with the step size
-
- @tparam I index type
- @tparam C callable type
-
- @param first beginning index
- @param last last index
- @param step step size
- @param callable the callable to apply to each element in the data array
-
- @return a tf::cudaTask handle
-
- This method is equivalent to the parallel execution of the following loop on a GPU:
-
- @code{.cpp}
- // step is positive [first, last)
- for(auto i=first; i<last; i+=step) {
- callable(i);
- }
-
- // step is negative [first, last)
- for(auto i=first; i>last; i+=step) {
- callable(i);
- }
- @endcode
- */
- template <typename I, typename C>
- cudaTask for_each_index(I first, I last, I step, C callable);
-
- /**
- @brief updates parameters of a kernel task created from
- tf::cudaFlow::for_each_index
-
- The type of the iterators and the callable must be the same as
- the task created from tf::cudaFlow::for_each_index.
- */
- template <typename I, typename C>
- void for_each_index(
- cudaTask task, I first, I last, I step, C callable
- );
-
- /**
- @brief applies a callable to a source range and stores the result in a target range
-
- @tparam I input iterator type
- @tparam O output iterator type
- @tparam C unary operator type
-
- @param first iterator to the beginning of the input range
- @param last iterator to the end of the input range
- @param output iterator to the beginning of the output range
- @param op the operator to apply to transform each element in the range
-
- @return a tf::cudaTask handle
-
- This method is equivalent to the parallel execution of the following loop on a GPU:
-
- @code{.cpp}
- while (first != last) {
- *output++ = callable(*first++);
- }
- @endcode
- */
- template <typename I, typename O, typename C>
- cudaTask transform(I first, I last, O output, C op);
-
- /**
- @brief updates parameters of a kernel task created from
- tf::cudaFlow::transform
-
- The type of the iterators and the callable must be the same as
- the task created from tf::cudaFlow::for_each.
- */
- template <typename I, typename O, typename C>
- void transform(cudaTask task, I first, I last, O output, C c);
-
- /**
- @brief creates a task to perform parallel transforms over two ranges of items
-
- @tparam I1 first input iterator type
- @tparam I2 second input iterator type
- @tparam O output iterator type
- @tparam C unary operator type
-
- @param first1 iterator to the beginning of the input range
- @param last1 iterator to the end of the input range
- @param first2 iterato
- @param output iterator to the beginning of the output range
- @param op binary operator to apply to transform each pair of items in the
- two input ranges
-
- @return cudaTask handle
-
- This method is equivalent to the parallel execution of the following loop on a GPU:
-
- @code{.cpp}
- while (first1 != last1) {
- *output++ = op(*first1++, *first2++);
- }
- @endcode
- */
- template <typename I1, typename I2, typename O, typename C>
- cudaTask transform(I1 first1, I1 last1, I2 first2, O output, C op);
-
- /**
- @brief updates parameters of a kernel task created from
- tf::cudaFlow::transform
+using cudaGraph = cudaGraphBase<cudaGraphCreator, cudaGraphDeleter>;
- The type of the iterators and the callable must be the same as
- the task created from tf::cudaFlow::for_each.
- */
- template <typename I1, typename I2, typename O, typename C>
- void transform(
- cudaTask task, I1 first1, I1 last1, I2 first2, O output, C c
- );
-
- // ------------------------------------------------------------------------
- // subflow
- // ------------------------------------------------------------------------
-
- /**
- @brief constructs a subflow graph through tf::cudaFlowCapturer
-
- @tparam C callable type constructible from
- @c std::function<void(tf::cudaFlowCapturer&)>
-
- @param callable the callable to construct a capture flow
-
- @return a tf::cudaTask handle
-
- A captured subflow forms a sub-graph to the %cudaFlow and can be used to
- capture custom (or third-party) kernels that cannot be directly constructed
- from the %cudaFlow.
-
- Example usage:
-
- @code{.cpp}
- taskflow.emplace([&](tf::cudaFlow& cf){
-
- tf::cudaTask my_kernel = cf.kernel(my_arguments);
-
- // create a flow capturer to capture custom kernels
- tf::cudaTask my_subflow = cf.capture([&](tf::cudaFlowCapturer& capturer){
- capturer.on([&](cudaStream_t stream){
- invoke_custom_kernel_with_stream(stream, custom_arguments);
- });
- });
-
- my_kernel.precede(my_subflow);
- });
- @endcode
- */
- template <typename C>
- cudaTask capture(C&& callable);
-
- /**
- @brief updates the captured child graph
-
- The method is similar to tf::cudaFlow::capture but operates on a task
- of type tf::cudaTaskType::SUBFLOW.
- The new captured graph must be topologically identical to the original
- captured graph.
- */
- template <typename C>
- void capture(cudaTask task, C callable);
-
- private:
-
- cudaFlowGraph _cfg;
- cudaGraphExec _exe {nullptr};
-};
-
-// Construct a standalone cudaFlow
-inline cudaFlow::cudaFlow() {
- _cfg._native_handle.create();
-}
-
-// Procedure: clear
-inline void cudaFlow::clear() {
- _exe.clear();
- _cfg.clear();
- _cfg._native_handle.create();
-}
-
-// Function: empty
-inline bool cudaFlow::empty() const {
- return _cfg._nodes.empty();
-}
-
-// Function: num_tasks
-inline size_t cudaFlow::num_tasks() const {
- return _cfg._nodes.size();
-}
-
-// Procedure: dump
-inline void cudaFlow::dump(std::ostream& os) const {
- _cfg.dump(os, nullptr, "");
-}
-
-// Procedure: dump
-inline void cudaFlow::dump_native_graph(std::ostream& os) const {
- cuda_dump_graph(os, _cfg._native_handle);
-}
-
-// ----------------------------------------------------------------------------
-// Graph building methods
-// ----------------------------------------------------------------------------
-
-// Function: noop
-inline cudaTask cudaFlow::noop() {
-
- auto node = _cfg.emplace_back(
- _cfg, std::in_place_type_t<cudaFlowNode::Empty>{}
- );
-
- TF_CHECK_CUDA(
- cudaGraphAddEmptyNode(
- &node->_native_handle, _cfg._native_handle, nullptr, 0
- ),
- "failed to create a no-operation (empty) node"
- );
-
- return cudaTask(node);
-}
-
-// Function: host
-template <typename C>
-cudaTask cudaFlow::host(C&& c) {
-
- auto node = _cfg.emplace_back(
- _cfg, std::in_place_type_t<cudaFlowNode::Host>{}, std::forward<C>(c)
- );
-
- auto h = std::get_if<cudaFlowNode::Host>(&node->_handle);
-
- cudaHostNodeParams p;
- p.fn = cudaFlowNode::Host::callback;
- p.userData = h;
-
- TF_CHECK_CUDA(
- cudaGraphAddHostNode(
- &node->_native_handle, _cfg._native_handle, nullptr, 0, &p
- ),
- "failed to create a host node"
- );
-
- return cudaTask(node);
-}
-
-// Function: kernel
-template <typename F, typename... ArgsT>
-cudaTask cudaFlow::kernel(
- dim3 g, dim3 b, size_t s, F f, ArgsT... args
-) {
-
- auto node = _cfg.emplace_back(
- _cfg, std::in_place_type_t<cudaFlowNode::Kernel>{}, (void*)f
- );
-
- cudaKernelNodeParams p;
- void* arguments[sizeof...(ArgsT)] = { (void*)(&args)... };
- p.func = (void*)f;
- p.gridDim = g;
- p.blockDim = b;
- p.sharedMemBytes = s;
- p.kernelParams = arguments;
- p.extra = nullptr;
-
- TF_CHECK_CUDA(
- cudaGraphAddKernelNode(
- &node->_native_handle, _cfg._native_handle, nullptr, 0, &p
- ),
- "failed to create a kernel task"
- );
-
- return cudaTask(node);
-}
-
-// Function: zero
-template <typename T, std::enable_if_t<
- is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>*
->
-cudaTask cudaFlow::zero(T* dst, size_t count) {
-
- auto node = _cfg.emplace_back(
- _cfg, std::in_place_type_t<cudaFlowNode::Memset>{}
- );
-
- auto p = cuda_get_zero_parms(dst, count);
-
- TF_CHECK_CUDA(
- cudaGraphAddMemsetNode(
- &node->_native_handle, _cfg._native_handle, nullptr, 0, &p
- ),
- "failed to create a memset (zero) task"
- );
-
- return cudaTask(node);
-}
-
-// Function: fill
-template <typename T, std::enable_if_t<
- is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>*
->
-cudaTask cudaFlow::fill(T* dst, T value, size_t count) {
-
- auto node = _cfg.emplace_back(
- _cfg, std::in_place_type_t<cudaFlowNode::Memset>{}
- );
-
- auto p = cuda_get_fill_parms(dst, value, count);
-
- TF_CHECK_CUDA(
- cudaGraphAddMemsetNode(
- &node->_native_handle, _cfg._native_handle, nullptr, 0, &p
- ),
- "failed to create a memset (fill) task"
- );
-
- return cudaTask(node);
-}
-
-// Function: copy
-template <
- typename T,
- std::enable_if_t<!std::is_same_v<T, void>, void>*
->
-cudaTask cudaFlow::copy(T* tgt, const T* src, size_t num) {
-
- auto node = _cfg.emplace_back(
- _cfg, std::in_place_type_t<cudaFlowNode::Memcpy>{}
- );
-
- auto p = cuda_get_copy_parms(tgt, src, num);
-
- TF_CHECK_CUDA(
- cudaGraphAddMemcpyNode(
- &node->_native_handle, _cfg._native_handle, nullptr, 0, &p
- ),
- "failed to create a memcpy (copy) task"
- );
-
- return cudaTask(node);
-}
-
-// Function: memset
-inline cudaTask cudaFlow::memset(void* dst, int ch, size_t count) {
-
- auto node = _cfg.emplace_back(
- _cfg, std::in_place_type_t<cudaFlowNode::Memset>{}
- );
-
- auto p = cuda_get_memset_parms(dst, ch, count);
-
- TF_CHECK_CUDA(
- cudaGraphAddMemsetNode(
- &node->_native_handle, _cfg._native_handle, nullptr, 0, &p
- ),
- "failed to create a memset task"
- );
-
- return cudaTask(node);
-}
-
-// Function: memcpy
-inline cudaTask cudaFlow::memcpy(void* tgt, const void* src, size_t bytes) {
-
- auto node = _cfg.emplace_back(
- _cfg, std::in_place_type_t<cudaFlowNode::Memcpy>{}
- );
-
- auto p = cuda_get_memcpy_parms(tgt, src, bytes);
-
- TF_CHECK_CUDA(
- cudaGraphAddMemcpyNode(
- &node->_native_handle, _cfg._native_handle, nullptr, 0, &p
- ),
- "failed to create a memcpy task"
- );
-
- return cudaTask(node);
-}
-
-// ------------------------------------------------------------------------
-// update methods
-// ------------------------------------------------------------------------
-
-// Function: host
-template <typename C>
-void cudaFlow::host(cudaTask task, C&& c) {
-
- if(task.type() != cudaTaskType::HOST) {
- TF_THROW(task, " is not a host task");
- }
-
- auto h = std::get_if<cudaFlowNode::Host>(&task._node->_handle);
-
- h->func = std::forward<C>(c);
-}
-
-// Function: update kernel parameters
-template <typename F, typename... ArgsT>
-void cudaFlow::kernel(
- cudaTask task, dim3 g, dim3 b, size_t s, F f, ArgsT... args
-) {
-
- if(task.type() != cudaTaskType::KERNEL) {
- TF_THROW(task, " is not a kernel task");
- }
-
- cudaKernelNodeParams p;
-
- void* arguments[sizeof...(ArgsT)] = { (void*)(&args)... };
- p.func = (void*)f;
- p.gridDim = g;
- p.blockDim = b;
- p.sharedMemBytes = s;
- p.kernelParams = arguments;
- p.extra = nullptr;
-
- TF_CHECK_CUDA(
- cudaGraphExecKernelNodeSetParams(_exe, task._node->_native_handle, &p),
- "failed to update kernel parameters on ", task
- );
-}
-
-// Function: update copy parameters
-template <typename T, std::enable_if_t<!std::is_same_v<T, void>, void>*>
-void cudaFlow::copy(cudaTask task, T* tgt, const T* src, size_t num) {
-
- if(task.type() != cudaTaskType::MEMCPY) {
- TF_THROW(task, " is not a memcpy task");
- }
-
- auto p = cuda_get_copy_parms(tgt, src, num);
-
- TF_CHECK_CUDA(
- cudaGraphExecMemcpyNodeSetParams(_exe, task._node->_native_handle, &p),
- "failed to update memcpy parameters on ", task
- );
-}
-
-// Function: update memcpy parameters
-inline void cudaFlow::memcpy(
- cudaTask task, void* tgt, const void* src, size_t bytes
-) {
-
- if(task.type() != cudaTaskType::MEMCPY) {
- TF_THROW(task, " is not a memcpy task");
- }
-
- auto p = cuda_get_memcpy_parms(tgt, src, bytes);
-
- TF_CHECK_CUDA(
- cudaGraphExecMemcpyNodeSetParams(_exe, task._node->_native_handle, &p),
- "failed to update memcpy parameters on ", task
- );
-}
-
-// Procedure: memset
-inline void cudaFlow::memset(cudaTask task, void* dst, int ch, size_t count) {
-
- if(task.type() != cudaTaskType::MEMSET) {
- TF_THROW(task, " is not a memset task");
- }
-
- auto p = cuda_get_memset_parms(dst, ch, count);
-
- TF_CHECK_CUDA(
- cudaGraphExecMemsetNodeSetParams(_exe, task._node->_native_handle, &p),
- "failed to update memset parameters on ", task
- );
-}
-
-// Procedure: fill
-template <typename T, std::enable_if_t<
- is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>*
->
-void cudaFlow::fill(cudaTask task, T* dst, T value, size_t count) {
-
- if(task.type() != cudaTaskType::MEMSET) {
- TF_THROW(task, " is not a memset task");
- }
-
- auto p = cuda_get_fill_parms(dst, value, count);
-
- TF_CHECK_CUDA(
- cudaGraphExecMemsetNodeSetParams(_exe, task._node->_native_handle, &p),
- "failed to update memset parameters on ", task
- );
-}
-
-// Procedure: zero
-template <typename T, std::enable_if_t<
- is_pod_v<T> && (sizeof(T)==1 || sizeof(T)==2 || sizeof(T)==4), void>*
->
-void cudaFlow::zero(cudaTask task, T* dst, size_t count) {
-
- if(task.type() != cudaTaskType::MEMSET) {
- TF_THROW(task, " is not a memset task");
- }
-
- auto p = cuda_get_zero_parms(dst, count);
-
- TF_CHECK_CUDA(
- cudaGraphExecMemsetNodeSetParams(_exe, task._node->_native_handle, &p),
- "failed to update memset parameters on ", task
- );
-}
-
-// Function: capture
-template <typename C>
-void cudaFlow::capture(cudaTask task, C c) {
-
- if(task.type() != cudaTaskType::SUBFLOW) {
- TF_THROW(task, " is not a subflow task");
- }
-
- // insert a subflow node
- // construct a captured flow from the callable
- auto node_handle = std::get_if<cudaFlowNode::Subflow>(&task._node->_handle);
- //node_handle->graph.clear();
-
- cudaFlowCapturer capturer;
- c(capturer);
-
- // obtain the optimized captured graph
- capturer._cfg._native_handle.reset(capturer.capture());
- node_handle->cfg = std::move(capturer._cfg);
-
- TF_CHECK_CUDA(
- cudaGraphExecChildGraphNodeSetParams(
- _exe,
- task._node->_native_handle,
- node_handle->cfg._native_handle
- ),
- "failed to update a captured child graph"
- );
-}
-
-// ----------------------------------------------------------------------------
-// captured flow
-// ----------------------------------------------------------------------------
-
-// Function: capture
-template <typename C>
-cudaTask cudaFlow::capture(C&& c) {
-
- // insert a subflow node
- auto node = _cfg.emplace_back(
- _cfg, std::in_place_type_t<cudaFlowNode::Subflow>{}
- );
-
- // construct a captured flow from the callable
- auto node_handle = std::get_if<cudaFlowNode::Subflow>(&node->_handle);
-
- // perform capturing
- cudaFlowCapturer capturer;
- c(capturer);
-
- // obtain the optimized captured graph
- capturer._cfg._native_handle.reset(capturer.capture());
-
- // move capturer's cudaFlow graph into node
- node_handle->cfg = std::move(capturer._cfg);
-
- TF_CHECK_CUDA(
- cudaGraphAddChildGraphNode(
- &node->_native_handle,
- _cfg._native_handle,
- nullptr,
- 0,
- node_handle->cfg._native_handle
- ),
- "failed to add a cudaFlow capturer task"
- );
-
- return cudaTask(node);
-}
-
-// ----------------------------------------------------------------------------
-// run method
-// ----------------------------------------------------------------------------
-
-// Procedure: run
-inline void cudaFlow::run(cudaStream_t stream) {
- if(!_exe) {
- _exe.instantiate(_cfg._native_handle);
- }
- _exe.launch(stream);
- _cfg._state = cudaFlowGraph::OFFLOADED;
-}
-
-// Function: native_cfg
-inline cudaGraph_t cudaFlow::native_graph() {
- return _cfg._native_handle;
-}
-
-// Function: native_executable
-inline cudaGraphExec_t cudaFlow::native_executable() {
- return _exe;
-}
+/**
+@brief default smart pointer type to manage a `cudaGraphExec_t` object with unique ownership
+*/
+using cudaGraphExec = cudaGraphExecBase<cudaGraphExecCreator, cudaGraphExecDeleter>;
} // end of namespace tf -----------------------------------------------------
--- /dev/null
+#pragma once
+
+#include <memory>
+#include <string>
+
+namespace tf {
+
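+// LazyString defers allocation of its underlying std::string: default or
+// empty construction stores only a null pointer, and the string buffer is
+// allocated on assignment or on construction from a non-empty string.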
+class LazyString {
+
+ public:
+
+ LazyString() = default;
+
+ LazyString(const std::string& str) :
+ _str(str.empty() ? nullptr : std::make_unique<std::string>(str)) {
+ }
+
+ LazyString(std::string&& str) :
+ _str(str.empty() ? nullptr : std::make_unique<std::string>(std::move(str))) {
+ }
+
+ LazyString(const char* str) :
+ _str((!str || str[0] == '\0') ? nullptr : std::make_unique<std::string>(str)) {
+ }
+
+  // implicit conversion to a const string reference (an empty string if unset)
+ operator const std::string& () const noexcept {
+ static const std::string empty_string;
+ return _str ? *_str : empty_string;
+ }
+
+ LazyString& operator = (const std::string& str) {
+ if(_str == nullptr) {
+ _str = std::make_unique<std::string>(str);
+ }
+ else {
+ *_str = str;
+ }
+ return *this;
+ }
+
+ LazyString& operator = (std::string&& str) {
+ if(_str == nullptr) {
+ _str = std::make_unique<std::string>(std::move(str));
+ }
+ else {
+ *_str = std::move(str);
+ }
+ return *this;
+ }
+
+ bool empty() const noexcept {
+ return !_str || _str->empty();
+ }
+
+ size_t size() const noexcept {
+ return _str ? _str->size() : 0;
+ }
+
+ friend std::ostream& operator<<(std::ostream& os, const LazyString& ls) {
+ os << (ls._str ? *ls._str : "");
+ return os;
+ }
+
+ private:
+
+ std::unique_ptr<std::string> _str;
+
+};
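+
+// A brief usage sketch (illustrative only; assumes <cassert> and <iostream>):
+// empty values allocate nothing, while the first non-empty assignment
+// allocates a single std::string that can be streamed like a plain string.
+//
+//   tf::LazyString unnamed;                 // no allocation
+//   tf::LazyString named("saxpy kernel");   // one heap-allocated std::string
+//   unnamed = "renamed later";              // allocates on first assignment
+//   std::cout << named << '\n';             // prints "saxpy kernel"
+//   assert(!named.empty() && named.size() == 12 && !unnamed.empty());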
+
+} // end of namespace tf -------------------------------------------------------------------------
#pragma once
#include <atomic>
+#include <array>
+#include <chrono>
+#include <numeric>
+#include <type_traits>
namespace tf {
}
/**
- * @brief Computes the floor of log2 of the given positive integer.
+ * @brief computes the floor of the base-2 logarithm of a number using count-leading-zeros (CLZ).
*
- * This function calculates the largest integer `log` such that `2^log <= n`.
- *
- * @tparam T The type of the input. Must be an integral type.
- * @param n The positive integer to compute log2 for. Assumes `n > 0`.
- * @return The floor of log2 of `n`.
- *
- * @attention This function is constexpr and can be evaluated at compile time.
+ * This function efficiently calculates the floor of `log2(n)` for both 32-bit and 64-bit integers.
*
+ * @tparam T unsigned integer type (e.g., uint32_t or uint64_t).
+ * @param n input number.
+ * @return floor of `log2(n)`
*/
-template<typename T>
-constexpr int log2(T n) {
- int log = 0;
+template <typename T>
+constexpr size_t floor_log2(T n) {
+
+  static_assert(std::is_unsigned_v<T>, "floor_log2 only supports unsigned integer types");
+
+#if defined(_MSC_VER)
+ unsigned long index;
+ if constexpr (sizeof(T) == 8) {
+ _BitScanReverse64(&index, n);
+ } else {
+ _BitScanReverse(&index, static_cast<unsigned long>(n));
+ }
+ return static_cast<size_t>(index);
+#elif defined(__GNUC__) || defined(__clang__)
+ if constexpr (sizeof(T) == 8) {
+ return 63 - __builtin_clzll(n);
+ } else {
+ return 31 - __builtin_clz(n);
+ }
+#else
+  // Portable fallback: shift right until n becomes zero, counting the shifts
+ size_t log = 0;
while (n >>= 1) {
++log;
}
return log;
+#endif
+}
+
+/**
+@brief returns the floor of `log2(N)` at compile time
+*/
+template<size_t N>
+constexpr size_t static_floor_log2() {
+ return (N < 2) ? 0 : 1 + static_floor_log2<N / 2>();
}
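+
+// Example (illustrative): floor_log2 uses the CLZ/BSR intrinsics where they
+// are available, while static_floor_log2 folds to a constant at compile time.
+//
+//   static_assert(tf::static_floor_log2<1024>() == 10);
+//   assert(tf::floor_log2(10u) == 3);   // 2^3 = 8 <= 10 < 2^4 = 16
+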
/**
return std::chrono::system_clock::now().time_since_epoch().count();
}
+/**
+ * @brief counts the number of trailing zeros in an integer.
+ *
+ * This function provides a portable implementation for counting the number of
+ * trailing zeros across different platforms and integer sizes (32-bit and 64-bit).
+ *
+ * @tparam T unsigned integer type (32-bit or 64-bit).
+ * @param x non-zero integer to count trailing zeros from
+ * @return the number of trailing zeros in @c x
+ *
+ * @attention
+ * The behavior is undefined when @c x is 0.
+ */
+template <typename T, typename = std::enable_if_t<std::is_unsigned_v<T>>>
+auto ctz(T x) {
+
+ #if defined(_MSC_VER)
+ unsigned long index;
+ if constexpr (sizeof(T) == 8) {
+ _BitScanForward64(&index, x);
+ } else {
+ _BitScanForward(&index, (unsigned long)x);
+ }
+ return index;
+ #elif defined(__GNUC__) || defined(__clang__)
+ if constexpr (sizeof(T) == 8) {
+ return __builtin_ctzll(x);
+ } else {
+ return __builtin_ctz(x);
+ }
+ #else
+ size_t r = 0;
+ while ((x & 1) == 0) {
+ x >>= 1;
+ r++;
+ }
+ return r;
+ #endif
+}
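+
+// Example (illustrative): ctz(x) returns the index of the lowest set bit,
+// which is useful for scanning bitmasks from the least-significant end.
+//
+//   assert(tf::ctz(0b1000u) == 3);
+//   assert(tf::ctz(uint64_t{1} << 40) == 40);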
+
+// ------------------------------------------------------------------------------------------------
+// coprime
+// ------------------------------------------------------------------------------------------------
+
+/**
+ * @brief computes a coprime of a given number
+ *
+ * This function finds the largest number less than N that is coprime (i.e., has a greatest common divisor of 1) with @c N.
+ * If @c N is less than 3, it returns 1 as a default coprime.
+ *
+ * @param N input number for which a coprime is to be found.
+ * @return the largest number < @c N that is coprime to N
+ */
+constexpr size_t coprime(size_t N) {
+ if(N < 3) {
+ return 1;
+ }
+ for (size_t x = N; --x > 0;) {
+ if (std::gcd(x, N) == 1) {
+ return x;
+ }
+ }
+ return 1;
+}
+
+/**
+ * @brief generates a compile-time array of coprimes for numbers from 0 to N-1
+ *
+ * This function constructs a constexpr array where each element at index `i` contains a coprime of `i`
+ * (the largest number less than `i` that is coprime to it).
+ *
+ * @tparam N the size of the array to generate (must be greater than 0).
+ * @return a constexpr array of size @c N where each index holds a coprime of its value.
+ */
+template <size_t N>
+constexpr std::array<size_t, N> make_coprime_lut() {
+ static_assert(N>0, "N must be greater than 0");
+ std::array<size_t, N> coprimes{};
+ for (size_t n = 0; n < N; ++n) {
+ coprimes[n] = coprime(n);
+ }
+ return coprimes;
+}
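+
+// Example (illustrative; the traversal below is an assumption about usage, not
+// taken from this header): stepping through N slots with a stride coprime to N
+// visits every slot exactly once before wrapping around.
+//
+//   constexpr auto lut = tf::make_coprime_lut<8>();
+//   static_assert(lut[5] == 4);   // gcd(4, 5) == 1
+//   for(size_t i = 0, s = 0; i < 5; ++i, s = (s + lut[5]) % 5) {
+//     // s visits 0, 4, 3, 2, 1 -- each slot exactly once
+//   }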
+
+
//class XorShift64 {
//
// public:
#include <atomic>
#include <optional>
+#include "os.hpp"
+
namespace tf {
/**
alignas(2*TF_CACHELINE_SIZE) std::atomic<uint64_t> _dequeue_pos;
};
+// ------------------------------------------------------------------------------------------------
+// specialization for pointer type
+// ------------------------------------------------------------------------------------------------
+
+template<typename T, size_t LogSize>
+class MPMC <T*, LogSize> {
+
+ constexpr static uint64_t BufferSize = 1ull << LogSize;
+ constexpr static uint64_t BufferMask = (BufferSize - 1);
+
+ static_assert((BufferSize >= 2) && ((BufferSize & (BufferSize - 1)) == 0));
+
+public:
+
+  /**
+   * Constructs a bounded multi-producer, multi-consumer queue
+   *
+   * Note: Due to the algorithm used, the buffer size (2^LogSize) must be
+   * a power of two and must be greater than or equal to two.
+   */
+ explicit MPMC() {
+ for (size_t i = 0; i < _buffer.size(); i++) {
+ _buffer[i].sequence.store(i, std::memory_order_relaxed);
+ }
+ _enqueue_pos.store(0, std::memory_order_relaxed);
+ _dequeue_pos.store(0, std::memory_order_relaxed);
+ }
+
+
+ /**
+ * Enqueues an item into the queue
+ *
+ * @param data Argument to place into the array
+   * @return false if the queue was full (and enqueuing failed),
+ * true otherwise
+ */
+ bool try_enqueue(T* data) {
+ Cell *cell;
+ auto pos = _enqueue_pos.load(std::memory_order_relaxed);
+ for (; ;) {
+ cell = &_buffer[pos & BufferMask];
+ auto seq = cell->sequence.load(std::memory_order_acquire);
+ if (seq == pos) {
+ if (_enqueue_pos.compare_exchange_weak(pos, pos + 1,
+ std::memory_order_relaxed)) {
+ break;
+ }
+ } else if (seq < pos) {
+ return false;
+ } else {
+ pos = _enqueue_pos.load(std::memory_order_relaxed);
+ }
+ }
+
+ cell->data = data;
+ cell->sequence.store(pos + 1, std::memory_order_release);
+
+ return true;
+ }
+
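+  /**
+   * Enqueues an item into the queue, spinning until a slot becomes available
+   *
+   * @param data pointer to place into the queue
+   */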
+ void enqueue(T* data) {
+
+ Cell *cell;
+ auto pos = _enqueue_pos.load(std::memory_order_relaxed);
+
+ for (; ;) {
+ cell = &_buffer[pos & BufferMask];
+ auto seq = cell->sequence.load(std::memory_order_acquire);
+ if (seq == pos) {
+ if (_enqueue_pos.compare_exchange_weak(pos, pos + 1,
+ std::memory_order_relaxed)) {
+ break;
+ }
+ }
+ else {
+ pos = _enqueue_pos.load(std::memory_order_relaxed);
+ }
+ }
+
+ cell->data = data;
+ cell->sequence.store(pos + 1, std::memory_order_release);
+ }
+
+  /**
+   * Dequeues an item from the queue
+   *
+   * @return nullptr if the queue was empty (and dequeuing failed),
+   *         the dequeued pointer otherwise
+   */
+ T* try_dequeue() {
+ Cell *cell;
+ auto pos = _dequeue_pos.load(std::memory_order_relaxed);
+ for (; ;) {
+ cell = &_buffer[pos & BufferMask];
+ auto seq = cell->sequence.load(std::memory_order_acquire);
+ if (seq == pos + 1) {
+ if (_dequeue_pos.compare_exchange_weak(pos, pos + 1,
+ std::memory_order_relaxed)) {
+ break;
+ }
+ } else if (seq < (pos + 1)) {
+ return nullptr;
+ } else {
+ pos = _dequeue_pos.load(std::memory_order_relaxed);
+ }
+ }
+
+ auto data = cell->data;
+ cell->sequence.store(pos + BufferMask + 1, std::memory_order_release);
+
+ return data;
+ }
+
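+  /**
+   * Checks if the queue appears empty; the result is approximate under
+   * concurrent access
+   */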
+ bool empty() const {
+ auto beg = _dequeue_pos.load(std::memory_order_relaxed);
+ auto end = _enqueue_pos.load(std::memory_order_relaxed);
+ return beg >= end;
+ }
+
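+  /**
+   * Returns the fixed capacity (2^LogSize) of the queue
+   */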
+ size_t capacity() const {
+ return BufferSize;
+ }
+
+private:
+
+ struct Cell {
+ T* data;
+ std::atomic<uint64_t> sequence;
+ };
+
+ alignas(2*TF_CACHELINE_SIZE) std::array<Cell, BufferSize> _buffer;
+ alignas(2*TF_CACHELINE_SIZE) std::atomic<uint64_t> _enqueue_pos;
+ alignas(2*TF_CACHELINE_SIZE) std::atomic<uint64_t> _dequeue_pos;
+};
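+
+// Usage sketch (illustrative): the pointer specialization with 2^10 slots;
+// try_enqueue/try_dequeue never block, while enqueue spins until space frees up.
+//
+//   tf::MPMC<int*, 10> queue;           // capacity() == 1024
+//   int item = 42;
+//   if(queue.try_enqueue(&item)) {      // false when the queue is full
+//     int* p = queue.try_dequeue();     // nullptr when the queue is empty
+//     assert(p == &item && *p == 42);
+//   }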
+
+/**
+ * RunQueue is a fixed-size, partially non-blocking deque of Work items.
+ * Operations on front of the queue must be done by a single thread (owner),
+ * operations on back of the queue can be done by multiple threads concurrently.
+ *
+ * Algorithm outline:
+ * All remote threads operating on the queue back are serialized by a mutex.
+ * This ensures that at most two threads access state: owner and one remote
+ * thread (Size aside). The algorithm ensures that the occupied region of the
+ * underlying array is logically continuous (can wraparound, but no stray
+ * occupied elements). Owner operates on one end of this region, remote thread
+ * operates on the other end. Synchronization between these threads
+ * (potential consumption of the last element and take up of the last empty
+ * element) happens by means of state variable in each element. States are:
+ * empty, busy (in process of insertion or removal) and ready. Threads claim
+ * elements (empty->busy and ready->busy transitions) by means of a CAS
+ * operation. The finishing transitions (busy->empty and busy->ready) are done
+ * with plain store as the element is exclusively owned by the current thread.
+ *
+ * Note: we could permit only pointers as elements, then we would not need
+ * separate state variable as null/non-null pointer value would serve as state,
+ * but that would require malloc/free per operation for large, complex values
+ * (and this is designed to store std::function<()>).
+template <typename Work, unsigned kSize>
+class RunQueue {
+ public:
+ RunQueue() : front_(0), back_(0) {
+ // require power-of-two for fast masking
+ eigen_plain_assert((kSize & (kSize - 1)) == 0);
+ eigen_plain_assert(kSize > 2); // why would you do this?
+ eigen_plain_assert(kSize <= (64 << 10)); // leave enough space for counter
+ for (unsigned i = 0; i < kSize; i++) array_[i].state.store(kEmpty, std::memory_order_relaxed);
+ }
+
+ ~RunQueue() { eigen_plain_assert(Size() == 0); }
+
+ // PushFront inserts w at the beginning of the queue.
+ // If queue is full returns w, otherwise returns default-constructed Work.
+ Work PushFront(Work w) {
+ unsigned front = front_.load(std::memory_order_relaxed);
+ Elem* e = &array_[front & kMask];
+ uint8_t s = e->state.load(std::memory_order_relaxed);
+ if (s != kEmpty || !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire)) return w;
+ front_.store(front + 1 + (kSize << 1), std::memory_order_relaxed);
+ e->w = std::move(w);
+ e->state.store(kReady, std::memory_order_release);
+ return Work();
+ }
+
+ // PopFront removes and returns the first element in the queue.
+ // If the queue was empty returns default-constructed Work.
+ Work PopFront() {
+ unsigned front = front_.load(std::memory_order_relaxed);
+ Elem* e = &array_[(front - 1) & kMask];
+ uint8_t s = e->state.load(std::memory_order_relaxed);
+ if (s != kReady || !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire)) return Work();
+ Work w = std::move(e->w);
+ e->state.store(kEmpty, std::memory_order_release);
+ front = ((front - 1) & kMask2) | (front & ~kMask2);
+ front_.store(front, std::memory_order_relaxed);
+ return w;
+ }
+
+ // PushBack adds w at the end of the queue.
+ // If queue is full returns w, otherwise returns default-constructed Work.
+ Work PushBack(Work w) {
+ EIGEN_MUTEX_LOCK lock(mutex_);
+ unsigned back = back_.load(std::memory_order_relaxed);
+ Elem* e = &array_[(back - 1) & kMask];
+ uint8_t s = e->state.load(std::memory_order_relaxed);
+ if (s != kEmpty || !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire)) return w;
+ back = ((back - 1) & kMask2) | (back & ~kMask2);
+ back_.store(back, std::memory_order_relaxed);
+ e->w = std::move(w);
+ e->state.store(kReady, std::memory_order_release);
+ return Work();
+ }
+
+ // PopBack removes and returns the last elements in the queue.
+ Work PopBack() {
+ if (Empty()) return Work();
+ EIGEN_MUTEX_LOCK lock(mutex_);
+ unsigned back = back_.load(std::memory_order_relaxed);
+ Elem* e = &array_[back & kMask];
+ uint8_t s = e->state.load(std::memory_order_relaxed);
+ if (s != kReady || !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire)) return Work();
+ Work w = std::move(e->w);
+ e->state.store(kEmpty, std::memory_order_release);
+ back_.store(back + 1 + (kSize << 1), std::memory_order_relaxed);
+ return w;
+ }
+
+ // PopBackHalf removes and returns half last elements in the queue.
+ // Returns number of elements removed.
+ unsigned PopBackHalf(std::vector<Work>* result) {
+ if (Empty()) return 0;
+ EIGEN_MUTEX_LOCK lock(mutex_);
+ unsigned back = back_.load(std::memory_order_relaxed);
+ unsigned size = Size();
+ unsigned mid = back;
+ if (size > 1) mid = back + (size - 1) / 2;
+ unsigned n = 0;
+ unsigned start = 0;
+ for (; static_cast<int>(mid - back) >= 0; mid--) {
+ Elem* e = &array_[mid & kMask];
+ uint8_t s = e->state.load(std::memory_order_relaxed);
+ if (n == 0) {
+ if (s != kReady || !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire)) continue;
+ start = mid;
+ } else {
+ // Note: no need to store temporal kBusy, we exclusively own these
+ // elements.
+ eigen_plain_assert(s == kReady);
+ }
+ result->push_back(std::move(e->w));
+ e->state.store(kEmpty, std::memory_order_release);
+ n++;
+ }
+ if (n != 0) back_.store(start + 1 + (kSize << 1), std::memory_order_relaxed);
+ return n;
+ }
+
+ // Size returns current queue size.
+ // Can be called by any thread at any time.
+ unsigned Size() const { return SizeOrNotEmpty<true>(); }
+
+ // Empty tests whether container is empty.
+ // Can be called by any thread at any time.
+ bool Empty() const { return SizeOrNotEmpty<false>() == 0; }
+
+ // Delete all the elements from the queue.
+ void Flush() {
+ while (!Empty()) {
+ PopFront();
+ }
+ }
+
+ private:
+ static const unsigned kMask = kSize - 1;
+ static const unsigned kMask2 = (kSize << 1) - 1;
+
+ enum State {
+ kEmpty,
+ kBusy,
+ kReady,
+ };
+
+ struct Elem {
+ std::atomic<uint8_t> state;
+ Work w;
+ };
+
+ // Low log(kSize) + 1 bits in front_ and back_ contain rolling index of
+ // front/back, respectively. The remaining bits contain modification counters
+ // that are incremented on Push operations. This allows us to (1) distinguish
+ // between empty and full conditions (if we would use log(kSize) bits for
+ // position, these conditions would be indistinguishable); (2) obtain
+ // consistent snapshot of front_/back_ for Size operation using the
+ // modification counters.
+ EIGEN_ALIGN_TO_AVOID_FALSE_SHARING std::atomic<unsigned> front_;
+ EIGEN_ALIGN_TO_AVOID_FALSE_SHARING std::atomic<unsigned> back_;
+ EIGEN_MUTEX mutex_; // guards `PushBack` and `PopBack` (accesses `back_`)
+
+ EIGEN_ALIGN_TO_AVOID_FALSE_SHARING Elem array_[kSize];
+
+ // SizeOrNotEmpty returns current queue size; if NeedSizeEstimate is false,
+ // only whether the size is 0 is guaranteed to be correct.
+ // Can be called by any thread at any time.
+ template <bool NeedSizeEstimate>
+ unsigned SizeOrNotEmpty() const {
+ // Emptiness plays critical role in thread pool blocking. So we go to great
+ // effort to not produce false positives (claim non-empty queue as empty).
+ unsigned front = front_.load(std::memory_order_acquire);
+ for (;;) {
+ // Capture a consistent snapshot of front/tail.
+ unsigned back = back_.load(std::memory_order_acquire);
+ unsigned front1 = front_.load(std::memory_order_relaxed);
+ if (front != front1) {
+ front = front1;
+ std::atomic_thread_fence(std::memory_order_acquire);
+ continue;
+ }
+ if (NeedSizeEstimate) {
+ return CalculateSize(front, back);
+ } else {
+ // This value will be 0 if the queue is empty, and undefined otherwise.
+ unsigned maybe_zero = ((front ^ back) & kMask2);
+ // Queue size estimate must agree with maybe zero check on the queue
+ // empty/non-empty state.
+ eigen_assert((CalculateSize(front, back) == 0) == (maybe_zero == 0));
+ return maybe_zero;
+ }
+ }
+ }
+
+ EIGEN_ALWAYS_INLINE unsigned CalculateSize(unsigned front, unsigned back) const {
+ int size = (front & kMask2) - (back & kMask2);
+ // Fix overflow.
+ if (EIGEN_PREDICT_FALSE(size < 0)) size += 2 * kSize;
+ // Order of modification in push/pop is crafted to make the queue look
+ // larger than it is during concurrent modifications. E.g. push can
+ // increment size before the corresponding pop has decremented it.
+ // So the computed size can be up to kSize + 1, fix it.
+ if (EIGEN_PREDICT_FALSE(size > static_cast<int>(kSize))) size = kSize;
+ return static_cast<unsigned>(size);
+ }
+
+ RunQueue(const RunQueue&) = delete;
+ void operator=(const RunQueue&) = delete;
+};
+*/
+
+
} // end of namespace tf -----------------------------------------------------
+
#include <cstdlib>
#include <cstdio>
#include <string>
+#include <thread>
#define TF_OS_LINUX 0
#define TF_OS_DRAGONFLY 0
#endif
}
+/**
+@brief pauses the CPU for a specified number of iterations
+*/
+inline void pause(size_t count) {
+ while(count-- > 0) pause();
+}
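+
+// Example (illustrative; `flag` and the cap of 1024 are assumptions for the
+// sketch): a simple bounded exponential backoff built on top of pause().
+//
+//   size_t backoff = 1;
+//   while(!flag.load(std::memory_order_acquire)) {
+//     pause(backoff);
+//     backoff = (backoff < 1024) ? backoff * 2 : backoff;
+//   }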
+
/**
* @brief spins until the given predicate becomes true
*
private:
template <typename, unsigned> friend struct SmallVectorStorage;
+
template <typename X>
struct AlignedUnionType {
- alignas(X) std::byte buff[std::max(sizeof(std::byte), sizeof(X))];
+ static constexpr std::size_t max_size = (sizeof(std::byte) > sizeof(X)) ? sizeof(std::byte) : sizeof(X);
+ alignas(X) std::byte buff[max_size];
};
// Allocate raw space for N elements of type T. If T has a ctor or dtor, we