"Always use the bundled taskflow header library instead of an external one."
OFF)
-set(TASKFLOW_FOLDER "${CMAKE_SOURCE_DIR}/bundled/taskflow-3.6.0")
+set(TASKFLOW_FOLDER "${CMAKE_SOURCE_DIR}/bundled/taskflow-3.7.0")
macro(feature_taskflow_configure_bundled)
- set(TASKFLOW_VERSION "3.6.0")
+ set(TASKFLOW_VERSION "3.7.0")
- list(APPEND DEAL_II_BUNDLED_INCLUDE_DIRS ${TASKFLOW_FOLDER}/include)
+ list(APPEND DEAL_II_BUNDLED_INCLUDE_DIRS ${TASKFLOW_FOLDER})
endmacro()
+++ /dev/null
-#pragma once
-
-#include "launch.hpp"
-
-namespace tf {
-
-namespace detail {
-
-// Function: make_reduce_task
-template <typename B, typename E, typename T, typename O, typename P>
-TF_FORCE_INLINE auto make_reduce_task(B beg, E end, T& init, O bop, P&& part) {
-
- using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
- using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
- using namespace std::string_literals;
-
- return
- [b=beg, e=end, &r=init, bop, part=std::forward<P>(part)]
- (Runtime& rt) mutable {
-
- // fetch the iterator values
- B_t beg = b;
- E_t end = e;
-
- size_t W = rt.executor().num_workers();
- size_t N = std::distance(beg, end);
-
- // only myself - no need to spawn another graph
- if(W <= 1 || N <= part.chunk_size()) {
- for(; beg!=end; r = bop(r, *beg++));
- return;
- }
-
- if(N < W) {
- W = N;
- }
-
- std::mutex mtx;
-
- // static partitioner
- if constexpr(std::is_same_v<std::decay_t<P>, StaticPartitioner>) {
-
- size_t chunk_size;
-
- for(size_t w=0, curr_b=0; w<W && curr_b < N; ++w, curr_b += chunk_size) {
-
- // we force chunk size to be at least two because the temporary
- // variable sum need to avoid copy at the first step
- chunk_size = std::max(size_t{2}, part.adjusted_chunk_size(N, W, w));
-
- launch_loop(W, w, rt, [=, &bop, &mtx, &r, &part] () mutable {
-
- std::advance(beg, curr_b);
-
- if(N - curr_b == 1) {
- std::lock_guard<std::mutex> lock(mtx);
- r = bop(r, *beg);
- return;
- }
-
- auto beg1 = beg++;
- auto beg2 = beg++;
- T sum = bop(*beg1, *beg2);
-
- // loop reduce
- part.loop(N, W, curr_b, chunk_size,
- [&, prev_e=curr_b+2](size_t curr_b, size_t curr_e) mutable {
-
- if(curr_b > prev_e) {
- std::advance(beg, curr_b - prev_e);
- }
- else {
- curr_b = prev_e;
- }
-
- for(size_t x=curr_b; x<curr_e; x++, beg++) {
- sum = bop(sum, *beg);
- }
- prev_e = curr_e;
- }
- );
-
- // final reduce
- std::lock_guard<std::mutex> lock(mtx);
- r = bop(r, sum);
-
- });
- }
- rt.join();
- }
- // dynamic partitioner
- else {
- std::atomic<size_t> next(0);
- launch_loop(N, W, rt, next, part, [=, &bop, &mtx, &next, &r, &part] () mutable {
- // pre-reduce
- size_t s0 = next.fetch_add(2, std::memory_order_relaxed);
-
- if(s0 >= N) {
- return;
- }
-
- std::advance(beg, s0);
-
- if(N - s0 == 1) {
- std::lock_guard<std::mutex> lock(mtx);
- r = bop(r, *beg);
- return;
- }
-
- auto beg1 = beg++;
- auto beg2 = beg++;
-
- T sum = bop(*beg1, *beg2);
-
- // loop reduce
- part.loop(N, W, next,
- [&, prev_e=s0+2](size_t curr_b, size_t curr_e) mutable {
- std::advance(beg, curr_b - prev_e);
- for(size_t x=curr_b; x<curr_e; x++, beg++) {
- sum = bop(sum, *beg);
- }
- prev_e = curr_e;
- }
- );
-
- // final reduce
- std::lock_guard<std::mutex> lock(mtx);
- r = bop(r, sum);
- });
- }
- };
-}
-
-// Function: make_transform_reduce_task
-template <typename B, typename E, typename T, typename BOP, typename UOP, typename P>
-TF_FORCE_INLINE auto make_transform_reduce_task(
- B beg, E end, T& init, BOP bop, UOP uop, P&& part
-) {
-
- using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
- using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
- using namespace std::string_literals;
-
- return
- [b=beg, e=end, &r=init, bop, uop, part=std::forward<P>(part)]
- (Runtime& rt) mutable {
-
- // fetch the iterator values
- B_t beg = b;
- E_t end = e;
-
- size_t W = rt.executor().num_workers();
- size_t N = std::distance(beg, end);
-
- // only myself - no need to spawn another graph
- if(W <= 1 || N <= part.chunk_size()) {
- for(; beg!=end; r = bop(std::move(r), uop(*beg++)));
- return;
- }
-
- if(N < W) {
- W = N;
- }
-
- std::mutex mtx;
-
- // static partitioner
- if constexpr(std::is_same_v<std::decay_t<P>, StaticPartitioner>) {
-
- size_t chunk_size;
-
- for(size_t w=0, curr_b=0; w<W && curr_b < N; ++w, curr_b += chunk_size) {
-
- chunk_size = part.adjusted_chunk_size(N, W, w);
-
- launch_loop(W, w, rt, [=, &bop, &uop, &mtx, &r, &part] () mutable {
-
- std::advance(beg, curr_b);
-
- if(N - curr_b == 1) {
- std::lock_guard<std::mutex> lock(mtx);
- r = bop(std::move(r), uop(*beg));
- return;
- }
-
- //auto beg1 = beg++;
- //auto beg2 = beg++;
- //T sum = bop(uop(*beg1), uop(*beg2));
-
- T sum = (chunk_size == 1) ? uop(*beg++) : bop(uop(*beg++), uop(*beg++));
-
- // loop reduce
- part.loop(N, W, curr_b, chunk_size,
- [&, prev_e=curr_b+(chunk_size == 1 ? 1 : 2)]
- (size_t curr_b, size_t curr_e) mutable {
- if(curr_b > prev_e) {
- std::advance(beg, curr_b - prev_e);
- }
- else {
- curr_b = prev_e;
- }
- for(size_t x=curr_b; x<curr_e; x++, beg++) {
- sum = bop(std::move(sum), uop(*beg));
- }
- prev_e = curr_e;
- }
- );
-
- // final reduce
- std::lock_guard<std::mutex> lock(mtx);
- r = bop(std::move(r), std::move(sum));
-
- });
- }
-
- rt.join();
- }
- // dynamic partitioner
- else {
- std::atomic<size_t> next(0);
-
- launch_loop(N, W, rt, next, part, [=, &bop, &uop, &mtx, &next, &r, &part] () mutable {
-
- // pre-reduce
- size_t s0 = next.fetch_add(2, std::memory_order_relaxed);
-
- if(s0 >= N) {
- return;
- }
-
- std::advance(beg, s0);
-
- if(N - s0 == 1) {
- std::lock_guard<std::mutex> lock(mtx);
- r = bop(std::move(r), uop(*beg));
- return;
- }
-
- auto beg1 = beg++;
- auto beg2 = beg++;
-
- T sum = bop(uop(*beg1), uop(*beg2));
-
- // loop reduce
- part.loop(N, W, next,
- [&, prev_e=s0+2](size_t curr_b, size_t curr_e) mutable {
- std::advance(beg, curr_b - prev_e);
- for(size_t x=curr_b; x<curr_e; x++, beg++) {
- sum = bop(std::move(sum), uop(*beg));
- }
- prev_e = curr_e;
- }
- );
-
- // final reduce
- std::lock_guard<std::mutex> lock(mtx);
- r = bop(std::move(r), std::move(sum));
- });
- }
- };
-}
-
-} // end of namespace detail -------------------------------------------------
-
-// ----------------------------------------------------------------------------
-// default reduction
-// ----------------------------------------------------------------------------
-
-// Function: reduce
-template <typename B, typename E, typename T, typename O, typename P>
-Task FlowBuilder::reduce(B beg, E end, T& init, O bop, P&& part) {
- return emplace(detail::make_reduce_task(
- beg, end, init, bop, std::forward<P>(part)
- ));
-}
-
-// ----------------------------------------------------------------------------
-// default transform and reduction
-// ----------------------------------------------------------------------------
-
-// Function: transform_reduce
-template <typename B, typename E, typename T, typename BOP, typename UOP, typename P>
-Task FlowBuilder::transform_reduce(
- B beg, E end, T& init, BOP bop, UOP uop, P&& part
-) {
- return emplace(detail::make_transform_reduce_task(
- beg, end, init, bop, uop, std::forward<P>(part)
- ));
-}
-
-} // end of namespace tf -----------------------------------------------------
-
-
-
-
+++ /dev/null
-#pragma once
-
-#include "executor.hpp"
-
-// https://hackmd.io/@sysprog/concurrency-atomics
-
-namespace tf {
-
-// ----------------------------------------------------------------------------
-// Async
-// ----------------------------------------------------------------------------
-
-// Function: async
-template <typename F>
-auto Executor::async(const std::string& name, F&& f) {
-
- _increment_topology();
-
- using R = std::invoke_result_t<std::decay_t<F>>;
-
- std::promise<R> p;
- auto fu{p.get_future()};
-
- auto node = node_pool.animate(
- name, 0, nullptr, nullptr, 0,
- std::in_place_type_t<Node::Async>{},
- _make_promised_async(std::move(p), std::forward<F>(f))
- );
-
- _schedule_async_task(node);
-
- return fu;
-}
-
-// Function: async
-template <typename F>
-auto Executor::async(F&& f) {
- return async("", std::forward<F>(f));
-}
-
-// ----------------------------------------------------------------------------
-// Silent Async
-// ----------------------------------------------------------------------------
-
-// Function: silent_async
-template <typename F>
-void Executor::silent_async(const std::string& name, F&& f) {
-
- _increment_topology();
-
- auto node = node_pool.animate(
- name, 0, nullptr, nullptr, 0,
- std::in_place_type_t<Node::Async>{}, std::forward<F>(f)
- );
-
- _schedule_async_task(node);
-}
-
-// Function: silent_async
-template <typename F>
-void Executor::silent_async(F&& f) {
- silent_async("", std::forward<F>(f));
-}
-
-// ----------------------------------------------------------------------------
-// Async Helper Methods
-// ----------------------------------------------------------------------------
-
-// Function: _make_promised_async
-template <typename R, typename F>
-auto Executor::_make_promised_async(std::promise<R>&& p, F&& func) {
- return [p=make_moc(std::move(p)), func=std::forward<F>(func)]() mutable {
- if constexpr(std::is_same_v<R, void>) {
- func();
- p.object.set_value();
- }
- else {
- p.object.set_value(func());
- }
- };
-}
-
-// Procedure: _schedule_async_task
-inline void Executor::_schedule_async_task(Node* node) {
- if(auto w = _this_worker(); w) {
- _schedule(*w, node);
- }
- else{
- _schedule(node);
- }
-}
-
-// Procedure: _tear_down_async
-inline void Executor::_tear_down_async(Node* node) {
- // from runtime
- if(node->_parent) {
- node->_parent->_join_counter.fetch_sub(1, std::memory_order_release);
- }
- // from executor
- else {
- _decrement_topology_and_notify();
- }
- node_pool.recycle(node);
-}
-
-// ----------------------------------------------------------------------------
-// Silent Dependent Async
-// ----------------------------------------------------------------------------
-
-// Function: silent_dependent_async
-template <typename F, typename... Tasks,
- std::enable_if_t<all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>*
->
-tf::AsyncTask Executor::silent_dependent_async(F&& func, Tasks&&... tasks) {
- return silent_dependent_async("", std::forward<F>(func), std::forward<Tasks>(tasks)...);
-}
-
-// Function: silent_dependent_async
-template <typename F, typename... Tasks,
- std::enable_if_t<all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>*
->
-tf::AsyncTask Executor::silent_dependent_async(
- const std::string& name, F&& func, Tasks&&... tasks
-){
-
- _increment_topology();
-
- size_t num_dependents = sizeof...(Tasks);
-
- std::shared_ptr<Node> node(
- node_pool.animate(
- name, 0, nullptr, nullptr, num_dependents,
- std::in_place_type_t<Node::DependentAsync>{}, std::forward<F>(func)
- ),
- [&](Node* ptr){ node_pool.recycle(ptr); }
- );
-
- {
- std::scoped_lock lock(_asyncs_mutex);
- _asyncs.insert(node);
- }
-
- if constexpr(sizeof...(Tasks) > 0) {
- (_process_async_dependent(node.get(), tasks, num_dependents), ...);
- }
-
- if(num_dependents == 0) {
- _schedule_async_task(node.get());
- }
-
- return AsyncTask(std::move(node));
-}
-
-// Function: silent_dependent_async
-template <typename F, typename I,
- std::enable_if_t<!std::is_same_v<std::decay_t<I>, AsyncTask>, void>*
->
-tf::AsyncTask Executor::silent_dependent_async(F&& func, I first, I last) {
- return silent_dependent_async("", std::forward<F>(func), first, last);
-}
-
-// Function: silent_dependent_async
-template <typename F, typename I,
- std::enable_if_t<!std::is_same_v<std::decay_t<I>, AsyncTask>, void>*
->
-tf::AsyncTask Executor::silent_dependent_async(
- const std::string& name, F&& func, I first, I last
-) {
-
- _increment_topology();
-
- size_t num_dependents = std::distance(first, last);
-
- std::shared_ptr<Node> node(
- node_pool.animate(
- name, 0, nullptr, nullptr, num_dependents,
- std::in_place_type_t<Node::DependentAsync>{}, std::forward<F>(func)
- ),
- [&](Node* ptr){ node_pool.recycle(ptr); }
- );
-
- {
- std::scoped_lock lock(_asyncs_mutex);
- _asyncs.insert(node);
- }
-
- for(; first != last; first++){
- _process_async_dependent(node.get(), *first, num_dependents);
- }
-
- if(num_dependents == 0) {
- _schedule_async_task(node.get());
- }
-
- return AsyncTask(std::move(node));
-}
-
-// ----------------------------------------------------------------------------
-// Dependent Async
-// ----------------------------------------------------------------------------
-
-// Function: dependent_async
-template <typename F, typename... Tasks,
- std::enable_if_t<all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>*
->
-auto Executor::dependent_async(F&& func, Tasks&&... tasks) {
- return dependent_async("", std::forward<F>(func), std::forward<Tasks>(tasks)...);
-}
-
-// Function: dependent_async
-template <typename F, typename... Tasks,
- std::enable_if_t<all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>*
->
-auto Executor::dependent_async(
- const std::string& name, F&& func, Tasks&&... tasks
-) {
-
- _increment_topology();
-
- using R = std::invoke_result_t<std::decay_t<F>>;
-
- std::promise<R> p;
- auto fu{p.get_future()};
-
- size_t num_dependents = sizeof...(tasks);
-
- std::shared_ptr<Node> node(
- node_pool.animate(
- name, 0, nullptr, nullptr, num_dependents,
- std::in_place_type_t<Node::DependentAsync>{},
- _make_promised_async(std::move(p), std::forward<F>(func))
- ),
- [&](Node* ptr){ node_pool.recycle(ptr); }
- );
-
- {
- std::scoped_lock lock(_asyncs_mutex);
- _asyncs.insert(node);
- }
-
- if constexpr(sizeof...(Tasks) > 0) {
- (_process_async_dependent(node.get(), tasks, num_dependents), ...);
- }
-
- if(num_dependents == 0) {
- _schedule_async_task(node.get());
- }
-
- return std::make_pair(AsyncTask(std::move(node)), std::move(fu));
-}
-
-// Function: dependent_async
-template <typename F, typename I,
- std::enable_if_t<!std::is_same_v<std::decay_t<I>, AsyncTask>, void>*
->
-auto Executor::dependent_async(F&& func, I first, I last) {
- return dependent_async("", std::forward<F>(func), first, last);
-}
-
-// Function: dependent_async
-template <typename F, typename I,
- std::enable_if_t<!std::is_same_v<std::decay_t<I>, AsyncTask>, void>*
->
-auto Executor::dependent_async(
- const std::string& name, F&& func, I first, I last
-) {
-
- _increment_topology();
-
- using R = std::invoke_result_t<std::decay_t<F>>;
-
- std::promise<R> p;
- auto fu{p.get_future()};
-
- size_t num_dependents = std::distance(first, last);
-
- std::shared_ptr<Node> node(
- node_pool.animate(
- name, 0, nullptr, nullptr, num_dependents,
- std::in_place_type_t<Node::DependentAsync>{},
- _make_promised_async(std::move(p), std::forward<F>(func))
- ),
- [&](Node* ptr){ node_pool.recycle(ptr); }
- );
-
- {
- std::scoped_lock lock(_asyncs_mutex);
- _asyncs.insert(node);
- }
-
- for(; first != last; first++) {
- _process_async_dependent(node.get(), *first, num_dependents);
- }
-
- if(num_dependents == 0) {
- _schedule_async_task(node.get());
- }
-
- return std::make_pair(AsyncTask(std::move(node)), std::move(fu));
-}
-
-// ----------------------------------------------------------------------------
-// Dependent Async Helper Functions
-// ----------------------------------------------------------------------------
-
-// Procedure: _process_async_dependent
-inline void Executor::_process_async_dependent(
- Node* node, tf::AsyncTask& task, size_t& num_dependents
-) {
-
- std::shared_ptr<Node> dep;
- {
- std::scoped_lock lock(_asyncs_mutex);
- if(auto itr = _asyncs.find(task._node); itr != _asyncs.end()){
- dep = *itr;
- }
- }
-
- // if the dependent task exists
- if(dep) {
- auto& state = std::get_if<Node::DependentAsync>(&(dep->_handle))->state;
-
- add_dependent:
-
- auto target = Node::AsyncState::UNFINISHED;
-
- // acquires the lock
- if(state.compare_exchange_weak(target, Node::AsyncState::LOCKED,
- std::memory_order_acq_rel,
- std::memory_order_acquire)) {
- dep->_successors.push_back(node);
- state.store(Node::AsyncState::UNFINISHED, std::memory_order_release);
- }
- // dep's state is FINISHED, which means dep finished its callable already
- // thus decrement the node's join counter by 1
- else if (target == Node::AsyncState::FINISHED) {
- // decrement the counter needs to be the order of acquire and release
- // to synchronize with the worker
- num_dependents = node->_join_counter.fetch_sub(1, std::memory_order_acq_rel) - 1;
- }
- // another worker adding an async task that shares the same dependent
- else {
- goto add_dependent;
- }
- }
- else {
- num_dependents = node->_join_counter.fetch_sub(1, std::memory_order_acq_rel) - 1;
- }
-}
-
-// Procedure: _tear_down_dependent_async
-inline void Executor::_tear_down_dependent_async(Worker& worker, Node* node) {
-
- // this async task comes from Executor
- auto& state = std::get_if<Node::DependentAsync>(&(node->_handle))->state;
- auto target = Node::AsyncState::UNFINISHED;
-
- while(!state.compare_exchange_weak(target, Node::AsyncState::FINISHED,
- std::memory_order_acq_rel,
- std::memory_order_relaxed)) {
- target = Node::AsyncState::UNFINISHED;
- }
-
- // spaw successors whenever their dependencies are resolved
- worker._cache = nullptr;
- for(size_t i=0; i<node->_successors.size(); ++i) {
- //if(auto s = node->_successors[i]; --(s->_join_counter) == 0) {
- if(auto s = node->_successors[i];
- s->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1
- ) {
- if(worker._cache) {
- _schedule(worker, worker._cache);
- }
- worker._cache = s;
- }
- }
-
- // remove myself from the asyncs using extraction to avoid calling
- // ~Node inside the lock
- typename std::unordered_set<std::shared_ptr<Node>>::node_type extracted;
- {
- std::shared_ptr<Node> ptr(node, [](Node*){});
- std::scoped_lock lock(_asyncs_mutex);
- extracted = _asyncs.extract(ptr);
- // assert(extracted.empty() == false);
- }
-
- _decrement_topology_and_notify();
-}
-
-
-
-
-
-} // end of namespace tf -----------------------------------------------------
-
+++ /dev/null
-#pragma once
-
-#include "graph.hpp"
-
-/**
-@file async_task.hpp
-@brief asynchronous task include file
-*/
-
-namespace tf {
-
-// ----------------------------------------------------------------------------
-// AsyncTask
-// ----------------------------------------------------------------------------
-
-/**
-@brief class to create a dependent asynchronous task
-
-A tf::AsyncTask is a lightweight handle that retains @em shared ownership
-of a dependent async task created by an executor.
-This shared ownership ensures that the async task remains alive when
-adding it to the dependency list of another async task,
-thus avoiding the classical [ABA problem](https://en.wikipedia.org/wiki/ABA_problem).
-
-@code{.cpp}
-// main thread retains shared ownership of async task A
-tf::AsyncTask A = executor.silent_dependent_async([](){});
-
-// task A remains alive (i.e., at least one ref count by the main thread)
-// when being added to the dependency list of async task B
-tf::AsyncTask B = executor.silent_dependent_async([](){}, A);
-@endcode
-
-Currently, tf::AsyncTask is implemented based on C++ smart pointer std::shared_ptr and
-is considered cheap to copy or move as long as only a handful of objects
-own it.
-When a worker completes an async task, it will remove the task from the executor,
-decrementing the number of shared owners by one.
-If that counter reaches zero, the task is destroyed.
-*/
-class AsyncTask {
-
- friend class FlowBuilder;
- friend class Runtime;
- friend class Taskflow;
- friend class TaskView;
- friend class Executor;
-
- public:
-
- /**
- @brief constructs an empty task handle
- */
- AsyncTask() = default;
-
- /**
- @brief destroys the managed asynchronous task if this is the last owner
- */
- ~AsyncTask() = default;
-
- /**
- @brief constructs an task that shares ownership of @c rhs
- */
- AsyncTask(const AsyncTask& rhs) = default;
-
- /**
- @brief move-constructs an task from @c rhs
- */
- AsyncTask(AsyncTask&& rhs) = default;
-
- /**
- @brief shares ownership of the task managed by @c rhs
- */
- AsyncTask& operator = (const AsyncTask& rhs) = default;
-
- /**
- @brief move-assigns the task from @c rhs
- */
- AsyncTask& operator = (AsyncTask&& rhs) = default;
-
- /**
- @brief checks if the task stores a non-null shared pointer
- */
- bool empty() const;
-
- /**
- @brief release the ownership
- */
- void reset();
-
- /**
- @brief obtains a hash value of the underlying node
- */
- size_t hash_value() const;
-
- private:
-
- AsyncTask(std::shared_ptr<Node>);
-
- std::shared_ptr<Node> _node;
-};
-
-// Constructor
-inline AsyncTask::AsyncTask(std::shared_ptr<Node> ptr) : _node {std::move(ptr)} {
-}
-
-// Function: empty
-inline bool AsyncTask::empty() const {
- return _node == nullptr;
-}
-
-// Function: reset
-inline void AsyncTask::reset() {
- _node.reset();
-}
-
-// Function: hash_value
-inline size_t AsyncTask::hash_value() const {
- return std::hash<std::shared_ptr<Node>>{}(_node);
-}
-
-} // end of namespace tf ----------------------------------------------------
-
-
-
+++ /dev/null
-#pragma once
-
-#if defined(_MSC_VER)
- #define TF_FORCE_INLINE __forceinline
-#elif defined(__GNUC__) && __GNUC__ > 3
- #define TF_FORCE_INLINE __attribute__((__always_inline__)) inline
-#else
- #define TF_FORCE_INLINE inline
-#endif
-
-#if defined(_MSC_VER)
- #define TF_NO_INLINE __declspec(noinline)
-#elif defined(__GNUC__) && __GNUC__ > 3
- #define TF_NO_INLINE __attribute__((__noinline__))
-#else
- #define TF_NO_INLINE
-#endif
TASKFLOW MIT LICENSE
-Copyright (c) 2018-2022 Dr. Tsung-Wei Huang
+Copyright (c) 2018-2024 Dr. Tsung-Wei Huang
+
+The University of Wisconsin at Madison
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
[![Windows](https://github.com/taskflow/taskflow/workflows/Windows/badge.svg)](https://github.com/taskflow/taskflow/actions?query=workflow%3AWindows)
[![Wiki](image/api-doc.svg)][documentation]
[![TFProf](image/tfprof.svg)](https://taskflow.github.io/tfprof/)
-[![Cite](image/cite-tpds.svg)][TPDS21]
+[![Cite](image/cite-tpds.svg)][TPDS22]
Taskflow helps you quickly write parallel and heterogeneous task programs in modern C++
that incorporate both regular and irregular compute patterns,
together with an efficient *work-stealing* scheduler to optimize your multithreaded performance.
-| [Static Tasking](#get-started-with-taskflow) | [Dynamic Tasking](#dynamic-tasking) |
+| [Static Tasking](#start-your-first-taskflow-program) | [Subflow Tasking](#create-a-subflow-graph) |
| :------------: | :-------------: |
| ![](image/static_graph.svg) | <img align="right" src="image/dynamic_graph.svg" width="100%"> |
across dependent tasks to implement cycles and conditions that were otherwise difficult to do
with existing tools.
-| [Conditional Tasking](#conditional-tasking) |
+| [Conditional Tasking](#integrate-control-flow-to-a-task-graph) |
| :-----------------: |
| ![](image/condition.svg) |
composition of modular and reusable blocks that are easier to optimize
at an individual scope.
-| [Taskflow Composition](#composable-tasking) |
+| [Taskflow Composition](#compose-task-graphs) |
| :---------------: |
|![](image/framework.svg)|
accelerate a wide range of scientific computing applications
by harnessing the power of CPU-GPU collaborative computing.
-| [Concurrent CPU-GPU Tasking](#concurrent-cpu-gpu-tasking) |
+| [Concurrent CPU-GPU Tasking](#offload-a-task-to-a-gpu) |
| :-----------------: |
| ![](image/cudaflow.svg) |
`A`, `B`, `C`, and `D`, where `A` runs before `B` and `C`, and `D`
runs after `B` and `C`.
When `A` finishes, `B` and `C` can run in parallel.
+Try it live on [Compiler Explorer (godbolt)](https://godbolt.org/z/j8hx3xnnx)!
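A minimal sketch of that four-task program (mirroring the upstream hello-world example; `<iostream>` added for self-containedness):

```cpp
#include <taskflow/taskflow.hpp>  // Taskflow is header-only
#include <iostream>

int main() {
  tf::Executor executor;
  tf::Taskflow taskflow;

  auto [A, B, C, D] = taskflow.emplace(  // create four tasks
    [] () { std::cout << "TaskA\n"; },
    [] () { std::cout << "TaskB\n"; },
    [] () { std::cout << "TaskC\n"; },
    [] () { std::cout << "TaskD\n"; }
  );

  A.precede(B, C);  // A runs before B and C
  D.succeed(B, C);  // D runs after  B and C

  executor.run(taskflow).wait();
}
```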
## Launch Asynchronous Tasks
Taskflow supports *asynchronous* tasking.
-You can launch tasks asynchronously to incorporate independent, dynamic
-parallelism in your taskflows.
+You can launch tasks asynchronously to dynamically explore task graph parallelism.
```cpp
tf::Executor executor;
-tf::Taskflow taskflow;
// create asynchronous tasks directly from an executor
-tf::Future<std::optional<int>> future = executor.async([](){
+std::future<int> future = executor.async([](){
std::cout << "async task returns 1\n";
return 1;
});
-executor.silent_async([](){ std::cout << "async task of no return\n"; });
+executor.silent_async([](){ std::cout << "async task does not return\n"; });
-// launch an asynchronous task from a running task
-taskflow.emplace([&](){
- executor.async([](){ std::cout << "async task within a task\n"; });
-});
+// create asynchronous tasks with dynamic dependencies
+tf::AsyncTask A = executor.silent_dependent_async([](){ printf("A\n"); });
+tf::AsyncTask B = executor.silent_dependent_async([](){ printf("B\n"); }, A);
+tf::AsyncTask C = executor.silent_dependent_async([](){ printf("C\n"); }, A);
+tf::AsyncTask D = executor.silent_dependent_async([](){ printf("D\n"); }, B, C);
-executor.run(taskflow).wait();
+executor.wait_for_all();
```
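Per the executor code in this patch, the non-silent `dependent_async` additionally returns a `std::future` for the task's result. A hedged sketch of retrieving that result:

```cpp
// sketch: dependent_async returns a pair of the task handle and a
// std::future<R>, so the caller can both chain dependencies and fetch results
tf::Executor executor;

auto [A, fuA] = executor.dependent_async([](){ return 6; });
auto [B, fuB] = executor.dependent_async([](){ return 7; }, A);  // B runs after A

// future::get() blocks until the corresponding task finishes
std::cout << fuA.get() * fuB.get() << '\n';  // prints 42
```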
## Execute a Taskflow
[PayMe]: https://www.paypal.me/twhuang/10
[email me]: mailto:twh760812@gmail.com
[Cpp Conference 2018]: https://github.com/CppCon/CppCon2018
-[TPDS21]: https://tsung-wei-huang.github.io/papers/tpds21-taskflow.pdf
+[TPDS22]: https://tsung-wei-huang.github.io/papers/tpds21-taskflow.pdf
@brief class to create a critical region of limited workers to run tasks
-tf::CriticalSection is a warpper over tf::Semaphore and is specialized for
+tf::CriticalSection is a wrapper over tf::Semaphore and is specialized for
limiting the maximum concurrency over a set of tasks.
A critical section starts with an initial count representing that limit.
When a task is added to the critical section,
// Function: find_if_loop
template <typename Iterator, typename Predicate>
-TF_FORCE_INLINE bool find_if_loop(
+bool find_if_loop(
std::atomic<size_t>& offset,
Iterator& beg,
size_t& prev_e,
size_t curr_b,
size_t curr_e,
- Predicate&& predicate
+ Predicate predicate
) {
// early prune
if(offset.load(std::memory_order_relaxed) < curr_b) {
// Function: find_if_not_loop
template <typename Iterator, typename Predicate>
-TF_FORCE_INLINE bool find_if_not_loop(
+bool find_if_not_loop(
std::atomic<size_t>& offset,
Iterator& beg,
size_t& prev_e,
size_t curr_b,
size_t curr_e,
- Predicate&& predicate
+ Predicate predicate
) {
// early prune
return false;
}
+} // namespace detail --------------------------------------------------------
+
// Function: make_find_if_task
-template <typename B, typename E, typename T, typename UOP, typename P>
-TF_FORCE_INLINE auto make_find_if_task(
- B first, E last, T& result, UOP predicate, P&& part
-) {
+template <typename B, typename E, typename T, typename UOP, typename P = DefaultPartitioner>
+auto make_find_if_task(B first, E last, T& result, UOP predicate, P part = P()) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
- using namespace std::string_literals;
- return
- [b=first, e=last, predicate, &result, part=std::forward<P>(part)]
- (Runtime& rt) mutable {
+ return [=, &result] (Runtime& rt) mutable {
// fetch the stateful values
- B_t beg = b;
- E_t end = e;
+ B_t beg = first;
+ E_t end = last;
size_t W = rt.executor().num_workers();
size_t N = std::distance(beg, end);
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
- result = std::find_if(beg, end, predicate);
+ launch_loop(part, [&](){
+ result = std::find_if(beg, end, predicate);
+ });
return;
}
std::atomic<size_t> offset(N);
// static partitioner
- if constexpr(std::is_same_v<std::decay_t<P>, StaticPartitioner>) {
+ if constexpr(part.type() == PartitionerType::STATIC) {
size_t chunk_size;
chunk_size = part.adjusted_chunk_size(N, W, w);
- launch_loop(W, w, rt,
- [N, W, curr_b, chunk_size, beg, &predicate, &offset, &part]
- () mutable {
+ launch_loop(W, w, rt, part,
+ [N, W, curr_b, chunk_size, beg, &predicate, &offset, &part] () mutable {
part.loop_until(N, W, curr_b, chunk_size,
- [&, prev_e=size_t{0}](size_t curr_b, size_t curr_e) mutable {
+ [&, prev_e=size_t{0}](size_t part_b, size_t part_e) mutable {
return detail::find_if_loop(
- offset, beg, prev_e, curr_b, curr_e, predicate
+ offset, beg, prev_e, part_b, part_e, predicate
);
}
- );
+ );
}
);
}
- rt.join();
+ rt.corun_all();
}
// dynamic partitioner
else {
offset, beg, prev_e, curr_b, curr_e, predicate
);
}
- );
+ );
}
);
}
}
// Function: make_find_if_not_task
-template <typename B, typename E, typename T, typename UOP, typename P>
-TF_FORCE_INLINE auto make_find_if_not_task(
- B first, E last, T& result, UOP predicate, P&& part
-) {
+template <typename B, typename E, typename T, typename UOP, typename P = DefaultPartitioner>
+auto make_find_if_not_task(B first, E last, T& result, UOP predicate, P part = P()) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
- using namespace std::string_literals;
- return
- [b=first, e=last, predicate, &result, part=std::forward<P>(part)]
- (Runtime& rt) mutable {
+ return [=, &result] (Runtime& rt) mutable {
// fetch the stateful values
- B_t beg = b;
- E_t end = e;
+ B_t beg = first;
+ E_t end = last;
size_t W = rt.executor().num_workers();
size_t N = std::distance(beg, end);
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
- result = std::find_if_not(beg, end, predicate);
+ launch_loop(part, [&](){
+ result = std::find_if_not(beg, end, predicate);
+ });
return;
}
std::atomic<size_t> offset(N);
// static partitioner
- if constexpr(std::is_same_v<std::decay_t<P>, StaticPartitioner>) {
+ if constexpr(part.type() == PartitionerType::STATIC) {
size_t chunk_size;
chunk_size = part.adjusted_chunk_size(N, W, w);
- launch_loop(W, w, rt,
+ launch_loop(W, w, rt, part,
[N, W, curr_b, chunk_size, beg, &predicate, &offset, &part] () mutable {
part.loop_until(N, W, curr_b, chunk_size,
- [&, prev_e=size_t{0}](size_t curr_b, size_t curr_e) mutable {
+ [&, prev_e=size_t{0}](size_t part_b, size_t part_e) mutable {
return detail::find_if_not_loop(
- offset, beg, prev_e, curr_b, curr_e, predicate
+ offset, beg, prev_e, part_b, part_e, predicate
);
}
- );
+ );
}
);
}
- rt.join();
+ rt.corun_all();
}
// dynamic partitioner
else {
offset, beg, prev_e, curr_b, curr_e, predicate
);
}
- );
+ );
}
);
}
}
// Function: make_min_element_task
-template <typename B, typename E, typename T, typename C, typename P>
-TF_FORCE_INLINE auto make_min_element_task(
- B first, E last, T& result, C comp, P&& part
-) {
+template <typename B, typename E, typename T, typename C, typename P = DefaultPartitioner>
+auto make_min_element_task(B first, E last, T& result, C comp, P part = P()) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
- using namespace std::string_literals;
- return
- [b=first, e=last, &result, comp, part=std::forward<P>(part)]
- (Runtime& rt) mutable {
+ return [=, &result] (Runtime& rt) mutable {
// fetch the iterator values
- B_t beg = b;
- E_t end = e;
+ B_t beg = first;
+ E_t end = last;
size_t W = rt.executor().num_workers();
size_t N = std::distance(beg, end);
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
- result = std::min_element(beg, end, comp);
+ launch_loop(part, [&](){
+ result = std::min_element(beg, end, comp);
+ });
return;
}
N--;
// static partitioner
- if constexpr(std::is_same_v<std::decay_t<P>, StaticPartitioner>) {
+ if constexpr(part.type() == PartitionerType::STATIC) {
size_t chunk_size;
// variable sum needs to avoid copy at the first step
chunk_size = std::max(size_t{2}, part.adjusted_chunk_size(N, W, w));
- launch_loop(W, w, rt,
+ launch_loop(W, w, rt, part,
[beg, curr_b, N, W, chunk_size, &comp, &mutex, &result, &part] () mutable {
-
std::advance(beg, curr_b);
if(N - curr_b == 1) {
// loop reduce
part.loop(N, W, curr_b, chunk_size,
- [&, prev_e=curr_b+2](size_t curr_b, size_t curr_e) mutable {
+ [&, prev_e=curr_b+2](size_t part_b, size_t part_e) mutable {
- if(curr_b > prev_e) {
- std::advance(beg, curr_b - prev_e);
+ if(part_b > prev_e) {
+ std::advance(beg, part_b - prev_e);
}
else {
- curr_b = prev_e;
+ part_b = prev_e;
}
- for(size_t x=curr_b; x<curr_e; x++, beg++) {
+ for(size_t x=part_b; x<part_e; x++, beg++) {
if(comp(*beg, *smallest)) {
smallest = beg;
}
}
- prev_e = curr_e;
+ prev_e = part_e;
}
);
}
});
}
- rt.join();
+ rt.corun_all();
}
// dynamic partitioner
else {
// loop reduce
part.loop(N, W, next,
- [&, prev_e=s0+2](size_t curr_b, size_t curr_e) mutable {
- std::advance(beg, curr_b - prev_e);
- for(size_t x=curr_b; x<curr_e; x++, beg++) {
+ [&, prev_e=s0+2](size_t part_b, size_t part_e) mutable {
+ std::advance(beg, part_b - prev_e);
+ for(size_t x=part_b; x<part_e; x++, beg++) {
if(comp(*beg, *smallest)) {
smallest = beg;
}
}
- prev_e = curr_e;
+ prev_e = part_e;
}
);
}
// Function: make_max_element_task
-template <typename B, typename E, typename T, typename C, typename P>
-TF_FORCE_INLINE auto make_max_element_task(B first, E last, T& result, C comp, P&& part) {
+template <typename B, typename E, typename T, typename C, typename P = DefaultPartitioner>
+auto make_max_element_task(B first, E last, T& result, C comp, P part = P()) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
- using namespace std::string_literals;
- return
- [b=first, e=last, &result, comp, part=std::forward<P>(part)]
- (Runtime& rt) mutable {
+ return [=, &result] (Runtime& rt) mutable {
// fetch the iterator values
- B_t beg = b;
- E_t end = e;
+ B_t beg = first;
+ E_t end = last;
size_t W = rt.executor().num_workers();
size_t N = std::distance(beg, end);
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
- result = std::max_element(beg, end, comp);
+ launch_loop(part, [&](){
+ result = std::max_element(beg, end, comp);
+ });
return;
}
N--;
// static partitioner
- if constexpr(std::is_same_v<std::decay_t<P>, StaticPartitioner>) {
+ if constexpr(part.type() == PartitionerType::STATIC) {
size_t chunk_size;
// variable sum needs to avoid copy at the first step
chunk_size = std::max(size_t{2}, part.adjusted_chunk_size(N, W, w));
- launch_loop(W, w, rt,
+ launch_loop(W, w, rt, part,
[beg, curr_b, N, W, chunk_size, &comp, &mutex, &result, &part] () mutable {
-
std::advance(beg, curr_b);
if(N - curr_b == 1) {
// loop reduce
part.loop(N, W, curr_b, chunk_size,
- [&, prev_e=curr_b+2](size_t curr_b, size_t curr_e) mutable {
+ [&, prev_e=curr_b+2](size_t part_b, size_t part_e) mutable {
- if(curr_b > prev_e) {
- std::advance(beg, curr_b - prev_e);
+ if(part_b > prev_e) {
+ std::advance(beg, part_b - prev_e);
}
else {
- curr_b = prev_e;
+ part_b = prev_e;
}
- for(size_t x=curr_b; x<curr_e; x++, beg++) {
+ for(size_t x=part_b; x<part_e; x++, beg++) {
if(comp(*largest, *beg)) {
largest = beg;
}
}
- prev_e = curr_e;
+ prev_e = part_e;
}
);
}
});
}
- rt.join();
+ rt.corun_all();
}
// dynamic partitioner
else {
// loop reduce
part.loop(N, W, next,
- [&, prev_e=s0+2](size_t curr_b, size_t curr_e) mutable {
- std::advance(beg, curr_b - prev_e);
- for(size_t x=curr_b; x<curr_e; x++, beg++) {
+ [&, prev_e=s0+2](size_t part_b, size_t part_e) mutable {
+ std::advance(beg, part_b - prev_e);
+ for(size_t x=part_b; x<part_e; x++, beg++) {
if(comp(*largest, *beg)) {
largest = beg;
}
}
- prev_e = curr_e;
+ prev_e = part_e;
}
);
};
}
-} // namespace detail --------------------------------------------------------
+
// Function: find_if
template <typename B, typename E, typename T, typename UOP, typename P>
-Task tf::FlowBuilder::find_if(B first, E last, T& result, UOP predicate, P&& part) {
- return emplace(detail::make_find_if_task(
- first, last, result, predicate, std::forward<P>(part)
- ));
+Task tf::FlowBuilder::find_if(B first, E last, T& result, UOP predicate, P part) {
+ return emplace(make_find_if_task(first, last, result, predicate, part));
}
// Function: find_if_not
template <typename B, typename E, typename T, typename UOP, typename P>
-Task tf::FlowBuilder::find_if_not(B first, E last, T& result, UOP predicate, P&& part) {
- return emplace(detail::make_find_if_not_task(
- first, last, result, predicate, std::forward<P>(part)
- ));
+Task tf::FlowBuilder::find_if_not(B first, E last, T& result, UOP predicate, P part) {
+ return emplace(make_find_if_not_task(first, last, result, predicate, part));
}
// ----------------------------------------------------------------------------
// Function: min_element
template <typename B, typename E, typename T, typename C, typename P>
-Task FlowBuilder::min_element(B first, E last, T& result, C comp, P&& part) {
- return emplace(detail::make_min_element_task(
- first, last, result, comp, std::forward<P>(part)
- ));
+Task FlowBuilder::min_element(B first, E last, T& result, C comp, P part) {
+ return emplace(make_min_element_task(first, last, result, comp, part));
}
// ----------------------------------------------------------------------------
// Function: max_element
template <typename B, typename E, typename T, typename C, typename P>
-Task FlowBuilder::max_element(B first, E last, T& result, C comp, P&& part) {
- return emplace(detail::make_max_element_task(
- first, last, result, comp, std::forward<P>(part)
- ));
+Task FlowBuilder::max_element(B first, E last, T& result, C comp, P part) {
+ return emplace(make_max_element_task(first, last, result, comp, part));
}
} // end of namespace tf -----------------------------------------------------
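For context, a hedged usage sketch of the `min_element` builder defined above, assuming the declaration's default partitioner argument:

```cpp
// sketch: parallel min_element assigns the resulting iterator to `result`
// when the task runs
#include <taskflow/taskflow.hpp>
#include <functional>
#include <iostream>
#include <vector>

int main() {
  tf::Executor executor;
  tf::Taskflow taskflow;

  std::vector<int> data{3, 1, 4, 1, 5, 9, 2, 6};
  std::vector<int>::iterator result;

  taskflow.min_element(data.begin(), data.end(), result, std::less<int>());

  executor.run(taskflow).wait();
  std::cout << *result << '\n';  // prints 1
}
```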
namespace tf {
-namespace detail {
-
// Function: make_for_each_task
-template <typename B, typename E, typename C, typename P>
-TF_FORCE_INLINE auto make_for_each_task(B beg, E end, C c, P&& part) {
-
+template <typename B, typename E, typename C, typename P = DefaultPartitioner>
+auto make_for_each_task(B b, E e, C c, P part = P()) {
+
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
- using namespace std::string_literals;
- return [b=beg, e=end, c, part=std::forward<P>(part)] (Runtime& rt) mutable {
+ return [=] (Runtime& rt) mutable {
// fetch the stateful values
B_t beg = b;
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
- std::for_each(beg, end, c);
+ launch_loop(part, [&](){
+ std::for_each(beg, end, c);
+ });
return;
}
}
// static partitioner
- if constexpr(std::is_same_v<std::decay_t<P>, StaticPartitioner>) {
+ if constexpr(part.type() == PartitionerType::STATIC) {
size_t chunk_size;
for(size_t w=0, curr_b=0; w<W && curr_b < N; ++w, curr_b += chunk_size) {
chunk_size = part.adjusted_chunk_size(N, W, w);
- launch_loop(W, w, rt, [=, &c, &part] () mutable {
+ launch_loop(W, w, rt, part, [=, &c, &part] () mutable {
part.loop(N, W, curr_b, chunk_size,
- [&, prev_e=size_t{0}](size_t curr_b, size_t curr_e) mutable {
- std::advance(beg, curr_b - prev_e);
- for(size_t x = curr_b; x<curr_e; x++) {
+ [&, prev_e=size_t{0}](size_t part_b, size_t part_e) mutable {
+ std::advance(beg, part_b - prev_e);
+ for(size_t x = part_b; x<part_e; x++) {
c(*beg++);
}
- prev_e = curr_e;
+ prev_e = part_e;
}
);
});
}
- rt.join();
+ rt.corun_all();
}
// dynamic partitioner
else {
std::atomic<size_t> next(0);
launch_loop(N, W, rt, next, part, [=, &c, &next, &part] () mutable {
part.loop(N, W, next,
- [&, prev_e=size_t{0}](size_t curr_b, size_t curr_e) mutable {
- std::advance(beg, curr_b - prev_e);
- for(size_t x = curr_b; x<curr_e; x++) {
+ [&, prev_e=size_t{0}](size_t part_b, size_t part_e) mutable {
+ std::advance(beg, part_b - prev_e);
+ for(size_t x = part_b; x<part_e; x++) {
c(*beg++);
}
- prev_e = curr_e;
+ prev_e = part_e;
}
- );
+ );
});
}
};
}
// Function: make_for_each_index_task
-template <typename B, typename E, typename S, typename C, typename P>
-TF_FORCE_INLINE auto make_for_each_index_task(B beg, E end, S inc, C c, P&& part){
-
- using namespace std::string_literals;
+template <typename B, typename E, typename S, typename C, typename P = DefaultPartitioner>
+auto make_for_each_index_task(B b, E e, S s, C c, P part = P()){
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using S_t = std::decay_t<unwrap_ref_decay_t<S>>;
- return [b=beg, e=end, a=inc, c, part=std::forward<P>(part)]
- (Runtime& rt) mutable {
+ return [=] (Runtime& rt) mutable {
// fetch the iterator values
B_t beg = b;
E_t end = e;
- S_t inc = a;
+ S_t inc = s;
+
+ // nothing to be done if the range is invalid
+ if(is_range_invalid(beg, end, inc)) {
+ return;
+ }
size_t W = rt.executor().num_workers();
size_t N = distance(beg, end, inc);
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
- for(size_t x=0; x<N; x++, beg+=inc) {
- c(beg);
- }
+ launch_loop(part, [&](){
+ for(size_t x=0; x<N; x++, beg+=inc) {
+ c(beg);
+ }
+ });
return;
}
}
// static partitioner
- if constexpr(std::is_same_v<std::decay_t<P>, StaticPartitioner>) {
-
+ if constexpr(part.type() == PartitionerType::STATIC) {
size_t chunk_size;
-
for(size_t w=0, curr_b=0; w<W && curr_b < N; ++w, curr_b += chunk_size) {
chunk_size = part.adjusted_chunk_size(N, W, w);
- launch_loop(W, w, rt, [=, &c, &part] () mutable {
+ launch_loop(W, w, rt, part, [=, &c, &part] () mutable {
part.loop(N, W, curr_b, chunk_size,
- [&](size_t curr_b, size_t curr_e) {
- auto idx = static_cast<B_t>(curr_b) * inc + beg;
- for(size_t x=curr_b; x<curr_e; x++, idx += inc) {
+ [&](size_t part_b, size_t part_e) {
+ auto idx = static_cast<B_t>(part_b) * inc + beg;
+ for(size_t x=part_b; x<part_e; x++, idx += inc) {
c(idx);
}
}
- );
+ );
});
}
- rt.join();
+ rt.corun_all();
}
// dynamic partitioner
else {
std::atomic<size_t> next(0);
launch_loop(N, W, rt, next, part, [=, &c, &next, &part] () mutable {
part.loop(N, W, next,
- [&](size_t curr_b, size_t curr_e) {
- auto idx = static_cast<B_t>(curr_b) * inc + beg;
- for(size_t x=curr_b; x<curr_e; x++, idx += inc) {
+ [&](size_t part_b, size_t part_e) {
+ auto idx = static_cast<B_t>(part_b) * inc + beg;
+ for(size_t x=part_b; x<part_e; x++, idx += inc) {
c(idx);
}
}
- );
+ );
});
}
};
}
-} // end of namespace detail -------------------------------------------------
-
// ----------------------------------------------------------------------------
// for_each
// ----------------------------------------------------------------------------
// Function: for_each
template <typename B, typename E, typename C, typename P>
-Task FlowBuilder::for_each(B beg, E end, C c, P&& part) {
+Task FlowBuilder::for_each(B beg, E end, C c, P part) {
return emplace(
- detail::make_for_each_task(beg, end, c, std::forward<P>(part))
+ make_for_each_task(beg, end, c, part)
);
}
// Function: for_each_index
template <typename B, typename E, typename S, typename C, typename P>
-Task FlowBuilder::for_each_index(B beg, E end, S inc, C c, P&& part){
+Task FlowBuilder::for_each_index(B beg, E end, S inc, C c, P part){
return emplace(
- detail::make_for_each_index_task(beg, end, inc, c, std::forward<P>(part))
+ make_for_each_index_task(beg, end, inc, c, part)
);
}
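A hedged usage sketch of the `for_each_index` builder above, with an explicit static partitioner (the chunk size 8 is an arbitrary choice for illustration):

```cpp
// sketch: parallel iteration over the index range [0, 100) with step 2
#include <taskflow/taskflow.hpp>
#include <vector>

int main() {
  tf::Executor executor;
  tf::Taskflow taskflow;

  std::vector<int> data(100, 0);

  taskflow.for_each_index(0, 100, 2,
    [&data](int i) { data[i] = i; },  // invoked once per index
    tf::StaticPartitioner(8)          // distribute chunks of 8 indices
  );

  executor.run(taskflow).wait();
}
```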
#pragma once
+#include <functional>
#include "../core/async.hpp"
namespace tf {
+// Function: launch_loop
+template<typename P, typename Loop>
+TF_FORCE_INLINE void launch_loop(P part, Loop loop) {
+
+ constexpr bool is_default_wrapper_v = std::is_same_v<
+ typename std::decay_t<P>::closure_wrapper_type, DefaultClosureWrapper
+ >;
+
+ if constexpr(is_default_wrapper_v) {
+ loop();
+ }
+ else {
+ std::invoke(part.closure_wrapper(), loop);
+ }
+}
+
// Function: launch_loop
template <typename P, typename Loop>
TF_FORCE_INLINE void launch_loop(
size_t W,
Runtime& rt,
std::atomic<size_t>& next,
- P&& part,
- Loop&& loop
+ P part,
+ Loop loop
) {
//static_assert(std::is_lvalue_reference_v<Loop>, "");
}
// tail optimization
if(r <= part.chunk_size() || w == W-1) {
- loop();
+ launch_loop(part, loop);
break;
}
else {
- rt.silent_async_unchecked("loop-"s + std::to_string(w), loop);
+ rt.silent_async_unchecked([=](){ launch_loop(part, loop); });
}
}
- rt.join();
+ rt.corun_all();
}
// Function: launch_loop
-template <typename Loop>
+template <typename P, typename Loop>
TF_FORCE_INLINE void launch_loop(
size_t W,
size_t w,
Runtime& rt,
- Loop&& loop
+ P part,
+ Loop loop
) {
using namespace std::string_literals;
if(w == W-1) {
- loop();
+ launch_loop(part, loop);
}
else {
- rt.silent_async_unchecked("loop-"s + std::to_string(w), loop);
+ rt.silent_async_unchecked([=](){ launch_loop(part, loop); });
}
}
namespace tf {
+/**
+@enum PartitionerType
+
+@brief enumeration of all partitioner types
+*/
+enum class PartitionerType : int {
+ /** @brief static partitioner type */
+ STATIC,
+ /** @brief dynamic partitioner type */
+ DYNAMIC
+};
+
+
+//template <typename C>
+//class PartitionInvoker : public PartitionerBase {
+//
+// protected
+//
+// C _closure;
+//
+// template <typename... ArgsT>
+// auto operator()(ArgsT&&... args) {
+// return std::invoke(closure, std::forward<ArgsT>(args)...);
+// }
+//
+// template <typename... ArgsT>
+// auto operator()(ArgsT&&... args) const {
+// return std::invoke(closure, std::forward<ArgsT>(args)...);
+// }
+//
+//};
+
+/**
+@struct DefaultClosureWrapper
+
+@brief default closure wrapper that simply runs the given closure as is
+*/
+struct DefaultClosureWrapper {
+};
+
+/**
+@private
+*/
+struct IsPartitioner {
+};
+
// ----------------------------------------------------------------------------
// Partitioner Base
// ----------------------------------------------------------------------------
@brief class to derive a partitioner for scheduling parallel algorithms
+@tparam C closure wrapper type
+
The class provides base methods to derive a partitioner that can be used
to schedule parallel iterations (e.g., tf::Taskflow::for_each).
A partitioner defines the scheduling method for running parallel algorithms,
such as tf::Taskflow::for_each, tf::Taskflow::reduce, and so on.
-By default, we provide the following partitioners:
+By default, we provide the following partitioners:
-+ tf::GuidedPartitioner to enable guided scheduling algorithm of adaptive chunk size
++ tf::GuidedPartitioner to enable guided scheduling algorithm of adaptive chunk size
+ tf::DynamicPartitioner to enable dynamic scheduling algorithm of equal chunk size
-+ tf::StaticPartitioner to enable static scheduling algorithm of static chunk size
-+ tf::RandomPartitioner to enable random scheduling algorithm of random chunk size
++ tf::StaticPartitioner to enable static scheduling algorithm of static chunk size
++ tf::RandomPartitioner to enable random scheduling algorithm of random chunk size
Depending on the application, the partitioning algorithm can significantly
impact performance.
tf::GuidedPartitioner or tf::DynamicPartitioner can outperform tf::StaticPartitioner.
In most situations, tf::GuidedPartitioner can deliver decent performance and
is thus used as our default partitioner.
+
+@note
+Giving a partition size of 0 lets the %Taskflow runtime automatically determine
+the partition size for the given partitioner.
+
+
+In addition to partition size, the application can specify a closure wrapper
+for a partitioner.
+A closure wrapper allows the application to wrap a partitioned task
+(i.e., closure) with a custom function object that performs additional tasks.
+For example:
+
+@code{.cpp}
+std::atomic<int> count = 0;
+tf::Executor executor;
+tf::Taskflow taskflow;
+taskflow.for_each_index(0, 100, 1,
+  [](int i){
+ printf("%d\n", i);
+ },
+ tf::StaticPartitioner(0, [](auto&& closure){
+ // do something before invoking the partitioned task
+ // ...
+
+ // invoke the partitioned task
+ closure();
+
+ // do something else after invoking the partitioned task
+ // ...
+  })
+);
+executor.run(taskflow).wait();
+@endcode
+
+@note
+The default closure wrapper (tf::DefaultClosureWrapper) does nothing but invoke
+the partitioned task (closure).
+
*/
-class PartitionerBase {
+template <typename C = DefaultClosureWrapper>
+class PartitionerBase : public IsPartitioner {
public:
+
+ /**
+  @brief the closure wrapper type
+ */
+ using closure_wrapper_type = C;
/**
@brief default constructor
@brief construct a partitioner with the given chunk size
*/
explicit PartitionerBase(size_t chunk_size) : _chunk_size {chunk_size} {}
+
+ /**
+ @brief construct a partitioner with the given chunk size and closure wrapper
+ */
+ PartitionerBase(size_t chunk_size, C&& closure_wrapper) :
+ _chunk_size {chunk_size},
+ _closure_wrapper {std::forward<C>(closure_wrapper)} {
+ }
/**
@brief query the chunk size of this partitioner
*/
void chunk_size(size_t cz) { _chunk_size = cz; }
+ /**
+  @brief acquire immutable access to the closure wrapper object
+ */
+ const C& closure_wrapper() const { return _closure_wrapper; }
+
+ /**
+ @brief modify the closure wrapper object
+ */
+ template <typename F>
+ void closure_wrapper(F&& fn) { _closure_wrapper = std::forward<F>(fn); }
+
protected:
/**
@brief chunk size
*/
size_t _chunk_size{0};
+
+ /**
+ @brief closure wrapper
+ */
+ C _closure_wrapper;
};
// ----------------------------------------------------------------------------
// Guided Partitioner
// ----------------------------------------------------------------------------
-
+
/**
@class GuidedPartitioner
+@tparam C closure wrapper type (default tf::DefaultClosureWrapper)
+
@brief class to construct a guided partitioner for scheduling parallel algorithms
The size of a partition is proportional to the number of unassigned iterations
divided by the number of workers,
and the size will gradually decrease to the given chunk size.
The last partition may be smaller than the chunk size.
+
+In addition to partition size, the application can specify a closure wrapper
+for a guided partitioner.
+A closure wrapper allows the application to wrap a partitioned task
+(i.e., closure) with a custom function object that performs additional tasks.
+For example:
+
+@code{.cpp}
+std::atomic<int> count = 0;
+tf::Executor executor;
+tf::Taskflow taskflow;
+taskflow.for_each_index(0, 100, 1,
+  [](int i){
+ printf("%d\n", i);
+ },
+ tf::GuidedPartitioner(0, [](auto&& closure){
+ // do something before invoking the partitioned task
+ // ...
+
+ // invoke the partitioned task
+ closure();
+
+ // do something else after invoking the partitioned task
+ // ...
+  })
+);
+executor.run(taskflow).wait();
+@endcode
*/
-class GuidedPartitioner : public PartitionerBase {
+template <typename C = DefaultClosureWrapper>
+class GuidedPartitioner : public PartitionerBase<C> {
public:
+ /**
+ @brief queries the partition type (dynamic)
+ */
+ static constexpr PartitionerType type() { return PartitionerType::DYNAMIC; }
+
/**
@brief default constructor
*/
- GuidedPartitioner() : PartitionerBase{1} {}
+ GuidedPartitioner() = default;
/**
@brief construct a guided partitioner with the given chunk size
+
*/
- explicit GuidedPartitioner(size_t sz) : PartitionerBase (sz) {}
+ explicit GuidedPartitioner(size_t sz) : PartitionerBase<C> (sz) {}
+
+ /**
+ @brief construct a guided partitioner with the given chunk size and the closure
+ */
+ explicit GuidedPartitioner(size_t sz, C&& closure) :
+ PartitionerBase<C>(sz, std::forward<C>(closure)) {
+ }
// --------------------------------------------------------------------------
// scheduling methods
std::enable_if_t<std::is_invocable_r_v<void, F, size_t, size_t>, void>* = nullptr
>
void loop(
- size_t N,
- size_t W,
- std::atomic<size_t>& next,
- F&& func
+ size_t N, size_t W, std::atomic<size_t>& next, F&& func
) const {
- size_t chunk_size = (_chunk_size == 0) ? size_t{1} : _chunk_size;
+ size_t chunk_size = (this->_chunk_size == 0) ? size_t{1} : this->_chunk_size;
size_t p1 = 2 * W * (chunk_size + 1);
float p2 = 0.5f / static_cast<float>(W);
std::enable_if_t<std::is_invocable_r_v<bool, F, size_t, size_t>, void>* = nullptr
>
void loop_until(
- size_t N,
- size_t W,
- std::atomic<size_t>& next,
- F&& func
+ size_t N, size_t W, std::atomic<size_t>& next, F&& func
) const {
- size_t chunk_size = (_chunk_size == 0) ? size_t{1} : _chunk_size;
+ size_t chunk_size = (this->_chunk_size == 0) ? size_t{1} : this->_chunk_size;
size_t p1 = 2 * W * (chunk_size + 1);
float p2 = 0.5f / static_cast<float>(W);
}
}
}
+
};
// ----------------------------------------------------------------------------
@brief class to construct a dynamic partitioner for scheduling parallel algorithms
+@tparam C closure wrapper type (default tf::DefaultClosureWrapper)
+
The partitioner splits iterations into many partitions each of size equal to
the given chunk size.
Different partitions are distributed dynamically to workers
without any specific order.
+
+In addition to partition size, the application can specify a closure wrapper
+for a dynamic partitioner.
+A closure wrapper allows the application to wrap a partitioned task
+(i.e., closure) with a custom function object that performs additional tasks.
+For example:
+
+@code{.cpp}
+std::atomic<int> count = 0;
+tf::Executor executor;
+tf::Taskflow taskflow;
+taskflow.for_each_index(0, 100, 1,
+  [](int i){
+ printf("%d\n", i);
+ },
+ tf::DynamicPartitioner(0, [](auto&& closure){
+ // do something before invoking the partitioned task
+ // ...
+
+ // invoke the partitioned task
+ closure();
+
+ // do something else after invoking the partitioned task
+ // ...
+  })
+);
+executor.run(taskflow).wait();
+@endcode
*/
-class DynamicPartitioner : public PartitionerBase {
+template <typename C = DefaultClosureWrapper>
+class DynamicPartitioner : public PartitionerBase<C> {
public:
+
+ /**
+ @brief queries the partition type (dynamic)
+ */
+ static constexpr PartitionerType type() { return PartitionerType::DYNAMIC; }
/**
@brief default constructor
*/
- DynamicPartitioner() : PartitionerBase{1} {};
+ DynamicPartitioner() = default;
/**
@brief construct a dynamic partitioner with the given chunk size
*/
- explicit DynamicPartitioner(size_t sz) : PartitionerBase (sz) {}
+ explicit DynamicPartitioner(size_t sz) : PartitionerBase<C>(sz) {}
+
+ /**
+ @brief construct a dynamic partitioner with the given chunk size and the closure
+ */
+ explicit DynamicPartitioner(size_t sz, C&& closure) :
+ PartitionerBase<C>(sz, std::forward<C>(closure)) {
+ }
// --------------------------------------------------------------------------
// scheduling methods
// --------------------------------------------------------------------------
-
+
/**
@private
*/
std::enable_if_t<std::is_invocable_r_v<void, F, size_t, size_t>, void>* = nullptr
>
void loop(
- size_t N,
- size_t,
- std::atomic<size_t>& next,
- F&& func
+ size_t N, size_t, std::atomic<size_t>& next, F&& func
) const {
- size_t chunk_size = (_chunk_size == 0) ? size_t{1} : _chunk_size;
+ size_t chunk_size = (this->_chunk_size == 0) ? size_t{1} : this->_chunk_size;
size_t curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed);
while(curr_b < N) {
std::enable_if_t<std::is_invocable_r_v<bool, F, size_t, size_t>, void>* = nullptr
>
void loop_until(
- size_t N,
- size_t,
- std::atomic<size_t>& next,
- F&& func
+ size_t N, size_t, std::atomic<size_t>& next, F&& func
) const {
- size_t chunk_size = (_chunk_size == 0) ? size_t{1} : _chunk_size;
+ size_t chunk_size = (this->_chunk_size == 0) ? size_t{1} : this->_chunk_size;
size_t curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed);
while(curr_b < N) {
curr_b = next.fetch_add(chunk_size, std::memory_order_relaxed);
}
}
+
};
// ----------------------------------------------------------------------------
/**
@class StaticPartitioner
-@brief class to construct a dynamic partitioner for scheduling parallel algorithms
+@brief class to construct a static partitioner for scheduling parallel algorithms
+
+@tparam C closure wrapper type (default tf::DefaultClosureWrapper)
The partitioner divides iterations into chunks and distributes chunks
to workers in order.
);
executor.run(taskflow).run();
@endcode
+
+In addition to partition size, the application can specify a closure wrapper
+for a static partitioner.
+A closure wrapper allows the application to wrap a partitioned task
+(i.e., closure) with a custom function object that performs additional tasks.
+For example:
+
+@code{.cpp}
+std::atomic<int> count = 0;
+tf::Executor executor;
+tf::Taskflow taskflow;
+taskflow.for_each_index(0, 100, 1,
+  [](int i){
+ printf("%d\n", i);
+ },
+ tf::StaticPartitioner(0, [](auto&& closure){
+ // do something before invoking the partitioned task
+ // ...
+
+ // invoke the partitioned task
+ closure();
+
+ // do something else after invoking the partitioned task
+ // ...
+  })
+);
+executor.run(taskflow).wait();
+@endcode
*/
-class StaticPartitioner : public PartitionerBase {
+template <typename C = DefaultClosureWrapper>
+class StaticPartitioner : public PartitionerBase<C> {
public:
+
+ /**
+ @brief queries the partition type (static)
+ */
+ static constexpr PartitionerType type() { return PartitionerType::STATIC; }
/**
@brief default constructor
*/
- StaticPartitioner() : PartitionerBase{0} {};
+ StaticPartitioner() = default;
/**
- @brief construct a dynamic partitioner with the given chunk size
+ @brief construct a static partitioner with the given chunk size
*/
- explicit StaticPartitioner(size_t sz) : PartitionerBase(sz) {}
+ explicit StaticPartitioner(size_t sz) : PartitionerBase<C>(sz) {}
+
+ /**
+ @brief construct a static partitioner with the given chunk size and the closure
+ */
+ explicit StaticPartitioner(size_t sz, C&& closure) :
+ PartitionerBase<C>(sz, std::forward<C>(closure)) {
+ }
/**
@brief queries the adjusted chunk size
@c W is the number of workers, and @c w is the worker ID.
*/
size_t adjusted_chunk_size(size_t N, size_t W, size_t w) const {
- return _chunk_size ? _chunk_size : N/W + (w < N%W);
+ return this->_chunk_size ? this->_chunk_size : N/W + (w < N%W);
}
// --------------------------------------------------------------------------
// scheduling methods
// --------------------------------------------------------------------------
-
+
/**
@private
*/
std::enable_if_t<std::is_invocable_r_v<void, F, size_t, size_t>, void>* = nullptr
>
void loop(
- size_t N,
- size_t W,
- size_t curr_b,
- size_t chunk_size,
- F&& func
+ size_t N, size_t W, size_t curr_b, size_t chunk_size, F&& func
) {
size_t stride = W * chunk_size;
while(curr_b < N) {
std::enable_if_t<std::is_invocable_r_v<bool, F, size_t, size_t>, void>* = nullptr
>
void loop_until(
- size_t N,
- size_t W,
- size_t curr_b,
- size_t chunk_size,
- F&& func
+ size_t N, size_t W, size_t curr_b, size_t chunk_size, F&& func
) {
size_t stride = W * chunk_size;
while(curr_b < N) {
@brief class to construct a random partitioner for scheduling parallel algorithms
+@tparam C closure wrapper type (default tf::DefaultClosureWrapper)
+
Similar to tf::DynamicPartitioner,
the partitioner splits iterations into many partitions but each with a random
chunk size in the range, <tt>c = [alpha * N * W, beta * N * W]</tt>.
By default, @c alpha is <tt>0.01</tt> and @c beta is <tt>0.5</tt>, respectively.
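+
+As a minimal illustrative sketch, the chunk-size bounds that follow from
+@c alpha and @c beta for a given @c N and @c W can be queried directly
+from the partitioner:
+
+@code{.cpp}
+tf::RandomPartitioner<> part;                    // alpha = 0.01, beta = 0.5
+auto [b1, b2] = part.chunk_size_range(1000, 4);  // bounds for N=1000, W=4
+@endcode
+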
+In addition to partition size, the application can specify a closure wrapper
+for a random partitioner.
+A closure wrapper allows the application to wrap a partitioned task
+(i.e., closure) with a custom function object that performs additional tasks.
+For example:
+
+@code{.cpp}
+tf::Taskflow taskflow;
+taskflow.for_each_index(0, 100, 1,
+  [](int i){
+    printf("%d\n", i);
+  },
+  tf::RandomPartitioner(0, [](auto&& closure){
+    // do something before invoking the partitioned task
+    // ...
+
+    // invoke the partitioned task
+    closure();
+
+    // do something else after invoking the partitioned task
+    // ...
+  })
+);
+executor.run(taskflow).wait();
+@endcode
*/
-class RandomPartitioner : public PartitionerBase {
+template <typename C = DefaultClosureWrapper>
+class RandomPartitioner : public PartitionerBase<C> {
public:
+
+ /**
+ @brief queries the partition type (dynamic)
+ */
+ static constexpr PartitionerType type() { return PartitionerType::DYNAMIC; }
/**
@brief default constructor
RandomPartitioner() = default;
/**
- @brief constructs a random partitioner
+  @brief construct a random partitioner with the given chunk size
*/
- RandomPartitioner(size_t cz) : PartitionerBase(cz) {}
+ explicit RandomPartitioner(size_t sz) : PartitionerBase<C>(sz) {}
+
+ /**
+ @brief construct a random partitioner with the given chunk size and the closure
+ */
+ explicit RandomPartitioner(size_t sz, C&& closure) :
+ PartitionerBase<C>(sz, std::forward<C>(closure)) {
+ }
/**
@brief constructs a random partitioner with the given parameters
*/
- RandomPartitioner(float alpha, float beta) : _alpha {alpha}, _beta {beta} {}
+ RandomPartitioner(float alpha, float beta) : _alpha{alpha}, _beta{beta} {}
+
+ /**
+ @brief constructs a random partitioner with the given parameters and the closure
+ */
+ RandomPartitioner(float alpha, float beta, C&& closure) :
+    PartitionerBase<C>(0, std::forward<C>(closure)),
+    _alpha {alpha}, _beta {beta} {
+ }
/**
@brief queries the @c alpha value
std::enable_if_t<std::is_invocable_r_v<void, F, size_t, size_t>, void>* = nullptr
>
void loop(
- size_t N,
- size_t W,
- std::atomic<size_t>& next,
- F&& func
+ size_t N, size_t W, std::atomic<size_t>& next, F&& func
) const {
auto [b1, b2] = chunk_size_range(N, W);
std::enable_if_t<std::is_invocable_r_v<bool, F, size_t, size_t>, void>* = nullptr
>
void loop_until(
- size_t N,
- size_t W,
- std::atomic<size_t>& next,
- F&& func
+ size_t N, size_t W, std::atomic<size_t>& next, F&& func
) const {
auto [b1, b2] = chunk_size_range(N, W);
float _alpha {0.01f};
float _beta {0.5f};
-
};
/**
Guided partitioner can achieve decent performance for most parallel algorithms,
especially for those with irregular and unbalanced workload per iteration.
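+
+As an illustrative example, an algorithm invoked without an explicit
+partitioner schedules with tf::DefaultPartitioner:
+
+@code{.cpp}
+// equivalent: both calls schedule with tf::DefaultPartitioner
+taskflow.for_each(data.begin(), data.end(), [](int& i){ i++; });
+taskflow.for_each(data.begin(), data.end(), [](int& i){ i++; }, tf::GuidedPartitioner<>());
+@endcode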
*/
-using DefaultPartitioner = GuidedPartitioner;
+using DefaultPartitioner = GuidedPartitioner<>;
/**
@brief determines if a type is a partitioner
A partitioner is a derived type from tf::PartitionerBase.
*/
-template <typename C>
-inline constexpr bool is_partitioner_v = std::is_base_of<PartitionerBase, C>::value;
+template <typename P>
+inline constexpr bool is_partitioner_v = std::is_base_of<IsPartitioner, P>::value;
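+
+// A minimal illustration of the trait (for exposition only):
+//
+//   static_assert(is_partitioner_v<StaticPartitioner<>>);
+//   static_assert(is_partitioner_v<DynamicPartitioner<>>);
+//   static_assert(!is_partitioner_v<int>);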
} // end of namespace tf -----------------------------------------------------
std::atomic<size_t> join_counter;
};
+
public:
/**
_build();
}
-// move constructor
-template <typename P>
-ScalablePipeline<P>::ScalablePipeline(ScalablePipeline&& rhs) :
-  rhs._longest_deferral = 0;
-  rhs._num_tokens = 0;
-}
+
+// move constructor
+template <typename P>
+ScalablePipeline<P>::ScalablePipeline(ScalablePipeline&& rhs):
+ _num_tokens {rhs._num_tokens},
+ _pipes {std::move(rhs._pipes)},
+ _pipeflows {std::move(rhs._pipeflows)},
+ _lines {std::move(rhs._lines)},
+ _ready_tokens {std::move(rhs._ready_tokens)},
+ _token_dependencies {std::move(rhs._token_dependencies)},
+ _deferred_tokens {std::move(rhs._deferred_tokens)},
+  _longest_deferral {rhs._longest_deferral} {
+
+ _graph.clear();
+ _tasks.resize(_pipeflows.size()+1);
+ rhs._longest_deferral = 0;
+ rhs._num_tokens = 0;
+ rhs._tasks.clear();
+ _build();
}
+
// move assignment operator
template <typename P>
ScalablePipeline<P>& ScalablePipeline<P>::operator = (ScalablePipeline&& rhs) {
- _graph = std::move(rhs._graph);
- _num_tokens = rhs._num_tokens;
- _pipes = std::move(rhs._pipes);
- _tasks = std::move(rhs._tasks);
- _pipeflows = std::move(rhs._pipeflows);
- _lines = std::move(rhs._lines);
- rhs._num_tokens = 0;
- _ready_tokens = std::move(rhs._ready_tokens);
- _token_dependencies = std::move(rhs._token_dependencies);
- _deferred_tokens = std::move(rhs._deferred_tokens);
- _longest_deferral = rhs._longest_deferral;
+ _num_tokens = rhs._num_tokens;
+ _pipes = std::move(rhs._pipes);
+ _pipeflows = std::move(rhs._pipeflows);
+ _lines = std::move(rhs._lines);
+ _ready_tokens = std::move(rhs._ready_tokens);
+ _token_dependencies = std::move(rhs._token_dependencies);
+ _deferred_tokens = std::move(rhs._deferred_tokens);
+ _longest_deferral = rhs._longest_deferral;
+
+ _graph.clear();
+ _tasks.resize(_pipeflows.size()+1);
+
rhs._longest_deferral = 0;
+ rhs._num_tokens = 0;
+ rhs._tasks.clear();
+ _build();
return *this;
}
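+
+// (illustrative sketch, not part of the library) moving a scalable pipeline
+// transfers its pipes, lines, and token state, and the destination rebuilds
+// its task graph via _build(); the moved-from pipeline is left empty:
+//
+//   std::vector<tf::Pipe<std::function<void(tf::Pipeflow&)>>> pipes;
+//   // ... populate pipes ...
+//   tf::ScalablePipeline spl(4, pipes.begin(), pipes.end());
+//   auto moved = std::move(spl);  // spl no longer owns any lines or pipes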
using namespace std::literals::string_literals;
FlowBuilder fb(_graph);
-
+
// init task
_tasks[0] = fb.emplace([this]() {
return static_cast<int>(_num_tokens % num_lines());
// line task
for(size_t l = 0; l < num_lines(); l++) {
-
+
_tasks[l + 1] = fb.emplace([this, l] (tf::Runtime& rt) mutable {
auto pf = &_pipeflows[l];
else {
_on_pipe(*pf, rt);
}
-
+
size_t c_f = pf->_pipe;
size_t n_f = (pf->_pipe + 1) % num_pipes();
size_t n_l = (pf->_line + 1) % num_lines();
-
+
pf->_pipe = n_f;
// ---- scheduling starts here ----
--- /dev/null
+#pragma once
+
+#include "launch.hpp"
+
+namespace tf {
+
+// Function: make_reduce_task
+template <typename B, typename E, typename T, typename O, typename P = DefaultPartitioner>
+auto make_reduce_task(B b, E e, T& init, O bop, P part = P()) {
+
+ using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
+ using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
+
+ return [=, &r=init] (Runtime& rt) mutable {
+
+ // fetch the iterator values
+ B_t beg = b;
+ E_t end = e;
+
+ size_t W = rt.executor().num_workers();
+ size_t N = std::distance(beg, end);
+
+ // only myself - no need to spawn another graph
+ if(W <= 1 || N <= part.chunk_size()) {
+ launch_loop(part, [&](){
+ for(; beg!=end; r = bop(r, *beg++));
+ });
+ return;
+ }
+
+ if(N < W) {
+ W = N;
+ }
+
+ std::mutex mtx;
+
+ // static partitioner
+ if constexpr(part.type() == PartitionerType::STATIC) {
+
+ size_t chunk_size;
+
+ for(size_t w=0, curr_b=0; w<W && curr_b < N; ++w, curr_b += chunk_size) {
+
+      // we force the chunk size to be at least two because the temporary
+      // variable sum needs to avoid a copy at the first step
+ chunk_size = std::max(size_t{2}, part.adjusted_chunk_size(N, W, w));
+
+ launch_loop(W, w, rt, part, [=, &bop, &mtx, &r, &part] () mutable {
+
+ std::advance(beg, curr_b);
+
+ if(N - curr_b == 1) {
+ std::lock_guard<std::mutex> lock(mtx);
+ r = bop(r, *beg);
+ return;
+ }
+
+ auto beg1 = beg++;
+ auto beg2 = beg++;
+ T sum = bop(*beg1, *beg2);
+
+ // loop reduce
+ part.loop(N, W, curr_b, chunk_size,
+ [&, prev_e=curr_b+2](size_t part_b, size_t part_e) mutable {
+
+ if(part_b > prev_e) {
+ std::advance(beg, part_b - prev_e);
+ }
+ else {
+ part_b = prev_e;
+ }
+
+ for(size_t x=part_b; x<part_e; x++, beg++) {
+ sum = bop(sum, *beg);
+ }
+ prev_e = part_e;
+ }
+ );
+
+ // final reduce
+ std::lock_guard<std::mutex> lock(mtx);
+ r = bop(r, sum);
+ });
+ }
+ rt.corun_all();
+ }
+ // dynamic partitioner
+ else {
+ std::atomic<size_t> next(0);
+ launch_loop(N, W, rt, next, part, [=, &bop, &mtx, &next, &r, &part] () mutable {
+ // pre-reduce
+ size_t s0 = next.fetch_add(2, std::memory_order_relaxed);
+
+ if(s0 >= N) {
+ return;
+ }
+
+ std::advance(beg, s0);
+
+ if(N - s0 == 1) {
+ std::lock_guard<std::mutex> lock(mtx);
+ r = bop(r, *beg);
+ return;
+ }
+
+ auto beg1 = beg++;
+ auto beg2 = beg++;
+
+ T sum = bop(*beg1, *beg2);
+
+ // loop reduce
+ part.loop(N, W, next,
+ [&, prev_e=s0+2](size_t curr_b, size_t curr_e) mutable {
+ std::advance(beg, curr_b - prev_e);
+ for(size_t x=curr_b; x<curr_e; x++, beg++) {
+ sum = bop(sum, *beg);
+ }
+ prev_e = curr_e;
+ }
+ );
+
+ // final reduce
+ std::lock_guard<std::mutex> lock(mtx);
+ r = bop(r, sum);
+ });
+ }
+ };
+}
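+
+// (illustrative) make_reduce_task composes a reduction into a standalone
+// callable that can be emplaced into a taskflow, e.g.:
+//
+//   std::vector<int> data(1000, 1);
+//   int sum = 0;
+//   taskflow.emplace(tf::make_reduce_task(
+//     data.begin(), data.end(), sum, std::plus<int>{}
+//   ));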
+
+// Function: make_transform_reduce_task
+template <
+ typename B, typename E, typename T, typename BOP, typename UOP,
+ typename P = DefaultPartitioner
+>
+auto make_transform_reduce_task(B b, E e, T& init, BOP bop, UOP uop, P part = P()) {
+
+ using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
+ using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
+
+ return [=, &r=init] (Runtime& rt) mutable {
+
+ // fetch the iterator values
+ B_t beg = b;
+ E_t end = e;
+
+ size_t W = rt.executor().num_workers();
+ size_t N = std::distance(beg, end);
+
+ // only myself - no need to spawn another graph
+ if(W <= 1 || N <= part.chunk_size()) {
+ launch_loop(part, [&](){
+ for(; beg!=end; r = bop(std::move(r), uop(*beg++)));
+ });
+ return;
+ }
+
+ if(N < W) {
+ W = N;
+ }
+
+ std::mutex mtx;
+
+ // static partitioner
+ if constexpr(part.type() == PartitionerType::STATIC) {
+
+ size_t chunk_size;
+
+ for(size_t w=0, curr_b=0; w<W && curr_b < N; ++w, curr_b += chunk_size) {
+
+ chunk_size = part.adjusted_chunk_size(N, W, w);
+
+ launch_loop(W, w, rt, part, [=, &bop, &uop, &mtx, &r, &part] () mutable {
+ std::advance(beg, curr_b);
+
+ if(N - curr_b == 1) {
+ std::lock_guard<std::mutex> lock(mtx);
+ r = bop(std::move(r), uop(*beg));
+ return;
+ }
+
+ T sum = (chunk_size == 1) ? uop(*beg++) : bop(uop(*beg++), uop(*beg++));
+
+ // loop reduce
+ part.loop(N, W, curr_b, chunk_size,
+ [&, prev_e=curr_b+(chunk_size == 1 ? 1 : 2)]
+ (size_t part_b, size_t part_e) mutable {
+ if(part_b > prev_e) {
+ std::advance(beg, part_b - prev_e);
+ }
+ else {
+ part_b = prev_e;
+ }
+ for(size_t x=part_b; x<part_e; x++, beg++) {
+ sum = bop(std::move(sum), uop(*beg));
+ }
+ prev_e = part_e;
+ }
+ );
+
+ // final reduce
+ std::lock_guard<std::mutex> lock(mtx);
+ r = bop(std::move(r), std::move(sum));
+ });
+ }
+
+ rt.corun_all();
+ }
+ // dynamic partitioner
+ else {
+ std::atomic<size_t> next(0);
+
+ launch_loop(N, W, rt, next, part, [=, &bop, &uop, &mtx, &next, &r, &part] () mutable {
+ // pre-reduce
+ size_t s0 = next.fetch_add(2, std::memory_order_relaxed);
+
+ if(s0 >= N) {
+ return;
+ }
+
+ std::advance(beg, s0);
+
+ if(N - s0 == 1) {
+ std::lock_guard<std::mutex> lock(mtx);
+ r = bop(std::move(r), uop(*beg));
+ return;
+ }
+
+ auto beg1 = beg++;
+ auto beg2 = beg++;
+
+ T sum = bop(uop(*beg1), uop(*beg2));
+
+ // loop reduce
+ part.loop(N, W, next,
+ [&, prev_e=s0+2](size_t curr_b, size_t curr_e) mutable {
+ std::advance(beg, curr_b - prev_e);
+ for(size_t x=curr_b; x<curr_e; x++, beg++) {
+ sum = bop(std::move(sum), uop(*beg));
+ }
+ prev_e = curr_e;
+ }
+ );
+
+ // final reduce
+ std::lock_guard<std::mutex> lock(mtx);
+ r = bop(std::move(r), std::move(sum));
+ });
+ }
+ };
+}
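+
+// (illustrative) the unary-op overload transforms each element before
+// reducing, e.g., a sum of squares:
+//
+//   taskflow.emplace(tf::make_transform_reduce_task(
+//     data.begin(), data.end(), sum,
+//     std::plus<int>{}, [](int x){ return x * x; }
+//   ));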
+
+// Function: make_transform_reduce_task with two binary operations
+template <
+ typename B1, typename E1, typename B2, typename T, typename BOP_R, typename BOP_T,
+ typename P = DefaultPartitioner,
+ std::enable_if_t<!is_partitioner_v<std::decay_t<BOP_T>>, void>* = nullptr
+>
+auto make_transform_reduce_task(
+ B1 b1, E1 e1, B2 b2, T& init, BOP_R bop_r, BOP_T bop_t, P part = P()
+) {
+
+ using B1_t = std::decay_t<unwrap_ref_decay_t<B1>>;
+ using E1_t = std::decay_t<unwrap_ref_decay_t<E1>>;
+ using B2_t = std::decay_t<unwrap_ref_decay_t<B2>>;
+
+ return [=, &r=init] (Runtime& rt) mutable {
+
+ // fetch the iterator values
+ B1_t beg1 = b1;
+ E1_t end1 = e1;
+ B2_t beg2 = b2;
+
+ size_t W = rt.executor().num_workers();
+ size_t N = std::distance(beg1, end1);
+
+ // only myself - no need to spawn another graph
+ if(W <= 1 || N <= part.chunk_size()) {
+ launch_loop(part, [&](){
+ for(; beg1!=end1; r = bop_r(std::move(r), bop_t(*beg1++, *beg2++)));
+ });
+ return;
+ }
+
+ if(N < W) {
+ W = N;
+ }
+
+ std::mutex mtx;
+
+ // static partitioner
+ if constexpr(part.type() == PartitionerType::STATIC) {
+
+ size_t chunk_size;
+
+ for(size_t w=0, curr_b=0; w<W && curr_b < N; ++w, curr_b += chunk_size) {
+
+ chunk_size = part.adjusted_chunk_size(N, W, w);
+
+ launch_loop(W, w, rt, part, [=, &bop_r, &bop_t, &mtx, &r, &part] () mutable {
+ std::advance(beg1, curr_b);
+ std::advance(beg2, curr_b);
+
+ if(N - curr_b == 1) {
+ std::lock_guard<std::mutex> lock(mtx);
+ r = bop_r(std::move(r), bop_t(*beg1, *beg2));
+ return;
+ }
+
+ T sum = (chunk_size == 1) ? bop_t(*beg1++, *beg2++) :
+ bop_r(bop_t(*beg1++, *beg2++), bop_t(*beg1++, *beg2++));
+
+ // loop reduce
+ part.loop(N, W, curr_b, chunk_size,
+ [&, prev_e=curr_b+(chunk_size == 1 ? 1 : 2)]
+ (size_t part_b, size_t part_e) mutable {
+ if(part_b > prev_e) {
+ std::advance(beg1, part_b - prev_e);
+ std::advance(beg2, part_b - prev_e);
+ }
+ else {
+ part_b = prev_e;
+ }
+ for(size_t x=part_b; x<part_e; x++, beg1++, beg2++) {
+ sum = bop_r(std::move(sum), bop_t(*beg1, *beg2));
+ }
+ prev_e = part_e;
+ }
+ );
+
+ // final reduce
+ std::lock_guard<std::mutex> lock(mtx);
+ r = bop_r(std::move(r), std::move(sum));
+ });
+ }
+
+ rt.corun_all();
+ }
+ // dynamic partitioner
+ else {
+ std::atomic<size_t> next(0);
+
+ launch_loop(N, W, rt, next, part, [=, &bop_r, &bop_t, &mtx, &next, &r, &part] () mutable {
+ // pre-reduce
+ size_t s0 = next.fetch_add(2, std::memory_order_relaxed);
+
+ if(s0 >= N) {
+ return;
+ }
+
+ std::advance(beg1, s0);
+ std::advance(beg2, s0);
+
+ if(N - s0 == 1) {
+ std::lock_guard<std::mutex> lock(mtx);
+ r = bop_r(std::move(r), bop_t(*beg1, *beg2));
+ return;
+ }
+
+ auto beg11 = beg1++;
+ auto beg12 = beg1++;
+ auto beg21 = beg2++;
+ auto beg22 = beg2++;
+
+ T sum = bop_r(bop_t(*beg11, *beg21), bop_t(*beg12, *beg22));
+
+ // loop reduce
+ part.loop(N, W, next,
+ [&, prev_e=s0+2](size_t curr_b, size_t curr_e) mutable {
+ std::advance(beg1, curr_b - prev_e);
+ std::advance(beg2, curr_b - prev_e);
+ for(size_t x=curr_b; x<curr_e; x++, beg1++, beg2++) {
+ sum = bop_r(std::move(sum), bop_t(*beg1, *beg2));
+ }
+ prev_e = curr_e;
+ }
+ );
+
+ // final reduce
+ std::lock_guard<std::mutex> lock(mtx);
+ r = bop_r(std::move(r), std::move(sum));
+ });
+ }
+ };
+}
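+
+// (illustrative) the two-range overload reduces pairwise-transformed values,
+// e.g., an inner product of two equally sized ranges:
+//
+//   taskflow.emplace(tf::make_transform_reduce_task(
+//     x.begin(), x.end(), y.begin(), dot,
+//     std::plus<double>{}, std::multiplies<double>{}
+//   ));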
+
+// ----------------------------------------------------------------------------
+// default reduction
+// ----------------------------------------------------------------------------
+
+// Function: reduce
+template <typename B, typename E, typename T, typename O, typename P>
+Task FlowBuilder::reduce(B beg, E end, T& init, O bop, P part) {
+ return emplace(make_reduce_task(beg, end, init, bop, part));
+}
+
+// ----------------------------------------------------------------------------
+// default transform and reduction
+// ----------------------------------------------------------------------------
+
+// Function: transform_reduce
+template <typename B, typename E, typename T, typename BOP, typename UOP, typename P,
+ std::enable_if_t<is_partitioner_v<std::decay_t<P>>, void>*
+>
+Task FlowBuilder::transform_reduce(
+ B beg, E end, T& init, BOP bop, UOP uop, P part
+) {
+ return emplace(make_transform_reduce_task(
+ beg, end, init, bop, uop, part
+ ));
+}
+
+// Function: transform_reduce
+template <
+ typename B1, typename E1, typename B2, typename T, typename BOP_R, typename BOP_T,
+ typename P,
+ std::enable_if_t<!is_partitioner_v<std::decay_t<BOP_T>>, void>*
+>
+Task FlowBuilder::transform_reduce(
+ B1 beg1, E1 end1, B2 beg2, T& init, BOP_R bop_r, BOP_T bop_t, P part
+) {
+ return emplace(make_transform_reduce_task(
+ beg1, end1, beg2, init, bop_r, bop_t, part
+ ));
+}
+
+} // end of namespace tf -----------------------------------------------------
+
+
+
+
// Function: scan_loop
template <typename Iterator, typename BufferT, typename B>
-TF_FORCE_INLINE void scan_loop(
+void scan_loop(
tf::Runtime& rt,
std::atomic<size_t>& counter,
BufferT& buf,
- B&& bop,
+ B bop,
Iterator d_beg,
size_t W,
size_t w,
}
}
+} // end of namespace tf::detail ---------------------------------------------
+
+
// Function: make_inclusive_scan_task
-template <typename B, typename E, typename D, typename BOP>
-TF_FORCE_INLINE auto make_inclusive_scan_task(B first, E last, D d_first, BOP bop) {
+template <typename B, typename E, typename D, typename BOP, typename P = DefaultPartitioner,
+ std::enable_if_t<is_partitioner_v<std::decay_t<P>>, void>* = nullptr
+>
+auto make_inclusive_scan_task(
+ B first, E last, D d_first, BOP bop, P part = P()
+) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using D_t = std::decay_t<unwrap_ref_decay_t<D>>;
using value_type = typename std::iterator_traits<B_t>::value_type;
- using namespace std::string_literals;
return [=] (Runtime& rt) mutable {
// only myself - no need to spawn another graph
if(W <= 1 || N <= 2) {
- std::inclusive_scan(s_beg, s_end, d_beg, bop);
+ launch_loop(part, [&](){
+ std::inclusive_scan(s_beg, s_end, d_beg, bop);
+ });
return;
}
chunk_size = std::min(Q + (w < R), N - curr_b);
// block scan
- launch_loop(W, w, rt, [=, &rt, &bop, &buf, &counter] () mutable {
-
+ launch_loop(W, w, rt, part, [=, &rt, &bop, &buf, &counter] () mutable {
auto result = d_beg;
// local scan per worker
curr_b += chunk_size;
}
- rt.join();
+ rt.corun_all();
};
}
// Function: make_inclusive_scan_task
-template <typename B, typename E, typename D, typename BOP, typename T>
-TF_FORCE_INLINE auto make_inclusive_scan_task(B first, E last, D d_first, BOP bop, T init) {
+template <typename B, typename E, typename D, typename BOP, typename T, typename P = DefaultPartitioner,
+ std::enable_if_t<!is_partitioner_v<std::decay_t<T>>, void>* = nullptr
+>
+auto make_inclusive_scan_task(
+ B first, E last, D d_first, BOP bop, T init, P part = P()
+) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using D_t = std::decay_t<unwrap_ref_decay_t<D>>;
using value_type = typename std::iterator_traits<B_t>::value_type;
- using namespace std::string_literals;
return [=] (Runtime& rt) mutable {
// only myself - no need to spawn another graph
if(W <= 1 || N <= 2) {
- std::inclusive_scan(s_beg, s_end, d_beg, bop, init);
+ launch_loop(part, [&](){
+ std::inclusive_scan(s_beg, s_end, d_beg, bop, init);
+ });
return;
}
chunk_size = std::min(Q + (w < R), N - curr_b);
// block scan
- launch_loop(W, w, rt, [=, &rt, &bop, &buf, &counter] () mutable {
-
+ launch_loop(W, w, rt, part, [=, &rt, &bop, &buf, &counter] () mutable {
auto result = d_beg;
// local scan per worker
- auto& init = buf[w].data;
- *d_beg++ = init = (w == 0) ? bop(init, *s_beg++) : *s_beg++;
+ auto& local = buf[w].data;
+ *d_beg++ = local = (w == 0) ? bop(local, *s_beg++) : *s_beg++;
for(size_t i=1; i<chunk_size; i++){
- *d_beg++ = init = bop(init, *s_beg++);
+ *d_beg++ = local = bop(local, *s_beg++);
}
// block scan
curr_b += chunk_size;
}
- rt.join();
+ rt.corun_all();
};
}
// ----------------------------------------------------------------------------
// Function: transform_inclusive_scan
-template <typename B, typename E, typename D, typename BOP, typename UOP>
-TF_FORCE_INLINE auto make_transform_inclusive_scan_task(
- B first, E last, D d_first, BOP bop, UOP uop
+template <typename B, typename E, typename D, typename BOP, typename UOP, typename P = DefaultPartitioner,
+ std::enable_if_t<is_partitioner_v<std::decay_t<P>>, void>* = nullptr
+>
+auto make_transform_inclusive_scan_task(
+ B first, E last, D d_first, BOP bop, UOP uop, P part = P()
) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using D_t = std::decay_t<unwrap_ref_decay_t<D>>;
using value_type = typename std::iterator_traits<B_t>::value_type;
- using namespace std::string_literals;
return [=] (Runtime& rt) mutable {
// only myself - no need to spawn another graph
if(W <= 1 || N <= 2) {
- std::transform_inclusive_scan(s_beg, s_end, d_beg, bop, uop);
+ launch_loop(part, [&](){
+ std::transform_inclusive_scan(s_beg, s_end, d_beg, bop, uop);
+ });
return;
}
chunk_size = std::min(Q + (w < R), N - curr_b);
// block scan
- launch_loop(W, w, rt, [=, &rt, &bop, &uop, &buf, &counter] () mutable {
-
+ launch_loop(W, w, rt, part, [=, &rt, &bop, &uop, &buf, &counter] () mutable {
auto result = d_beg;
// local scan per worker
curr_b += chunk_size;
}
- rt.join();
+ rt.corun_all();
};
}
// Function: transform_inclusive_scan
-template <typename B, typename E, typename D, typename BOP, typename UOP, typename T>
-TF_FORCE_INLINE auto make_transform_inclusive_scan_task(
- B first, E last, D d_first, BOP bop, UOP uop, T init
+template <typename B, typename E, typename D, typename BOP, typename UOP, typename T, typename P = DefaultPartitioner,
+ std::enable_if_t<!is_partitioner_v<std::decay_t<T>>, void>* = nullptr
+>
+auto make_transform_inclusive_scan_task(
+ B first, E last, D d_first, BOP bop, UOP uop, T init, P part = P()
) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using D_t = std::decay_t<unwrap_ref_decay_t<D>>;
using value_type = typename std::iterator_traits<B_t>::value_type;
- using namespace std::string_literals;
return [=] (Runtime& rt) mutable {
// only myself - no need to spawn another graph
if(W <= 1 || N <= 2) {
- std::transform_inclusive_scan(s_beg, s_end, d_beg, bop, uop, init);
+ launch_loop(part, [&](){
+ std::transform_inclusive_scan(s_beg, s_end, d_beg, bop, uop, init);
+ });
return;
}
chunk_size = std::min(Q + (w < R), N - curr_b);
// block scan
- launch_loop(W, w, rt, [=, &rt, &bop, &uop, &buf, &counter] () mutable {
-
+ launch_loop(W, w, rt, part, [=, &rt, &bop, &uop, &buf, &counter] () mutable {
auto result = d_beg;
// local scan per worker
- auto& init = buf[w].data;
- *d_beg++ = init = (w == 0) ? bop(init, uop(*s_beg++)) : uop(*s_beg++);
+ auto& local = buf[w].data;
+ *d_beg++ = local = (w == 0) ? bop(local, uop(*s_beg++)) : uop(*s_beg++);
for(size_t i=1; i<chunk_size; i++){
- *d_beg++ = init = bop(init, uop(*s_beg++));
+ *d_beg++ = local = bop(local, uop(*s_beg++));
}
// block scan
curr_b += chunk_size;
}
- rt.join();
+ rt.corun_all();
};
}
// ----------------------------------------------------------------------------
// Function: make_exclusive_scan_task
-template <typename B, typename E, typename D, typename T, typename BOP>
-TF_FORCE_INLINE auto make_exclusive_scan_task(
- B first, E last, D d_first, T init, BOP bop
+template <typename B, typename E, typename D, typename T, typename BOP, typename P = DefaultPartitioner>
+auto make_exclusive_scan_task(
+ B first, E last, D d_first, T init, BOP bop, P part = P()
) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using D_t = std::decay_t<unwrap_ref_decay_t<D>>;
using value_type = typename std::iterator_traits<B_t>::value_type;
- using namespace std::string_literals;
return [=] (Runtime& rt) mutable {
// only myself - no need to spawn another graph
if(W <= 1 || N <= 2) {
- std::exclusive_scan(s_beg, s_end, d_beg, init, bop);
+ launch_loop(part, [&](){
+ std::exclusive_scan(s_beg, s_end, d_beg, init, bop);
+ });
return;
}
chunk_size = std::min(Q + (w < R), N - curr_b);
// block scan
- launch_loop(W, w, rt, [=, &rt, &bop, &buf, &counter] () mutable {
-
+ launch_loop(W, w, rt, part, [=, &rt, &bop, &buf, &counter] () mutable {
auto result = d_beg;
// local scan per worker
- auto& init = buf[w].data;
+ auto& local = buf[w].data;
for(size_t i=1; i<chunk_size; i++) {
- auto v = init;
- init = bop(init, *s_beg++);
+ auto v = local;
+ local = bop(local, *s_beg++);
*d_beg++ = std::move(v);
}
- *d_beg++ = init;
+ *d_beg++ = local;
// block scan
detail::scan_loop(rt, counter, buf, bop, result, W, w, chunk_size);
curr_b += chunk_size;
}
- rt.join();
+ rt.corun_all();
};
}
// ----------------------------------------------------------------------------
// Function: make_transform_exclusive_scan_task
-template <typename B, typename E, typename D, typename T, typename BOP, typename UOP>
-TF_FORCE_INLINE auto make_transform_exclusive_scan_task(
- B first, E last, D d_first, T init, BOP bop, UOP uop
+template <typename B, typename E, typename D, typename T, typename BOP, typename UOP, typename P = DefaultPartitioner>
+auto make_transform_exclusive_scan_task(
+ B first, E last, D d_first, T init, BOP bop, UOP uop, P part = P()
) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using D_t = std::decay_t<unwrap_ref_decay_t<D>>;
using value_type = typename std::iterator_traits<B_t>::value_type;
- using namespace std::string_literals;
return [=] (Runtime& rt) mutable {
// only myself - no need to spawn another graph
if(W <= 1 || N <= 2) {
- std::transform_exclusive_scan(s_beg, s_end, d_beg, init, bop, uop);
+ launch_loop(part, [&](){
+ std::transform_exclusive_scan(s_beg, s_end, d_beg, init, bop, uop);
+ });
return;
}
chunk_size = std::min(Q + (w < R), N - curr_b);
// block scan
- launch_loop(W, w, rt, [=, &rt, &bop, &uop, &buf, &counter] () mutable {
-
+ launch_loop(W, w, rt, part, [=, &rt, &bop, &uop, &buf, &counter] () mutable {
auto result = d_beg;
// local scan per worker
- auto& init = buf[w].data;
+ auto& local = buf[w].data;
for(size_t i=1; i<chunk_size; i++) {
- auto v = init;
- init = bop(init, uop(*s_beg++));
+ auto v = local;
+ local = bop(local, uop(*s_beg++));
*d_beg++ = std::move(v);
}
- *d_beg++ = init;
+ *d_beg++ = local;
// block scan
detail::scan_loop(rt, counter, buf, bop, result, W, w, chunk_size);
curr_b += chunk_size;
}
- rt.join();
+ rt.corun_all();
};
}
-} // end of namespace tf::detail ---------------------------------------------
// ----------------------------------------------------------------------------
// Inclusive Scan
// ----------------------------------------------------------------------------
// Function: inclusive_scan
-template <typename B, typename E, typename D, typename BOP>
-Task FlowBuilder::inclusive_scan(B first, E last, D d_first, BOP bop) {
- return emplace(detail::make_inclusive_scan_task(
- first, last, d_first, bop
- ));
+template <typename B, typename E, typename D, typename BOP, typename P,
+ std::enable_if_t<is_partitioner_v<std::decay_t<P>>, void>*
+>
+Task FlowBuilder::inclusive_scan(B first, E last, D d_first, BOP bop, P part) {
+ return emplace(make_inclusive_scan_task(first, last, d_first, bop, part));
}
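+
+// (illustrative) an in-place prefix sum; the partitioner argument can be
+// omitted when the declaration defaults it to tf::DefaultPartitioner:
+//
+//   std::vector<int> v = {1, 2, 3, 4};
+//   taskflow.inclusive_scan(v.begin(), v.end(), v.begin(), std::plus<int>{});
+//   // v becomes {1, 3, 6, 10} once the task has run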
// Function: inclusive_scan
-template <typename B, typename E, typename D, typename BOP, typename T>
-Task FlowBuilder::inclusive_scan(B first, E last, D d_first, BOP bop, T init) {
- return emplace(detail::make_inclusive_scan_task(
- first, last, d_first, bop, init
- ));
+template <typename B, typename E, typename D, typename BOP, typename T, typename P,
+ std::enable_if_t<!is_partitioner_v<std::decay_t<T>>, void>*
+>
+Task FlowBuilder::inclusive_scan(B first, E last, D d_first, BOP bop, T init, P part) {
+ return emplace(make_inclusive_scan_task(first, last, d_first, bop, init, part));
}
// ----------------------------------------------------------------------------
// ----------------------------------------------------------------------------
// Function: transform_inclusive_scan
-template <typename B, typename E, typename D, typename BOP, typename UOP>
+template <typename B, typename E, typename D, typename BOP, typename UOP, typename P,
+ std::enable_if_t<is_partitioner_v<std::decay_t<P>>, void>*
+>
Task FlowBuilder::transform_inclusive_scan(
- B first, E last, D d_first, BOP bop, UOP uop
+ B first, E last, D d_first, BOP bop, UOP uop, P part
) {
- return emplace(detail::make_transform_inclusive_scan_task(
- first, last, d_first, bop, uop
+ return emplace(make_transform_inclusive_scan_task(
+ first, last, d_first, bop, uop, part
));
}
// Function: transform_inclusive_scan
-template <typename B, typename E, typename D, typename BOP, typename UOP, typename T>
+template <typename B, typename E, typename D, typename BOP, typename UOP, typename T, typename P,
+ std::enable_if_t<!is_partitioner_v<std::decay_t<T>>, void>*
+>
Task FlowBuilder::transform_inclusive_scan(
- B first, E last, D d_first, BOP bop, UOP uop, T init
+ B first, E last, D d_first, BOP bop, UOP uop, T init, P part
) {
- return emplace(detail::make_transform_inclusive_scan_task(
- first, last, d_first, bop, uop, init
+ return emplace(make_transform_inclusive_scan_task(
+ first, last, d_first, bop, uop, init, part
));
}
// ----------------------------------------------------------------------------
// Function: exclusive_scan
-template <typename B, typename E, typename D, typename T, typename BOP>
-Task FlowBuilder::exclusive_scan(B first, E last, D d_first, T init, BOP bop) {
- return emplace(detail::make_exclusive_scan_task(
- first, last, d_first, init, bop
+template <typename B, typename E, typename D, typename T, typename BOP, typename P>
+Task FlowBuilder::exclusive_scan(B first, E last, D d_first, T init, BOP bop, P part) {
+ return emplace(make_exclusive_scan_task(
+ first, last, d_first, init, bop, part
));
}
// ----------------------------------------------------------------------------
// Function: transform_exclusive_scan
-template <typename B, typename E, typename D, typename T, typename BOP, typename UOP>
+template <typename B, typename E, typename D, typename T, typename BOP, typename UOP, typename P>
Task FlowBuilder::transform_exclusive_scan(
- B first, E last, D d_first, T init, BOP bop, UOP uop
+ B first, E last, D d_first, T init, BOP bop, UOP uop, P part
) {
- return emplace(detail::make_transform_exclusive_scan_task(
- first, last, d_first, init, bop, uop
+ return emplace(make_transform_exclusive_scan_task(
+ first, last, d_first, init, bop, uop, part
));
}
} // end of namespace tf -----------------------------------------------------
+
#include "../core/async.hpp"
-namespace tf {
+namespace tf::detail {
// threshold whether or not to perform parallel sort
template <typename I>
//rt.join();
}
-// ----------------------------------------------------------------------------
-// tf::Taskflow::sort
-// ----------------------------------------------------------------------------
+} // end of namespace tf::detail ---------------------------------------------
-// Function: sort
-template <typename B, typename E, typename C>
-Task FlowBuilder::sort(B beg, E end, C cmp) {
+namespace tf {
- Task task = emplace([b=beg, e=end, cmp] (Runtime& rt) mutable {
+// Function: make_sort_task
+template <typename B, typename E, typename C>
+TF_FORCE_INLINE auto make_sort_task(B b, E e, C cmp) {
+
+ return [b, e, cmp] (Runtime& rt) mutable {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
return;
}
- size_t W = rt._executor.num_workers();
+ size_t W = rt.executor().num_workers();
size_t N = std::distance(beg, end);
// only myself - no need to spawn another graph
- if(W <= 1 || N <= parallel_sort_cutoff<B_t>()) {
+ if(W <= 1 || N <= detail::parallel_sort_cutoff<B_t>()) {
std::sort(beg, end, cmp);
return;
}
//parallel_3wqsort(rt, beg, end-1, cmp);
- parallel_pdqsort<B_t, C,
+ detail::parallel_pdqsort<B_t, C,
is_std_compare_v<std::decay_t<C>> &&
std::is_arithmetic_v<typename std::iterator_traits<B_t>::value_type>
>(rt, beg, end, cmp, log2(end - beg));
- rt.join();
- });
+ rt.corun_all();
+ };
+}
+
+template <typename B, typename E>
+TF_FORCE_INLINE auto make_sort_task(B beg, E end) {
+ using value_type = std::decay_t<decltype(*std::declval<B>())>;
+ return make_sort_task(beg, end, std::less<value_type>{});
+}
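+
+// (illustrative) composing a sort as a standalone task:
+//
+//   std::vector<int> v = {3, 1, 2};
+//   taskflow.emplace(tf::make_sort_task(v.begin(), v.end()));  // ascending order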
+
+// ----------------------------------------------------------------------------
+// tf::Taskflow::sort
+// ----------------------------------------------------------------------------
- return task;
+// Function: sort
+template <typename B, typename E, typename C>
+Task FlowBuilder::sort(B beg, E end, C cmp) {
+ return emplace(make_sort_task(beg, end, cmp));
}
// Function: sort
template <typename B, typename E>
Task FlowBuilder::sort(B beg, E end) {
- using value_type = std::decay_t<decltype(*std::declval<B>())>;
- return sort(beg, end, std::less<value_type>{});
+ return emplace(make_sort_task(beg, end));
}
} // namespace tf ------------------------------------------------------------
namespace tf {
-namespace detail {
-
// Function: make_transform_task
-template <typename B, typename E, typename O, typename C, typename P>
-TF_FORCE_INLINE auto make_transform_task(
- B first1, E last1, O d_first, C c, P&& part
-) {
-
- using namespace std::string_literals;
+template <
+ typename B, typename E, typename O, typename C, typename P = DefaultPartitioner,
+ std::enable_if_t<is_partitioner_v<std::decay_t<P>>, void>* = nullptr
+>
+auto make_transform_task(B first1, E last1, O d_first, C c, P part = P()) {
using B_t = std::decay_t<unwrap_ref_decay_t<B>>;
using E_t = std::decay_t<unwrap_ref_decay_t<E>>;
using O_t = std::decay_t<unwrap_ref_decay_t<O>>;
- return
- [first1, last1, d_first, c, part=std::forward<P>(part)]
- (Runtime& rt) mutable {
+ return [=] (Runtime& rt) mutable {
// fetch the stateful values
B_t beg = first1;
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
- std::transform(beg, end, d_beg, c);
+ launch_loop(part, [&](){
+ std::transform(beg, end, d_beg, c);
+ });
return;
}
}
// static partitioner
- if constexpr(std::is_same_v<std::decay_t<P>, StaticPartitioner>) {
+ if constexpr(part.type() == PartitionerType::STATIC) {
size_t chunk_size;
for(size_t w=0, curr_b=0; w<W && curr_b < N; ++w, curr_b += chunk_size) {
chunk_size = part.adjusted_chunk_size(N, W, w);
- launch_loop(W, w, rt, [=, &part] () mutable {
+ launch_loop(W, w, rt, part, [=, &part] () mutable {
part.loop(N, W, curr_b, chunk_size,
- [&, prev_e=size_t{0}](size_t curr_b, size_t curr_e) mutable {
- std::advance(beg, curr_b - prev_e);
- std::advance(d_beg, curr_b - prev_e);
- for(size_t x = curr_b; x<curr_e; x++) {
+ [&, prev_e=size_t{0}](size_t part_b, size_t part_e) mutable {
+ std::advance(beg, part_b - prev_e);
+ std::advance(d_beg, part_b - prev_e);
+ for(size_t x = part_b; x<part_e; x++) {
*d_beg++ = c(*beg++);
}
- prev_e = curr_e;
+ prev_e = part_e;
}
- );
+ );
});
}
- rt.join();
+ rt.corun_all();
}
// dynamic partitioner
else {
std::atomic<size_t> next(0);
-
launch_loop(N, W, rt, next, part, [=, &next, &part] () mutable {
part.loop(N, W, next,
- [&, prev_e=size_t{0}](size_t curr_b, size_t curr_e) mutable {
- std::advance(beg, curr_b - prev_e);
- std::advance(d_beg, curr_b - prev_e);
- for(size_t x = curr_b; x<curr_e; x++) {
+ [&, prev_e=size_t{0}](size_t part_b, size_t part_e) mutable {
+ std::advance(beg, part_b - prev_e);
+ std::advance(d_beg, part_b - prev_e);
+ for(size_t x = part_b; x<part_e; x++) {
*d_beg++ = c(*beg++);
}
- prev_e = curr_e;
+ prev_e = part_e;
}
);
});
// Function: make_transform_task
template <
- typename B1, typename E1, typename B2, typename O, typename C, typename P,
+ typename B1, typename E1, typename B2, typename O, typename C, typename P = DefaultPartitioner,
std::enable_if_t<!is_partitioner_v<std::decay_t<C>>, void>* = nullptr
>
-TF_FORCE_INLINE auto make_transform_task(
- B1 first1, E1 last1, B2 first2, O d_first, C c, P&& part
-) {
-
- using namespace std::string_literals;
+auto make_transform_task(B1 first1, E1 last1, B2 first2, O d_first, C c, P part = P()) {
using B1_t = std::decay_t<unwrap_ref_decay_t<B1>>;
using E1_t = std::decay_t<unwrap_ref_decay_t<E1>>;
using B2_t = std::decay_t<unwrap_ref_decay_t<B2>>;
using O_t = std::decay_t<unwrap_ref_decay_t<O>>;
- return
- [first1, last1, first2, d_first, c, part=std::forward<P>(part)]
- (Runtime& rt) mutable {
+ return [=] (Runtime& rt) mutable {
// fetch the stateful values
B1_t beg1 = first1;
// only myself - no need to spawn another graph
if(W <= 1 || N <= part.chunk_size()) {
- std::transform(beg1, end1, beg2, d_beg, c);
+ launch_loop(part, [&](){
+ std::transform(beg1, end1, beg2, d_beg, c);
+ });
return;
}
}
// static partitioner
- if constexpr(std::is_same_v<std::decay_t<P>, StaticPartitioner>) {
+ if constexpr(part.type() == PartitionerType::STATIC) {
size_t chunk_size;
for(size_t w=0, curr_b=0; w<W && curr_b < N; ++w, curr_b += chunk_size) {
chunk_size = part.adjusted_chunk_size(N, W, w);
- launch_loop(W, w, rt, [=, &c, &part] () mutable {
+ launch_loop(W, w, rt, part, [=, &c, &part] () mutable {
part.loop(N, W, curr_b, chunk_size,
- [&, prev_e=size_t{0}](size_t curr_b, size_t curr_e) mutable {
- std::advance(beg1, curr_b - prev_e);
- std::advance(beg2, curr_b - prev_e);
- std::advance(d_beg, curr_b - prev_e);
- for(size_t x = curr_b; x<curr_e; x++) {
+ [&, prev_e=size_t{0}](size_t part_b, size_t part_e) mutable {
+ std::advance(beg1, part_b - prev_e);
+ std::advance(beg2, part_b - prev_e);
+ std::advance(d_beg, part_b - prev_e);
+ for(size_t x = part_b; x<part_e; x++) {
*d_beg++ = c(*beg1++, *beg2++);
}
- prev_e = curr_e;
+ prev_e = part_e;
}
- );
+ );
});
}
- rt.join();
+ rt.corun_all();
}
// dynamic partitioner
else {
std::atomic<size_t> next(0);
launch_loop(N, W, rt, next, part, [=, &c, &next, &part] () mutable {
part.loop(N, W, next,
- [&, prev_e=size_t{0}](size_t curr_b, size_t curr_e) mutable {
- std::advance(beg1, curr_b - prev_e);
- std::advance(beg2, curr_b - prev_e);
- std::advance(d_beg, curr_b - prev_e);
- for(size_t x = curr_b; x<curr_e; x++) {
+ [&, prev_e=size_t{0}](size_t part_b, size_t part_e) mutable {
+ std::advance(beg1, part_b - prev_e);
+ std::advance(beg2, part_b - prev_e);
+ std::advance(d_beg, part_b - prev_e);
+ for(size_t x = part_b; x<part_e; x++) {
*d_beg++ = c(*beg1++, *beg2++);
}
- prev_e = curr_e;
+ prev_e = part_e;
}
- );
+ );
});
}
};
}
-} // end of namespace detail -------------------------------------------------
-
// ----------------------------------------------------------------------------
// transform
// ----------------------------------------------------------------------------
// Function: transform
-template <typename B, typename E, typename O, typename C, typename P>
-Task FlowBuilder::transform(B first1, E last1, O d_first, C c, P&& part) {
+template <typename B, typename E, typename O, typename C, typename P,
+ std::enable_if_t<is_partitioner_v<std::decay_t<P>>, void>*
+>
+Task FlowBuilder::transform(B first1, E last1, O d_first, C c, P part) {
return emplace(
- detail::make_transform_task(first1, last1, d_first, c, std::forward<P>(part))
+ make_transform_task(first1, last1, d_first, c, part)
);
}
std::enable_if_t<!is_partitioner_v<std::decay_t<C>>, void>*
>
Task FlowBuilder::transform(
- B1 first1, E1 last1, B2 first2, O d_first, C c, P&& part
+ B1 first1, E1 last1, B2 first2, O d_first, C c, P part
) {
-
- return emplace(detail::make_transform_task(
- first1, last1, first2, d_first, c, std::forward<P>(part)
+ return emplace(make_transform_task(
+ first1, last1, first2, d_first, c, part
));
}
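+
+// (illustrative) the two overloads above in use:
+//
+//   // unary: dst[i] = src[i] + 1, with the default partitioner
+//   taskflow.transform(src.begin(), src.end(), dst.begin(),
+//                      [](int x){ return x + 1; });
+//
+//   // binary: dst[i] = a[i] * b[i], with an explicit static partitioner
+//   taskflow.transform(a.begin(), a.end(), b.begin(), dst.begin(),
+//                      std::multiplies<int>{}, tf::StaticPartitioner<>(16));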
--- /dev/null
+#pragma once
+
+#include "executor.hpp"
+
+// https://hackmd.io/@sysprog/concurrency-atomics
+
+namespace tf {
+
+// ----------------------------------------------------------------------------
+// Async
+// ----------------------------------------------------------------------------
+
+// Function: async
+template <typename P, typename F>
+auto Executor::async(P&& params, F&& f) {
+
+ _increment_topology();
+
+ using R = std::invoke_result_t<std::decay_t<F>>;
+
+ std::packaged_task<R()> p(std::forward<F>(f));
+ auto fu{p.get_future()};
+
+ auto node = node_pool.animate(
+ std::forward<P>(params), nullptr, nullptr, 0,
+ // handle
+ std::in_place_type_t<Node::Async>{},
+ [p=make_moc(std::move(p))]() mutable { p.object(); }
+ );
+
+ _schedule_async_task(node);
+
+ return fu;
+}
+
+// Function: async
+template <typename F>
+auto Executor::async(F&& f) {
+ return async(DefaultTaskParams{}, std::forward<F>(f));
+}
+
+// ----------------------------------------------------------------------------
+// Silent Async
+// ----------------------------------------------------------------------------
+
+// Function: silent_async
+template <typename P, typename F>
+void Executor::silent_async(P&& params, F&& f) {
+
+ _increment_topology();
+
+ auto node = node_pool.animate(
+ std::forward<P>(params), nullptr, nullptr, 0,
+ // handle
+ std::in_place_type_t<Node::Async>{}, std::forward<F>(f)
+ );
+
+ _schedule_async_task(node);
+}
+
+// Function: silent_async
+template <typename F>
+void Executor::silent_async(F&& f) {
+ silent_async(DefaultTaskParams{}, std::forward<F>(f));
+}
+
+// ----------------------------------------------------------------------------
+// Async Helper Methods
+// ----------------------------------------------------------------------------
+
+// Procedure: _schedule_async_task
+inline void Executor::_schedule_async_task(Node* node) {
+ if(auto w = _this_worker(); w) {
+ _schedule(*w, node);
+ }
+ else{
+ _schedule(node);
+ }
+}
+
+// Procedure: _tear_down_async
+inline void Executor::_tear_down_async(Node* node) {
+ // from runtime
+ if(node->_parent) {
+ node->_parent->_join_counter.fetch_sub(1, std::memory_order_release);
+ }
+ // from executor
+ else {
+ _decrement_topology();
+ }
+ node_pool.recycle(node);
+}
+
+// ----------------------------------------------------------------------------
+// Silent Dependent Async
+// ----------------------------------------------------------------------------
+
+// Function: silent_dependent_async
+template <typename F, typename... Tasks,
+ std::enable_if_t<all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>*
+>
+tf::AsyncTask Executor::silent_dependent_async(F&& func, Tasks&&... tasks) {
+ return silent_dependent_async(
+ DefaultTaskParams{}, std::forward<F>(func), std::forward<Tasks>(tasks)...
+ );
+}
+
+// Function: silent_dependent_async
+template <typename P, typename F, typename... Tasks,
+ std::enable_if_t<is_task_params_v<P> && all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>*
+>
+tf::AsyncTask Executor::silent_dependent_async(
+ P&& params, F&& func, Tasks&&... tasks
+){
+
+ _increment_topology();
+
+ size_t num_dependents = sizeof...(Tasks);
+
+ // create a task before scheduling the node to retain a shared ownership first
+ AsyncTask task(node_pool.animate(
+ std::forward<P>(params), nullptr, nullptr, num_dependents,
+ std::in_place_type_t<Node::DependentAsync>{}, std::forward<F>(func)
+ ));
+
+ if constexpr(sizeof...(Tasks) > 0) {
+ (_process_async_dependent(task._node, tasks, num_dependents), ...);
+ }
+
+ if(num_dependents == 0) {
+ _schedule_async_task(task._node);
+ }
+
+ return task;
+}
+
+// Function: silent_dependent_async
+template <typename F, typename I,
+ std::enable_if_t<!std::is_same_v<std::decay_t<I>, AsyncTask>, void>*
+>
+tf::AsyncTask Executor::silent_dependent_async(F&& func, I first, I last) {
+ return silent_dependent_async(DefaultTaskParams{}, std::forward<F>(func), first, last);
+}
+
+// Function: silent_dependent_async
+template <typename P, typename F, typename I,
+ std::enable_if_t<is_task_params_v<P> && !std::is_same_v<std::decay_t<I>, AsyncTask>, void>*
+>
+tf::AsyncTask Executor::silent_dependent_async(
+ P&& params, F&& func, I first, I last
+) {
+
+ _increment_topology();
+
+ size_t num_dependents = std::distance(first, last);
+
+ AsyncTask task(node_pool.animate(
+ std::forward<P>(params), nullptr, nullptr, num_dependents,
+ std::in_place_type_t<Node::DependentAsync>{}, std::forward<F>(func)
+ ));
+
+ for(; first != last; first++){
+ _process_async_dependent(task._node, *first, num_dependents);
+ }
+
+ if(num_dependents == 0) {
+ _schedule_async_task(task._node);
+ }
+
+ return task;
+}
+
+// ----------------------------------------------------------------------------
+// Dependent Async
+// ----------------------------------------------------------------------------
+
+// Function: dependent_async
+template <typename F, typename... Tasks,
+ std::enable_if_t<all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>*
+>
+auto Executor::dependent_async(F&& func, Tasks&&... tasks) {
+ return dependent_async(DefaultTaskParams{}, std::forward<F>(func), std::forward<Tasks>(tasks)...);
+}
+
+// Function: dependent_async
+template <typename P, typename F, typename... Tasks,
+ std::enable_if_t<is_task_params_v<P> && all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>*
+>
+auto Executor::dependent_async(P&& params, F&& func, Tasks&&... tasks) {
+
+ _increment_topology();
+
+ using R = std::invoke_result_t<std::decay_t<F>>;
+
+ std::packaged_task<R()> p(std::forward<F>(func));
+ auto fu{p.get_future()};
+
+ size_t num_dependents = sizeof...(tasks);
+
+ AsyncTask task(node_pool.animate(
+ std::forward<P>(params), nullptr, nullptr, num_dependents,
+ std::in_place_type_t<Node::DependentAsync>{},
+ [p=make_moc(std::move(p))] () mutable { p.object(); }
+ ));
+
+ if constexpr(sizeof...(Tasks) > 0) {
+ (_process_async_dependent(task._node, tasks, num_dependents), ...);
+ }
+
+ if(num_dependents == 0) {
+ _schedule_async_task(task._node);
+ }
+
+ return std::make_pair(std::move(task), std::move(fu));
+}
+
+// Function: dependent_async
+template <typename F, typename I,
+ std::enable_if_t<!std::is_same_v<std::decay_t<I>, AsyncTask>, void>*
+>
+auto Executor::dependent_async(F&& func, I first, I last) {
+ return dependent_async(DefaultTaskParams{}, std::forward<F>(func), first, last);
+}
+
+// Function: dependent_async
+template <typename P, typename F, typename I,
+ std::enable_if_t<is_task_params_v<P> && !std::is_same_v<std::decay_t<I>, AsyncTask>, void>*
+>
+auto Executor::dependent_async(P&& params, F&& func, I first, I last) {
+
+ _increment_topology();
+
+ using R = std::invoke_result_t<std::decay_t<F>>;
+
+ std::packaged_task<R()> p(std::forward<F>(func));
+ auto fu{p.get_future()};
+
+ size_t num_dependents = std::distance(first, last);
+
+ AsyncTask task(node_pool.animate(
+ std::forward<P>(params), nullptr, nullptr, num_dependents,
+ std::in_place_type_t<Node::DependentAsync>{},
+ [p=make_moc(std::move(p))] () mutable { p.object(); }
+ ));
+
+ for(; first != last; first++) {
+ _process_async_dependent(task._node, *first, num_dependents);
+ }
+
+ if(num_dependents == 0) {
+ _schedule_async_task(task._node);
+ }
+
+ return std::make_pair(std::move(task), std::move(fu));
+}
+
+// ----------------------------------------------------------------------------
+// Dependent Async Helper Functions
+// ----------------------------------------------------------------------------
+
+// Procedure: _process_async_dependent
+inline void Executor::_process_async_dependent(
+ Node* node, tf::AsyncTask& task, size_t& num_dependents
+) {
+
+ auto& state = std::get_if<Node::DependentAsync>(&(task._node->_handle))->state;
+
+ add_successor:
+
+ auto target = Node::AsyncState::UNFINISHED;
+
+ // acquires the lock
+ if(state.compare_exchange_weak(target, Node::AsyncState::LOCKED,
+ std::memory_order_acq_rel,
+ std::memory_order_acquire)) {
+ task._node->_successors.push_back(node);
+ state.store(Node::AsyncState::UNFINISHED, std::memory_order_release);
+ }
+  // the dependent's state is FINISHED, meaning it has already run its
+  // callable, so we decrement this node's join counter by one
+ else if (target == Node::AsyncState::FINISHED) {
+ num_dependents = node->_join_counter.fetch_sub(1, std::memory_order_acq_rel) - 1;
+ }
+  // another worker is concurrently adding a successor to this dependent;
+  // retry the state transition
+ else {
+ goto add_successor;
+ }
+}
+
+
+// Procedure: _tear_down_dependent_async
+inline void Executor::_tear_down_dependent_async(Worker& worker, Node* node) {
+
+ auto handle = std::get_if<Node::DependentAsync>(&(node->_handle));
+
+ // this async task comes from Executor
+ auto target = Node::AsyncState::UNFINISHED;
+
+ while(!handle->state.compare_exchange_weak(target, Node::AsyncState::FINISHED,
+ std::memory_order_acq_rel,
+ std::memory_order_relaxed)) {
+ target = Node::AsyncState::UNFINISHED;
+ }
+
+  // spawn successors whenever their dependencies are resolved
+ worker._cache = nullptr;
+ for(size_t i=0; i<node->_successors.size(); ++i) {
+ if(auto s = node->_successors[i];
+ s->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1
+ ) {
+ if(worker._cache) {
+ _schedule(worker, worker._cache);
+ }
+ worker._cache = s;
+ }
+ }
+
+ // now the executor no longer needs to retain ownership
+ if(handle->use_count.fetch_sub(1, std::memory_order_acq_rel) == 1) {
+ node_pool.recycle(node);
+ }
+
+ _decrement_topology();
+}
+
+} // end of namespace tf -----------------------------------------------------
+
--- /dev/null
+#pragma once
+
+#include "graph.hpp"
+
+/**
+@file async_task.hpp
+@brief asynchronous task include file
+*/
+
+namespace tf {
+
+// ----------------------------------------------------------------------------
+// AsyncTask
+// ----------------------------------------------------------------------------
+
+/**
+@brief class to create a dependent asynchronous task
+
+A tf::AsyncTask is a lightweight handle that retains @em shared ownership
+of a dependent async task created by an executor.
+This shared ownership ensures that the async task remains alive when
+adding it to the dependency list of another async task,
+thus avoiding the classical [ABA problem](https://en.wikipedia.org/wiki/ABA_problem).
+
+@code{.cpp}
+// main thread retains shared ownership of async task A
+tf::AsyncTask A = executor.silent_dependent_async([](){});
+
+// task A remains alive (i.e., at least one ref count by the main thread)
+// when being added to the dependency list of async task B
+tf::AsyncTask B = executor.silent_dependent_async([](){}, A);
+@endcode
+
+Currently, tf::AsyncTask is modeled after the C++ smart pointer
+std::shared_ptr and is cheap to copy or move as long as only a handful
+of objects own it.
+When a worker completes an async task, it will remove the task from the executor,
+decrementing the number of shared owners by one.
+If that counter reaches zero, the task is destroyed.
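+
+For example (illustrative), the ownership and completion state of an async
+task can be queried through tf::AsyncTask::use_count and tf::AsyncTask::is_done:
+
+@code{.cpp}
+tf::AsyncTask A = executor.silent_dependent_async([](){});
+size_t owners = A.use_count();  // at least 1, as handle A retains ownership
+bool finished = A.is_done();    // true once a worker has completed the task
+@endcode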
+*/
+class AsyncTask {
+
+ friend class Executor;
+
+ public:
+
+ /**
+ @brief constructs an empty task handle
+ */
+ AsyncTask() = default;
+
+ /**
+ @brief destroys the managed asynchronous task if this is the last owner
+ */
+ ~AsyncTask();
+
+ /**
+ @brief constructs an asynchronous task that shares ownership of @c rhs
+ */
+ AsyncTask(const AsyncTask& rhs);
+
+ /**
+ @brief move-constructs an asynchronous task from @c rhs
+ */
+ AsyncTask(AsyncTask&& rhs);
+
+ /**
+ @brief copy-assigns the asynchronous task from @c rhs
+
+ Releases the managed object of @c this and retains a new shared ownership
+ of @c rhs.
+ */
+ AsyncTask& operator = (const AsyncTask& rhs);
+
+ /**
+ @brief move-assigns the asynchronous task from @c rhs
+
+ Releases the managed object of @c this and takes over the ownership of @c rhs.
+ */
+ AsyncTask& operator = (AsyncTask&& rhs);
+
+ /**
+ @brief checks if the asynchronous task stores nothing
+ */
+ bool empty() const;
+
+ /**
+  @brief releases the managed object of @c this
+ */
+ void reset();
+
+ /**
+ @brief obtains a hash value of this asynchronous task
+ */
+ size_t hash_value() const;
+
+ /**
+ @brief returns the number of shared owners that are currently managing
+ this asynchronous task
+ */
+ size_t use_count() const;
+
+ /**
+ @brief returns the boolean indicating whether the async task is done
+ */
+ bool is_done() const;
+
+ private:
+
+ explicit AsyncTask(Node*);
+
+ Node* _node {nullptr};
+
+ void _incref();
+ void _decref();
+};
+
+// Constructor
+inline AsyncTask::AsyncTask(Node* ptr) : _node{ptr} {
+ _incref();
+}
+
+// Function: _incref
+inline void AsyncTask::_incref() {
+ if(_node) {
+ std::get_if<Node::DependentAsync>(&(_node->_handle))->use_count.fetch_add(
+ 1, std::memory_order_relaxed
+ );
+ }
+}
+
+// Function: _decref
+inline void AsyncTask::_decref() {
+ if(_node && std::get_if<Node::DependentAsync>(&(_node->_handle))->use_count.fetch_sub(
+ 1, std::memory_order_acq_rel
+ ) == 1) {
+ node_pool.recycle(_node);
+ }
+}
+
+// Copy Constructor
+inline AsyncTask::AsyncTask(const AsyncTask& rhs) :
+ _node{rhs._node} {
+ _incref();
+}
+
+// Move Constructor
+inline AsyncTask::AsyncTask(AsyncTask&& rhs) :
+ _node {rhs._node} {
+ rhs._node = nullptr;
+}
+
+// Destructor
+inline AsyncTask::~AsyncTask() {
+ _decref();
+}
+
+// Copy assignment
+inline AsyncTask& AsyncTask::operator = (const AsyncTask& rhs) {
+ _decref();
+ _node = rhs._node;
+ _incref();
+ return *this;
+}
+
+// Move assignment
+inline AsyncTask& AsyncTask::operator = (AsyncTask&& rhs) {
+ _decref();
+ _node = rhs._node;
+ rhs._node = nullptr;
+ return *this;
+}
+
+// Function: empty
+inline bool AsyncTask::empty() const {
+ return _node == nullptr;
+}
+
+// Function: reset
+inline void AsyncTask::reset() {
+ _decref();
+ _node = nullptr;
+}
+
+// Function: hash_value
+inline size_t AsyncTask::hash_value() const {
+ return std::hash<Node*>{}(_node);
+}
+
+// Function: use_count
+inline size_t AsyncTask::use_count() const {
+ return _node == nullptr ? size_t{0} :
+ std::get_if<Node::DependentAsync>(&(_node->_handle))->use_count.load(
+ std::memory_order_relaxed
+ );
+}
+
+// Function: is_done
+inline bool AsyncTask::is_done() const {
+ return std::get_if<Node::DependentAsync>(&(_node->_handle))->state.load(
+ std::memory_order_acquire
+ ) == Node::AsyncState::FINISHED;
+}
+
+} // end of namespace tf ----------------------------------------------------
+
+
+
class syclTask;
class syclFlow;
+// ----------------------------------------------------------------------------
+// struct
+// ----------------------------------------------------------------------------
+struct TaskParams;
+struct DefaultTaskParams;
+
} // end of namespace tf -----------------------------------------------------
/**
@brief constructs the executor with @c N worker threads
-
- @param N number of workers (default std::thread::hardware_concurrency)
- @param wix worker interface class to alter worker (thread) behaviors
+ @param N the number of workers (default std::thread::hardware_concurrency)
The constructor spawns @c N worker threads to run tasks in a
work-stealing loop. The number of workers must be greater than zero
or an exception will be thrown.
By default, the number of worker threads is equal to the maximum
hardware concurrency returned by std::thread::hardware_concurrency.
-
- Users can alter the worker behavior, such as changing thread affinity,
- via deriving an instance from tf::WorkerInterface.
*/
- explicit Executor(
- size_t N = std::thread::hardware_concurrency(),
- std::shared_ptr<WorkerInterface> wix = nullptr
- );
+ explicit Executor(size_t N = std::thread::hardware_concurrency());
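+
+  // A minimal sketch of the construction modes described above (illustrative):
+  //
+  //   tf::Executor a;     // as many workers as std::thread::hardware_concurrency
+  //   tf::Executor b(4);  // exactly four workers
+  //   tf::Executor c(0);  // throws: an executor must have at least one worker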
/**
@brief destructs the executor
// --------------------------------------------------------------------------
// Async Task Methods
// --------------------------------------------------------------------------
-
+
/**
- @brief runs a given function asynchronously
+ @brief creates a parameterized asynchronous task to run the given function
+ @tparam P task parameter type
@tparam F callable type
+ @param params task parameters
@param func callable object
@return a @std_future that will hold the result of the execution
-
- The method creates an asynchronous task to run the given function
- and return a @std_future object that eventually will hold the result
- of the return value.
+
+ The method creates a parameterized asynchronous task
+ to run the given function and returns a @std_future object
+ that will eventually hold the result of the execution.
@code{.cpp}
- std::future<int> future = executor.async([](){
- std::cout << "create an asynchronous task and returns 1\n";
+ std::future<int> future = executor.async("name", [](){
+ std::cout << "create an asynchronous task with a name and returns 1\n";
return 1;
});
future.get();
This member function is thread-safe.
*/
- template <typename F>
- auto async(F&& func);
+ template <typename P, typename F>
+ auto async(P&& params, F&& func);
/**
- @brief runs a given function asynchronously and gives a name to this task
+ @brief runs a given function asynchronously
@tparam F callable type
- @param name name of the asynchronous task
@param func callable object
@return a @std_future that will hold the result of the execution
-
- The method creates and assigns a name to an asynchronous task
- to run the given function,
- returning @std_future object that eventually will hold the result
- Assigned task names will appear in the observers of the executor.
+
+ The method creates an asynchronous task to run the given function
+ and returns a @std_future object that will eventually hold the result
+ of the execution.
@code{.cpp}
- std::future<int> future = executor.async("name", [](){
- std::cout << "create an asynchronous task with a name and returns 1\n";
+ std::future<int> future = executor.async([](){
+ std::cout << "create an asynchronous task and returns 1\n";
return 1;
});
future.get();
This member function is thread-safe.
*/
template <typename F>
- auto async(const std::string& name, F&& func);
+ auto async(F&& func);
/**
@brief similar to tf::Executor::async but does not return a future object
-
+
+ @tparam P task parameters type
@tparam F callable type
-
+
+ @param params task parameters
@param func callable object
- This member function is more efficient than tf::Executor::async
- and is encouraged to use when you do not want a @std_future to
- acquire the result or synchronize the execution.
+ The method creates a parameterized asynchronous task
+ to run the given function without returning any @std_future object.
+ This member function is more efficient than tf::Executor::async
+ and is encouraged when applications do not need a @std_future to acquire
+ the result or synchronize the execution.
@code{.cpp}
- executor.silent_async([](){
- std::cout << "create an asynchronous task with no return\n";
+ executor.silent_async("name", [](){
+ std::cout << "create an asynchronous task with a name and no return\n";
});
executor.wait_for_all();
@endcode
This member function is thread-safe.
*/
- template <typename F>
- void silent_async(F&& func);
-
+ template <typename P, typename F>
+ void silent_async(P&& params, F&& func);
+
/**
@brief similar to tf::Executor::async but does not return a future object
-
+
@tparam F callable type
-
- @param name assigned name to the task
+
@param func callable object
- This member function is more efficient than tf::Executor::async
- and is encouraged to use when you do not want a @std_future to
- acquire the result or synchronize the execution.
- Assigned task names will appear in the observers of the executor.
+ The method creates an asynchronous task
+ to run the given function without returning any @std_future object.
+ This member function is more efficient than tf::Executor::async
+ and is encouraged when applications do not need a @std_future to acquire
+ the result or synchronize the execution.
@code{.cpp}
- executor.silent_async("name", [](){
- std::cout << "create an asynchronous task with a name and no return\n";
+ executor.silent_async([](){
+ std::cout << "create an asynchronous task with no return\n";
});
executor.wait_for_all();
@endcode
This member function is thread-safe.
*/
template <typename F>
- void silent_async(const std::string& name, F&& func);
+ void silent_async(F&& func);
// --------------------------------------------------------------------------
// Silent Dependent Async Methods
tf::AsyncTask silent_dependent_async(F&& func, Tasks&&... tasks);
/**
- @brief names and runs the given function asynchronously
+ @brief runs the given function asynchronously
when the given dependents finish
@tparam F callable type
@tparam Tasks task types convertible to tf::AsyncTask
- @param name assigned name to the task
+ @param params task parameters
@param func callable object
@param tasks asynchronous tasks on which this execution depends
This member function is thread-safe.
*/
- template <typename F, typename... Tasks,
- std::enable_if_t<all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>* = nullptr
+ template <typename P, typename F, typename... Tasks,
+ std::enable_if_t<is_task_params_v<P> && all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>* = nullptr
>
- tf::AsyncTask silent_dependent_async(const std::string& name, F&& func, Tasks&&... tasks);
+ tf::AsyncTask silent_dependent_async(P&& params, F&& func, Tasks&&... tasks);
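+
+  // A minimal sketch (illustrative; `executor` is a tf::Executor). A plain
+  // string or a tf::TaskParams object both satisfy the task-parameter concept:
+  //
+  //   tf::AsyncTask A = executor.silent_dependent_async("A", [](){});
+  //   tf::AsyncTask B = executor.silent_dependent_async("B", [](){}, A);  // B runs after A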
/**
@brief runs the given function asynchronously
tf::AsyncTask silent_dependent_async(F&& func, I first, I last);
/**
- @brief names and runs the given function asynchronously
+ @brief runs the given function asynchronously
when the given range of dependents finish
@tparam F callable type
@tparam I iterator type
- @param name assigned name to the task
+ @param params task parameters
@param func callable object
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
This member function is thread-safe.
*/
- template <typename F, typename I,
- std::enable_if_t<!std::is_same_v<std::decay_t<I>, AsyncTask>, void>* = nullptr
+ template <typename P, typename F, typename I,
+ std::enable_if_t<is_task_params_v<P> && !std::is_same_v<std::decay_t<I>, AsyncTask>, void>* = nullptr
>
- tf::AsyncTask silent_dependent_async(const std::string& name, F&& func, I first, I last);
+ tf::AsyncTask silent_dependent_async(P&& params, F&& func, I first, I last);
// --------------------------------------------------------------------------
// Dependent Async Methods
auto dependent_async(F&& func, Tasks&&... tasks);
/**
- @brief names and runs the given function asynchronously
+ @brief runs the given function asynchronously
when the given dependents finish
+ @tparam P task parameters type
@tparam F callable type
@tparam Tasks task types convertible to tf::AsyncTask
- @param name assigned name to the task
+ @param params task parameters
@param func callable object
@param tasks asynchronous tasks on which this execution depends
This member function is thread-safe.
*/
- template <typename F, typename... Tasks,
- std::enable_if_t<all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>* = nullptr
+ template <typename P, typename F, typename... Tasks,
+ std::enable_if_t<is_task_params_v<P> && all_same_v<AsyncTask, std::decay_t<Tasks>...>, void>* = nullptr
>
- auto dependent_async(const std::string& name, F&& func, Tasks&&... tasks);
+ auto dependent_async(P&& params, F&& func, Tasks&&... tasks);
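+
+  // A minimal sketch (illustrative; A and B are existing tf::AsyncTask handles):
+  // dependent_async additionally returns a @std_future to retrieve the result
+  // once all dependents have finished.
+  //
+  //   auto [task, fut] = executor.dependent_async("C", [](){ return 1; }, A, B);
+  //   assert(fut.get() == 1);   // C runs after A and B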
/**
@brief runs the given function asynchronously
auto dependent_async(F&& func, I first, I last);
/**
- @brief names and runs the given function asynchronously
+ @brief runs the given function asynchronously
when the given range of dependents finish
+ @tparam P task parameters type
@tparam F callable type
@tparam I iterator type
- @param name assigned name to the task
+ @param params task parameters
@param func callable object
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
This member function is thread-safe.
*/
- template <typename F, typename I,
- std::enable_if_t<!std::is_same_v<std::decay_t<I>, AsyncTask>, void>* = nullptr
+ template <typename P, typename F, typename I,
+ std::enable_if_t<is_task_params_v<P> && !std::is_same_v<std::decay_t<I>, AsyncTask>, void>* = nullptr
>
- auto dependent_async(const std::string& name, F&& func, I first, I last);
+ auto dependent_async(P&& params, F&& func, I first, I last);
private:
const size_t _MAX_STEALS;
+
+ std::mutex _wsq_mutex;
+ std::mutex _taskflows_mutex;
+#ifdef __cpp_lib_atomic_wait
+ std::atomic<size_t> _num_topologies {0};
+ std::atomic_flag _all_spawned = ATOMIC_FLAG_INIT;
+#else
std::condition_variable _topology_cv;
- std::mutex _taskflows_mutex;
std::mutex _topology_mutex;
- std::mutex _wsq_mutex;
- std::mutex _asyncs_mutex;
-
size_t _num_topologies {0};
+#endif
std::unordered_map<std::thread::id, size_t> _wids;
std::vector<std::thread> _threads;
std::vector<Worker> _workers;
std::list<Taskflow> _taskflows;
- std::unordered_set<std::shared_ptr<Node>> _asyncs;
-
Notifier _notifier;
TaskQueue<Node*> _wsq;
std::atomic<bool> _done {0};
- std::shared_ptr<WorkerInterface> _worker_interface;
std::unordered_set<std::shared_ptr<ObserverInterface>> _observers;
Worker* _this_worker();
-
+
bool _wait_for_task(Worker&, Node*&);
+ bool _invoke_module_task_internal(Worker&, Node*);
void _observer_prologue(Worker&, Node*);
void _observer_epilogue(Worker&, Node*);
void _schedule(Worker&, const SmallVector<Node*>&);
void _schedule(const SmallVector<Node*>&);
void _set_up_topology(Worker*, Topology*);
+ void _set_up_graph(Graph&, Node*, Topology*, int, SmallVector<Node*>&);
void _tear_down_topology(Worker&, Topology*);
void _tear_down_async(Node*);
void _tear_down_dependent_async(Worker&, Node*);
void _tear_down_invoke(Worker&, Node*);
void _increment_topology();
void _decrement_topology();
- void _decrement_topology_and_notify();
void _invoke(Worker&, Node*);
void _invoke_static_task(Worker&, Node*);
- void _invoke_dynamic_task(Worker&, Node*);
- void _consume_graph(Worker&, Node*, Graph&);
- void _detach_dynamic_task(Worker&, Node*, Graph&);
+ void _invoke_subflow_task(Worker&, Node*);
+ void _detach_subflow_task(Worker&, Node*, Graph&);
void _invoke_condition_task(Worker&, Node*, SmallVector<int>&);
void _invoke_multi_condition_task(Worker&, Node*, SmallVector<int>&);
void _invoke_module_task(Worker&, Node*);
void _invoke_async_task(Worker&, Node*);
void _invoke_dependent_async_task(Worker&, Node*);
void _process_async_dependent(Node*, tf::AsyncTask&, size_t&);
+ void _process_exception(Worker&, Node*);
void _schedule_async_task(Node*);
+ void _corun_graph(Worker&, Node*, Graph&);
template <typename P>
void _corun_until(Worker&, P&&);
-
- template <typename R, typename F>
- auto _make_promised_async(std::promise<R>&&, F&&);
};
// Constructor
-inline Executor::Executor(size_t N, std::shared_ptr<WorkerInterface> wix) :
+inline Executor::Executor(size_t N) :
_MAX_STEALS {((N+1) << 1)},
_threads {N},
_workers {N},
- _notifier {N},
- _worker_interface {std::move(wix)} {
+ _notifier {N} {
if(N == 0) {
- TF_THROW("no cpu workers to execute taskflows");
+ TF_THROW("executor must define at least one worker");
}
_spawn(N);
- // instantite the default observer if requested
+ // initialize the default observer if requested
if(has_env(TF_ENABLE_PROFILER)) {
TFProfManager::get()._manage(make_observer<TFProfObserver>());
}
// Function: num_topologies
inline size_t Executor::num_topologies() const {
+#ifdef __cpp_lib_atomic_wait
+ return _num_topologies.load(std::memory_order_relaxed);
+#else
return _num_topologies;
+#endif
}
// Function: num_taskflows
// Procedure: _spawn
inline void Executor::_spawn(size_t N) {
+#ifdef __cpp_lib_atomic_wait
+#else
std::mutex mutex;
std::condition_variable cond;
size_t n=0;
+#endif
for(size_t id=0; id<N; ++id) {
_workers[id]._executor = this;
_workers[id]._waiter = &_notifier._waiters[id];
- _threads[id] = std::thread([this] (
- Worker& w, std::mutex& mutex, std::condition_variable& cond, size_t& n
- ) -> void {
-
- // assign the thread
- w._thread = &_threads[w._id];
+ _threads[id] = std::thread([&, &w=_workers[id]] () {
- // enables the mapping
+#ifdef __cpp_lib_atomic_wait
+ // wait for the caller thread to initialize the ID mapping
+ _all_spawned.wait(false, std::memory_order_acquire);
+ w._thread = &_threads[w._id];
+#else
+ // update the ID mapping of this thread
+ w._thread = &_threads[w._id];
{
std::scoped_lock lock(mutex);
_wids[std::this_thread::get_id()] = w._id;
cond.notify_one();
}
}
+#endif
Node* t = nullptr;
- // before entering the scheduler (work-stealing loop),
- // call the user-specified prologue function
- if(_worker_interface) {
- _worker_interface->scheduler_prologue(w);
- }
-
- // must use 1 as condition instead of !done because
- // the previous worker may stop while the following workers
- // are still preparing for entering the scheduling loop
- std::exception_ptr ptr{nullptr};
- try {
- while(1) {
-
- // execute the tasks.
- _exploit_task(w, t);
-
- // wait for tasks
- if(_wait_for_task(w, t) == false) {
- break;
- }
+ while(1) {
+
+ // execute the tasks.
+ _exploit_task(w, t);
+
+ // wait for tasks
+ if(_wait_for_task(w, t) == false) {
+ break;
}
- }
- catch(...) {
- ptr = std::current_exception();
- }
-
- // call the user-specified epilogue function
- if(_worker_interface) {
- _worker_interface->scheduler_epilogue(w, ptr);
}
- }, std::ref(_workers[id]), std::ref(mutex), std::ref(cond), std::ref(n));
+ });
// POSIX-like system can use the following to affine threads to cores
//cpu_set_t cpuset;
//pthread_setaffinity_np(
// _threads[id].native_handle(), sizeof(cpu_set_t), &cpuset
//);
- }
+#ifdef __cpp_lib_atomic_wait
+ //_wids[_threads[id].get_id()] = id;
+ _wids.emplace(std::piecewise_construct,
+ std::forward_as_tuple(_threads[id].get_id()), std::forward_as_tuple(id)
+ );
+#endif
+ }
+
+#ifdef __cpp_lib_atomic_wait
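+ // the worker ID mapping is complete; release the spawned threads so they
+ // can enter their work-stealing loops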
+ _all_spawned.test_and_set(std::memory_order_release);
+ _all_spawned.notify_all();
+#else
std::unique_lock<std::mutex> lock(mutex);
cond.wait(lock, [&](){ return n==N; });
+#endif
}
// Function: _corun_until
while(!(node->_state.load(std::memory_order_acquire) & Node::READY));
begin_invoke:
+
+ SmallVector<int> conds;
// no need to do other things if the topology is cancelled
if(node->_is_cancelled()) {
// condition task
//int cond = -1;
- SmallVector<int> conds;
// switch is faster than nested if-else due to jump table
switch(node->_handle.index()) {
}
break;
- // dynamic task
- case Node::DYNAMIC: {
- _invoke_dynamic_task(worker, node);
+ // subflow task
+ case Node::SUBFLOW: {
+ _invoke_subflow_task(worker, node);
}
break;
break;
}
+ //invoke_successors:
+
// if releasing semaphores exist, release them
if(node->_semaphores && !node->_semaphores->to_release.empty()) {
_schedule(worker, node->_release_all());
}
}
-// Proecdure: _tear_down_invoke
+// Procedure: _tear_down_invoke
inline void Executor::_tear_down_invoke(Worker& worker, Node* node) {
- // we must check parent first before substracting the join counter,
+ // we must check parent first before subtracting the join counter,
// or it can introduce data race
- if(node->_parent == nullptr) {
+ if(auto parent = node->_parent; parent == nullptr) {
if(node->_topology->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1) {
_tear_down_topology(worker, node->_topology);
}
}
- // joined subflow
- else {
- node->_parent->_join_counter.fetch_sub(1, std::memory_order_release);
+ // Here we assume the parent is in a busy loop (e.g., corun) waiting for
+ // its join counter to become 0.
+ else {
+ //parent->_join_counter.fetch_sub(1, std::memory_order_acq_rel);
+ parent->_join_counter.fetch_sub(1, std::memory_order_release);
}
+ //// module task
+ //else {
+ // auto id = parent->_handle.index();
+ // if(parent->_join_counter.fetch_sub(1, std::memory_order_acq_rel) == 1) {
+ // if(id == Node::MODULE) {
+ // return parent;
+ // }
+ // }
+ //}
+ //return nullptr;
}
// Procedure: _observer_prologue
}
}
+// Procedure: _process_exception
+inline void Executor::_process_exception(Worker&, Node* node) {
+
+ constexpr static auto flag = Topology::EXCEPTION | Topology::CANCELLED;
+
+ // if the node has a parent, we store the exception in its parent
+ if(auto parent = node->_parent; parent) {
+ if ((parent->_state.fetch_or(Node::EXCEPTION, std::memory_order_relaxed) & Node::EXCEPTION) == 0) {
+ parent->_exception_ptr = std::current_exception();
+ }
+ // TODO if the node has a topology, cancel it to enable early stop
+ //if(auto tpg = node->_topology; tpg) {
+ // tpg->_state.fetch_or(Topology::CANCELLED, std::memory_order_relaxed);
+ //}
+ }
+ // multiple tasks may throw, so we only take the first thrown exception
+ else if(auto tpg = node->_topology; tpg &&
+ ((tpg->_state.fetch_or(flag, std::memory_order_relaxed) & Topology::EXCEPTION) == 0)
+ ) {
+ tpg->_exception_ptr = std::current_exception();
+ }
+ // TODO: skip the exception that is not associated with any taskflows
+}
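+
+// A sketch of the intended behavior (illustrative): an exception thrown inside
+// a task is stored in its topology and rethrown to the caller that waits on it.
+//
+//   tf::Taskflow taskflow;
+//   taskflow.emplace([](){ throw std::runtime_error("oops"); });
+//   try {
+//     executor.run(taskflow).get();   // rethrows the stored exception
+//   }
+//   catch(const std::runtime_error& e) { /* e.what() == "oops" */ }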
+
// Procedure: _invoke_static_task
inline void Executor::_invoke_static_task(Worker& worker, Node* node) {
_observer_prologue(worker, node);
- auto& work = std::get_if<Node::Static>(&node->_handle)->work;
- switch(work.index()) {
- case 0:
- std::get_if<0>(&work)->operator()();
- break;
+ TF_EXECUTOR_EXCEPTION_HANDLER(worker, node, {
+ auto& work = std::get_if<Node::Static>(&node->_handle)->work;
+ switch(work.index()) {
+ case 0:
+ std::get_if<0>(&work)->operator()();
+ break;
- case 1:
- Runtime rt(*this, worker, node);
- std::get_if<1>(&work)->operator()(rt);
- break;
- }
+ case 1:
+ Runtime rt(*this, worker, node);
+ std::get_if<1>(&work)->operator()(rt);
+ node->_process_exception();
+ break;
+ }
+ });
_observer_epilogue(worker, node);
}
-// Procedure: _invoke_dynamic_task
-inline void Executor::_invoke_dynamic_task(Worker& w, Node* node) {
-
+// Procedure: _invoke_subflow_task
+inline void Executor::_invoke_subflow_task(Worker& w, Node* node) {
_observer_prologue(w, node);
-
- auto handle = std::get_if<Node::Dynamic>(&node->_handle);
-
- handle->subgraph._clear();
-
- Subflow sf(*this, w, node, handle->subgraph);
-
- handle->work(sf);
-
- if(sf._joinable) {
- _consume_graph(w, node, handle->subgraph);
- }
-
+ TF_EXECUTOR_EXCEPTION_HANDLER(w, node, {
+ auto handle = std::get_if<Node::Subflow>(&node->_handle);
+ handle->subgraph._clear();
+ Subflow sf(*this, w, node, handle->subgraph);
+ handle->work(sf);
+ if(sf._joinable) {
+ _corun_graph(w, node, handle->subgraph);
+ }
+ node->_process_exception();
+ });
_observer_epilogue(w, node);
}
-// Procedure: _detach_dynamic_task
-inline void Executor::_detach_dynamic_task(
- Worker& w, Node* p, Graph& g
-) {
+// Procedure: _detach_subflow_task
+inline void Executor::_detach_subflow_task(Worker& w, Node* p, Graph& g) {
// graph is empty and has no async tasks
if(g.empty() && p->_join_counter.load(std::memory_order_acquire) == 0) {
}
SmallVector<Node*> src;
-
- for(auto n : g._nodes) {
-
- n->_state.store(Node::DETACHED, std::memory_order_relaxed);
- n->_set_up_join_counter();
- n->_topology = p->_topology;
- n->_parent = nullptr;
-
- if(n->num_dependents() == 0) {
- src.push_back(n);
- }
- }
+ _set_up_graph(g, nullptr, p->_topology, Node::DETACHED, src);
{
std::lock_guard<std::mutex> lock(p->_topology->_taskflow._mutex);
_schedule(w, src);
}
-// Procedure: _consume_graph
-inline void Executor::_consume_graph(Worker& w, Node* p, Graph& g) {
+// Procedure: _corun_graph
+inline void Executor::_corun_graph(Worker& w, Node* p, Graph& g) {
- // graph is empty and has no async tasks
+ // assert(p);
+
+ // graph is empty and has no async tasks (subflow)
if(g.empty() && p->_join_counter.load(std::memory_order_acquire) == 0) {
return;
}
SmallVector<Node*> src;
- for(auto n : g._nodes) {
- n->_state.store(0, std::memory_order_relaxed);
- n->_set_up_join_counter();
- n->_topology = p->_topology;
- n->_parent = p;
- if(n->num_dependents() == 0) {
- src.push_back(n);
- }
- }
+ _set_up_graph(g, p, p->_topology, 0, src);
p->_join_counter.fetch_add(src.size(), std::memory_order_relaxed);
_schedule(w, src);
- _corun_until(w, [p] () -> bool { return p->_join_counter.load(std::memory_order_acquire) == 0; });
+
+ _corun_until(w, [p] () -> bool {
+ return p->_join_counter.load(std::memory_order_acquire) == 0; }
+ );
}
// Procedure: _invoke_condition_task
Worker& worker, Node* node, SmallVector<int>& conds
) {
_observer_prologue(worker, node);
- auto& work = std::get_if<Node::Condition>(&node->_handle)->work;
- switch(work.index()) {
- case 0:
- conds = { std::get_if<0>(&work)->operator()() };
- break;
+ TF_EXECUTOR_EXCEPTION_HANDLER(worker, node, {
+ auto& work = std::get_if<Node::Condition>(&node->_handle)->work;
+ switch(work.index()) {
+ case 0:
+ conds = { std::get_if<0>(&work)->operator()() };
+ break;
- case 1:
- Runtime rt(*this, worker, node);
- conds = { std::get_if<1>(&work)->operator()(rt) };
- break;
- }
+ case 1:
+ Runtime rt(*this, worker, node);
+ conds = { std::get_if<1>(&work)->operator()(rt) };
+ node->_process_exception();
+ break;
+ }
+ });
_observer_epilogue(worker, node);
}
Worker& worker, Node* node, SmallVector<int>& conds
) {
_observer_prologue(worker, node);
- auto& work = std::get_if<Node::MultiCondition>(&node->_handle)->work;
- switch(work.index()) {
- case 0:
- conds = std::get_if<0>(&work)->operator()();
- break;
+ TF_EXECUTOR_EXCEPTION_HANDLER(worker, node, {
+ auto& work = std::get_if<Node::MultiCondition>(&node->_handle)->work;
+ switch(work.index()) {
+ case 0:
+ conds = std::get_if<0>(&work)->operator()();
+ break;
- case 1:
- Runtime rt(*this, worker, node);
- conds = std::get_if<1>(&work)->operator()(rt);
- break;
- }
+ case 1:
+ Runtime rt(*this, worker, node);
+ conds = std::get_if<1>(&work)->operator()(rt);
+ node->_process_exception();
+ break;
+ }
+ });
_observer_epilogue(worker, node);
}
// Procedure: _invoke_module_task
inline void Executor::_invoke_module_task(Worker& w, Node* node) {
_observer_prologue(w, node);
- _consume_graph(
- w, node, std::get_if<Node::Module>(&node->_handle)->graph
- );
+ TF_EXECUTOR_EXCEPTION_HANDLER(w, node, {
+ _corun_graph(w, node, std::get_if<Node::Module>(&node->_handle)->graph);
+ node->_process_exception();
+ });
_observer_epilogue(w, node);
}
+//// Function: _invoke_module_task_internal
+//inline bool Executor::_invoke_module_task_internal(Worker& w, Node* p) {
+//
+// // acquire the underlying graph
+// auto& g = std::get_if<Node::Module>(&p->_handle)->graph;
+//
+// // no need to do anything if the graph is empty
+// if(g.empty()) {
+// return false;
+// }
+//
+// SmallVector<Node*> src;
+// _set_up_graph(g, p, p->_topology, 0, src);
+// p->_join_counter.fetch_add(src.size(), std::memory_order_relaxed);
+//
+// _schedule(w, src);
+// return true;
+//}
+
// Procedure: _invoke_async_task
-inline void Executor::_invoke_async_task(Worker& w, Node* node) {
- _observer_prologue(w, node);
- std::get_if<Node::Async>(&node->_handle)->work();
- _observer_epilogue(w, node);
+inline void Executor::_invoke_async_task(Worker& worker, Node* node) {
+ _observer_prologue(worker, node);
+ TF_EXECUTOR_EXCEPTION_HANDLER(worker, node, {
+ auto& work = std::get_if<Node::Async>(&node->_handle)->work;
+ switch(work.index()) {
+ case 0:
+ std::get_if<0>(&work)->operator()();
+ break;
+
+ case 1:
+ Runtime rt(*this, worker, node);
+ std::get_if<1>(&work)->operator()(rt);
+ break;
+ }
+ });
+ _observer_epilogue(worker, node);
}
// Procedure: _invoke_dependent_async_task
-inline void Executor::_invoke_dependent_async_task(Worker& w, Node* node) {
- _observer_prologue(w, node);
- std::get_if<Node::DependentAsync>(&node->_handle)->work();
- _observer_epilogue(w, node);
+inline void Executor::_invoke_dependent_async_task(Worker& worker, Node* node) {
+ _observer_prologue(worker, node);
+ TF_EXECUTOR_EXCEPTION_HANDLER(worker, node, {
+ auto& work = std::get_if<Node::DependentAsync>(&node->_handle)->work;
+ switch(work.index()) {
+ case 0:
+ std::get_if<0>(&work)->operator()();
+ break;
+
+ case 1:
+ Runtime rt(*this, worker, node);
+ std::get_if<1>(&work)->operator()(rt);
+ break;
+ }
+ });
+ _observer_epilogue(worker, node);
}
// Function: run
_increment_topology();
- // Need to check the empty under the lock since dynamic task may
+ // Need to check the empty under the lock since subflow task may
// define detached blocks that modify the taskflow at the same time
bool empty;
{
c();
std::promise<void> promise;
promise.set_value();
- _decrement_topology_and_notify();
- return tf::Future<void>(promise.get_future(), std::monostate{});
+ _decrement_topology();
+ return tf::Future<void>(promise.get_future());
}
// create a topology for this run
TF_THROW("corun must be called by a worker of the executor");
}
- Node parent; // dummy parent
- _consume_graph(*w, &parent, target.graph());
+ Node parent; // auxiliary parent
+ _corun_graph(*w, &parent, target.graph());
+ parent._process_exception();
}
// Function: corun_until
}
_corun_until(*w, std::forward<P>(predicate));
+
+ // TODO: exception?
}
// Procedure: _increment_topology
inline void Executor::_increment_topology() {
+#ifdef __cpp_lib_atomic_wait
+ _num_topologies.fetch_add(1, std::memory_order_relaxed);
+#else
std::lock_guard<std::mutex> lock(_topology_mutex);
++_num_topologies;
+#endif
}
-// Procedure: _decrement_topology_and_notify
-inline void Executor::_decrement_topology_and_notify() {
+// Procedure: _decrement_topology
+inline void Executor::_decrement_topology() {
+#ifdef __cpp_lib_atomic_wait
+ if(_num_topologies.fetch_sub(1, std::memory_order_acq_rel) == 1) {
+ _num_topologies.notify_all();
+ }
+#else
std::lock_guard<std::mutex> lock(_topology_mutex);
if(--_num_topologies == 0) {
_topology_cv.notify_all();
}
-}
-
-// Procedure: _decrement_topology
-inline void Executor::_decrement_topology() {
- std::lock_guard<std::mutex> lock(_topology_mutex);
- --_num_topologies;
+#endif
}
// Procedure: wait_for_all
inline void Executor::wait_for_all() {
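+ // block the caller until all submitted taskflows have finished; with C++20
+ // atomic waiting we wait directly on the atomic counter, otherwise we sleep
+ // on a condition variable guarded by _topology_mutex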
+#ifdef __cpp_lib_atomic_wait
+ size_t n = _num_topologies.load(std::memory_order_acquire);
+ while(n != 0) {
+ _num_topologies.wait(n, std::memory_order_acquire);
+ n = _num_topologies.load(std::memory_order_acquire);
+ }
+#else
std::unique_lock<std::mutex> lock(_topology_mutex);
_topology_cv.wait(lock, [&](){ return _num_topologies == 0; });
+#endif
}
// Function: _set_up_topology
tpg->_sources.clear();
tpg->_taskflow._graph._clear_detached();
-
- // scan each node in the graph and build up the links
- for(auto node : tpg->_taskflow._graph._nodes) {
-
- node->_topology = tpg;
- node->_parent = nullptr;
- node->_state.store(0, std::memory_order_relaxed);
-
- if(node->num_dependents() == 0) {
- tpg->_sources.push_back(node);
- }
-
- node->_set_up_join_counter();
- }
-
+ _set_up_graph(tpg->_taskflow._graph, nullptr, tpg, 0, tpg->_sources);
tpg->_join_counter.store(tpg->_sources.size(), std::memory_order_relaxed);
if(worker) {
}
}
+// Function: _set_up_graph
+inline void Executor::_set_up_graph(
+ Graph& g, Node* parent, Topology* tpg, int state, SmallVector<Node*>& src
+) {
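+ // reset the attributes of every node in the graph for the upcoming run and
+ // collect the source nodes (those with zero dependents) into src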
+ for(auto node : g._nodes) {
+ node->_topology = tpg;
+ node->_parent = parent;
+ node->_state.store(state, std::memory_order_relaxed);
+ if(node->num_dependents() == 0) {
+ src.push_back(node);
+ }
+ node->_set_up_join_counter();
+ node->_exception_ptr = nullptr;
+ }
+}
+
// Function: _tear_down_topology
inline void Executor::_tear_down_topology(Worker& worker, Topology* tpg) {
//assert(&tpg == &(f._topologies.front()));
// case 1: we still need to run the topology again
- if(!tpg->_is_cancelled && !tpg->_pred()) {
+ if(!tpg->_exception_ptr && !tpg->cancelled() && !tpg->_pred()) {
//assert(tpg->_join_counter == 0);
std::lock_guard<std::mutex> lock(f._mutex);
tpg->_join_counter.store(tpg->_sources.size(), std::memory_order_relaxed);
else {
//assert(f._topologies.size() == 1);
- // Need to back up the promise first here becuz taskflow might be
- // destroy soon after calling get
- auto p {std::move(tpg->_promise)};
-
- // Back up lambda capture in case it has the topology pointer,
- // to avoid it releasing on pop_front ahead of _mutex.unlock &
- // _promise.set_value. Released safely when leaving scope.
- auto c {std::move(tpg->_call)};
-
- // Get the satellite if any
- auto s {f._satellite};
-
- // Now we remove the topology from this taskflow
+ auto fetched_tpg {std::move(f._topologies.front())};
f._topologies.pop();
+ auto satellite {f._satellite};
- //f._mutex.unlock();
lock.unlock();
+
+ // Soon after we carry out the promise, there is no longer any guarantee
+ // for the lifetime of the associated taskflow.
+ fetched_tpg->_carry_out_promise();
- // We set the promise in the end in case taskflow leaves the scope.
- // After set_value, the caller will return from wait
- p.set_value();
-
- _decrement_topology_and_notify();
+ _decrement_topology();
// remove the taskflow if it is managed by the executor
// TODO: in the future, we may need to synchronize on wait
// (which means the following code should be moved before set_value)
- if(s) {
- std::scoped_lock<std::mutex> lock(_taskflows_mutex);
- _taskflows.erase(*s);
+ if(satellite) {
+ std::scoped_lock<std::mutex> satellite_lock(_taskflows_mutex);
+ _taskflows.erase(*satellite);
}
}
}
}
// only the parent worker can join the subflow
- _executor._consume_graph(_worker, _parent, _graph);
+ _executor._corun_graph(_worker, _parent, _graph);
+
+ // if any exception is caught from subflow tasks, rethrow it
+ _parent->_process_exception();
+
_joinable = false;
}
}
// only the parent worker can detach the subflow
- _executor._detach_dynamic_task(_worker, _parent, _graph);
+ _executor._detach_subflow_task(_worker, _parent, _graph);
_joinable = false;
}
// Procedure: corun
template <typename T>
void Runtime::corun(T&& target) {
-
- // dynamic task (subflow)
- if constexpr(is_dynamic_task_v<T>) {
- Graph graph;
- Subflow sf(_executor, _worker, _parent, graph);
- target(sf);
- if(sf._joinable) {
- _executor._consume_graph(_worker, _parent, graph);
- }
- }
- // a composable graph object with `tf::Graph& T::graph()` defined
- else {
- _executor._consume_graph(_worker, _parent, target.graph());
- }
+ _executor._corun_graph(_worker, _parent, target.graph());
+ _parent->_process_exception();
}
// Procedure: corun_until
template <typename P>
void Runtime::corun_until(P&& predicate) {
_executor._corun_until(_worker, std::forward<P>(predicate));
+ // TODO: exception?
+}
+
+// Function: corun_all
+inline void Runtime::corun_all() {
+ _executor._corun_until(_worker, [this] () -> bool {
+ return _parent->_join_counter.load(std::memory_order_acquire) == 0;
+ });
+ _parent->_process_exception();
}
+// Destructor
+inline Runtime::~Runtime() {
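+ // implicitly corun until all asynchronous tasks spawned from this runtime
+ // finish; unlike corun_all, the destructor does not rethrow task exceptions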
+ _executor._corun_until(_worker, [this] () -> bool {
+ return _parent->_join_counter.load(std::memory_order_acquire) == 0;
+ });
+}
+
+// ------------------------------------
+// Runtime::silent_async series
+// ------------------------------------
+
// Function: _silent_async
-template <typename F>
-void Runtime::_silent_async(Worker& w, const std::string& name, F&& f) {
+template <typename P, typename F>
+void Runtime::_silent_async(Worker& w, P&& params, F&& f) {
_parent->_join_counter.fetch_add(1, std::memory_order_relaxed);
auto node = node_pool.animate(
- name, 0, _parent->_topology, _parent, 0,
+ std::forward<P>(params), _parent->_topology, _parent, 0,
std::in_place_type_t<Node::Async>{}, std::forward<F>(f)
);
// Function: silent_async
template <typename F>
void Runtime::silent_async(F&& f) {
- _silent_async(*_executor._this_worker(), "", std::forward<F>(f));
+ _silent_async(*_executor._this_worker(), DefaultTaskParams{}, std::forward<F>(f));
}
// Function: silent_async
-template <typename F>
-void Runtime::silent_async(const std::string& name, F&& f) {
- _silent_async(*_executor._this_worker(), name, std::forward<F>(f));
+template <typename P, typename F>
+void Runtime::silent_async(P&& params, F&& f) {
+ _silent_async(*_executor._this_worker(), std::forward<P>(params), std::forward<F>(f));
}
// Function: silent_async_unchecked
template <typename F>
-void Runtime::silent_async_unchecked(const std::string& name, F&& f) {
- _silent_async(_worker, name, std::forward<F>(f));
+void Runtime::silent_async_unchecked(F&& f) {
+ _silent_async(_worker, DefaultTaskParams{}, std::forward<F>(f));
}
+// Function: silent_async_unchecked
+template <typename P, typename F>
+void Runtime::silent_async_unchecked(P&& params, F&& f) {
+ _silent_async(_worker, std::forward<P>(params), std::forward<F>(f));
+}
+
+// ------------------------------------
+// Runtime::async series
+// ------------------------------------
+
// Function: _async
-template <typename F>
-auto Runtime::_async(Worker& w, const std::string& name, F&& f) {
+template <typename P, typename F>
+auto Runtime::_async(Worker& w, P&& params, F&& f) {
_parent->_join_counter.fetch_add(1, std::memory_order_relaxed);
using R = std::invoke_result_t<std::decay_t<F>>;
- std::promise<R> p;
+ std::packaged_task<R()> p(std::forward<F>(f));
auto fu{p.get_future()};
auto node = node_pool.animate(
- name, 0, _parent->_topology, _parent, 0,
+ std::forward<P>(params), _parent->_topology, _parent, 0,
std::in_place_type_t<Node::Async>{},
- [p=make_moc(std::move(p)), f=std::forward<F>(f)] () mutable {
- if constexpr(std::is_same_v<R, void>) {
- f();
- p.object.set_value();
- }
- else {
- p.object.set_value(f());
- }
- }
+ [p=make_moc(std::move(p))] () mutable { p.object(); }
);
_executor._schedule(w, node);
// Function: async
template <typename F>
auto Runtime::async(F&& f) {
- return _async(*_executor._this_worker(), "", std::forward<F>(f));
+ return _async(*_executor._this_worker(), DefaultTaskParams{}, std::forward<F>(f));
}
// Function: async
-template <typename F>
-auto Runtime::async(const std::string& name, F&& f) {
- return _async(*_executor._this_worker(), name, std::forward<F>(f));
+template <typename P, typename F>
+auto Runtime::async(P&& params, F&& f) {
+ return _async(*_executor._this_worker(), std::forward<P>(params), std::forward<F>(f));
}
-// Function: join
-inline void Runtime::join() {
- corun_until([this] () -> bool {
- return _parent->_join_counter.load(std::memory_order_acquire) == 0;
- });
-}
+
} // end of namespace tf -----------------------------------------------------
});
@endcode
- Please refer to @ref DynamicTasking for details.
+ Please refer to @ref SubflowTasking for details.
*/
template <typename C,
- std::enable_if_t<is_dynamic_task_v<C>, void>* = nullptr
+ std::enable_if_t<is_subflow_task_v<C>, void>* = nullptr
>
Task emplace(C&& callable);
The taskflow object @c t2 is composed of another taskflow object @c t1,
preceded by another static task @c init.
When taskflow @c t2 is submitted to an executor,
- @c init will run first and then @c comp which spwans its definition
+ @c init will run first and then @c comp which spawns its definition
in taskflow @c t1.
The target @c object being composed must define the method
*/
void linearize(std::initializer_list<Task> tasks);
+
// ------------------------------------------------------------------------
// parallel iterations
// ------------------------------------------------------------------------
@tparam B beginning iterator type
@tparam E ending iterator type
@tparam C callable type
- @tparam P partitioner type (default tf::GuidedPartitioner)
+ @tparam P partitioner type (default tf::DefaultPartitioner)
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
Please refer to @ref ParallelIterations for details.
*/
- template <typename B, typename E, typename C, typename P = GuidedPartitioner>
- Task for_each(B first, E last, C callable, P&& part = P());
+ template <typename B, typename E, typename C, typename P = DefaultPartitioner>
+ Task for_each(B first, E last, C callable, P part = P());
/**
@brief constructs an STL-styled index-based parallel-for task
@tparam E ending index type (must be integral)
@tparam S step type (must be integral)
@tparam C callable type
- @tparam P partitioner type (default tf::GuidedPartitioner)
+ @tparam P partitioner type (default tf::DefaultPartitioner)
@param first index of the beginning (inclusive)
@param last index of the end (exclusive)
Please refer to @ref ParallelIterations for details.
*/
- template <typename B, typename E, typename S, typename C, typename P = GuidedPartitioner>
+ template <typename B, typename E, typename S, typename C, typename P = DefaultPartitioner>
Task for_each_index(
- B first, E last, S step, C callable, P&& part = P()
+ B first, E last, S step, C callable, P part = P()
);
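+
+  // A minimal sketch (illustrative): iterate 0, 2, 4, ..., 98 in parallel,
+  // using the default partitioner.
+  //
+  //   taskflow.for_each_index(0, 100, 2, [](int i){ std::printf("%d\n", i); });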
// ------------------------------------------------------------------------
@tparam E ending input iterator type
@tparam O output iterator type
@tparam C callable type
- @tparam P partitioner type (default tf::GuidedPartitioner)
+ @tparam P partitioner type (default tf::DefaultPartitioner)
@param first1 iterator to the beginning of the first range
@param last1 iterator to the end of the first range
Please refer to @ref ParallelTransforms for details.
*/
template <
- typename B, typename E, typename O, typename C, typename P = GuidedPartitioner
+ typename B, typename E, typename O, typename C, typename P = DefaultPartitioner,
+ std::enable_if_t<is_partitioner_v<std::decay_t<P>>, void>* = nullptr
>
- Task transform(B first1, E last1, O d_first, C c, P&& part = P());
+ Task transform(B first1, E last1, O d_first, C c, P part = P());
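+
+  // A minimal sketch (illustrative; `src` and `dst` are equally sized vectors):
+  //
+  //   taskflow.transform(src.begin(), src.end(), dst.begin(),
+  //                      [](int x){ return x + 1; });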
/**
@brief constructs a parallel-transform task
@tparam B2 beginning input iterator type for the first second range
@tparam O output iterator type
@tparam C callable type
- @tparam P partitioner type (default tf::GuidedPartitioner)
+ @tparam P partitioner type (default tf::DefaultPartitioner)
@param first1 iterator to the beginning of the first input range
@param last1 iterator to the end of the first input range
Please refer to @ref ParallelTransforms for details.
*/
template <
- typename B1, typename E1, typename B2, typename O, typename C, typename P=GuidedPartitioner,
+ typename B1, typename E1, typename B2, typename O, typename C, typename P=DefaultPartitioner,
std::enable_if_t<!is_partitioner_v<std::decay_t<C>>, void>* = nullptr
>
- Task transform(B1 first1, E1 last1, B2 first2, O d_first, C c, P&& part = P());
+ Task transform(B1 first1, E1 last1, B2 first2, O d_first, C c, P part = P());
// ------------------------------------------------------------------------
// reduction
@tparam E ending iterator type
@tparam T result type
@tparam O binary reducer type
- @tparam P partitioner type (default tf::GuidedPartitioner)
+ @tparam P partitioner type (default tf::DefaultPartitioner)
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
Please refer to @ref ParallelReduction for details.
*/
- template <typename B, typename E, typename T, typename O, typename P = GuidedPartitioner>
- Task reduce(B first, E last, T& init, O bop, P&& part = P());
+ template <typename B, typename E, typename T, typename O, typename P = DefaultPartitioner>
+ Task reduce(B first, E last, T& init, O bop, P part = P());
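+
+  // A minimal sketch (illustrative): sum a vector into `sum`, which must stay
+  // alive until the task finishes.
+  //
+  //   int sum = 0;
+  //   taskflow.reduce(vec.begin(), vec.end(), sum, std::plus<int>{});
+  //   executor.run(taskflow).wait();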
// ------------------------------------------------------------------------
- // transfrom and reduction
+ // transform and reduction
// ------------------------------------------------------------------------
/**
@tparam E ending iterator type
@tparam T result type
@tparam BOP binary reducer type
- @tparam UOP unary transformion type
- @tparam P partitioner type (default tf::GuidedPartitioner)
+ @tparam UOP unary transformation type
+ @tparam P partitioner type (default tf::DefaultPartitioner)
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
Please refer to @ref ParallelReduction for details.
*/
template <
- typename B, typename E, typename T, typename BOP, typename UOP, typename P = GuidedPartitioner
+ typename B, typename E, typename T, typename BOP, typename UOP, typename P = DefaultPartitioner,
+ std::enable_if_t<is_partitioner_v<std::decay_t<P>>, void>* = nullptr
>
- Task transform_reduce(B first, E last, T& init, BOP bop, UOP uop, P&& part = P());
+ Task transform_reduce(B first, E last, T& init, BOP bop, UOP uop, P part = P());
+
+ /**
+ @brief constructs an STL-styled parallel transform-reduce task
+ @tparam B1 first beginning iterator type
+ @tparam E1 first ending iterator type
+ @tparam B2 second beginning iterator type
+ @tparam T result type
+ @tparam BOP_R binary reducer type
+ @tparam BOP_T binary transformation type
+ @tparam P partitioner type (default tf::DefaultPartitioner)
+
+ @param first1 iterator to the beginning of the first range (inclusive)
+ @param last1 iterator to the end of the first range (exclusive)
+ @param first2 iterator to the beginning of the second range
+ @param init initial value of the reduction and the storage for the reduced result
+ @param bop_r binary operator that will be applied in unspecified order to the results of @c bop_t
+ @param bop_t binary operator that will be applied to transform each element in the range to the result type
+ @param part partitioning algorithm to schedule parallel iterations
+
+ @return a tf::Task handle
+
+ The task spawns asynchronous tasks to perform parallel reduction over @c init and
+ transformed elements in the range <tt>[first1, last1)</tt>.
+ The reduced result is stored in @c init.
+ This method is equivalent to the parallel execution of the following loop:
+
+ @code{.cpp}
+ for(auto itr1=first1, itr2=first2; itr1!=last1; itr1++, itr2++) {
+ init = bop_r(init, bop_t(*itr1, *itr2));
+ }
+ @endcode
+
+ Iterators are templated to enable stateful range using std::reference_wrapper.
+
+ Please refer to @ref ParallelReduction for details.
+ */
+ template <
+ typename B1, typename E1, typename B2, typename T, typename BOP_R, typename BOP_T,
+ typename P = DefaultPartitioner,
+ std::enable_if_t<!is_partitioner_v<std::decay_t<BOP_T>>, void>* = nullptr
+ >
+ Task transform_reduce(
+ B1 first1, E1 last1, B2 first2, T& init, BOP_R bop_r, BOP_T bop_t, P part = P()
+ );
+
// ------------------------------------------------------------------------
// scan
// ------------------------------------------------------------------------
@tparam E ending iterator type
@tparam D destination iterator type
@tparam BOP summation operator type
+ @tparam P partitioner type (default tf::DefaultPartitioner)
@param first start of input range
@param last end of input range
@param d_first start of output range (may be the same as input range)
@param bop function to perform summation
+ @param part partitioning algorithm to schedule parallel iterations
Performs the cumulative sum (aka prefix sum, aka scan) of the input range
and writes the result to the output range.
Please refer to @ref ParallelScan for details.
*/
- template <typename B, typename E, typename D, typename BOP>
- Task inclusive_scan(B first, E last, D d_first, BOP bop);
+ template <typename B, typename E, typename D, typename BOP, typename P = DefaultPartitioner,
+ std::enable_if_t<is_partitioner_v<std::decay_t<P>>, void>* = nullptr
+ >
+ Task inclusive_scan(B first, E last, D d_first, BOP bop, P part = P());
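+
+  // A minimal sketch (illustrative): out[i] = in[0] + ... + in[i]
+  //
+  //   taskflow.inclusive_scan(in.begin(), in.end(), out.begin(), std::plus<int>{});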
/**
@brief creates an STL-styled parallel inclusive-scan task with an initial value
@tparam D destination iterator type
@tparam BOP summation operator type
@tparam T initial value type
+ @tparam P partitioner type (default tf::DefaultPartitioner)
@param first start of input range
@param last end of input range
@param d_first start of output range (may be the same as input range)
@param bop function to perform summation
@param init initial value
+ @param part partitioning algorithm to schedule parallel iterations
Performs the cumulative sum (aka prefix sum, aka scan) of the input range
and writes the result to the output range.
Please refer to @ref ParallelScan for details.
*/
- template <typename B, typename E, typename D, typename BOP, typename T>
- Task inclusive_scan(B first, E last, D d_first, BOP bop, T init);
+ template <typename B, typename E, typename D, typename BOP, typename T, typename P = DefaultPartitioner,
+ std::enable_if_t<!is_partitioner_v<std::decay_t<T>>, void>* = nullptr
+ >
+ Task inclusive_scan(B first, E last, D d_first, BOP bop, T init, P part = P());
/**
@brief creates an STL-styled parallel exclusive-scan task
@tparam D destination iterator type
@tparam T initial value type
@tparam BOP summation operator type
+ @tparam P partitioner type (default tf::DefaultPartitioner)
@param first start of input range
@param last end of input range
@param d_first start of output range (may be the same as input range)
@param init initial value
@param bop function to perform summation
+ @param part partitioning algorithm to schedule parallel iterations
Performs the cumulative sum (aka prefix sum, aka scan) of the input range
and writes the result to the output range.
Please refer to @ref ParallelScan for details.
*/
- template <typename B, typename E, typename D, typename T, typename BOP>
- Task exclusive_scan(B first, E last, D d_first, T init, BOP bop);
+ template <typename B, typename E, typename D, typename T, typename BOP, typename P = DefaultPartitioner>
+ Task exclusive_scan(B first, E last, D d_first, T init, BOP bop, P part = P());
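+
+  // A minimal sketch (illustrative): out[0] = 0, out[i] = 0 + in[0] + ... + in[i-1]
+  //
+  //   taskflow.exclusive_scan(in.begin(), in.end(), out.begin(), 0, std::plus<int>{});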
// ------------------------------------------------------------------------
// transform scan
@tparam D destination iterator type
@tparam BOP summation operator type
@tparam UOP transform operator type
+ @tparam P partitioner type (default tf::DefaultPartitioner)
@param first start of input range
@param last end of input range
@param d_first start of output range (may be the same as input range)
@param bop function to perform summation
@param uop function to transform elements of the input range
+ @param part partitioning algorithm to schedule parallel iterations
Write the cumulative sum (aka prefix sum, aka scan) of the input range
to the output range. Each element of the output range contains the
Please refer to @ref ParallelScan for details.
*/
- template <typename B, typename E, typename D, typename BOP, typename UOP>
- Task transform_inclusive_scan(B first, E last, D d_first, BOP bop, UOP uop);
+ template <typename B, typename E, typename D, typename BOP, typename UOP, typename P = DefaultPartitioner,
+ std::enable_if_t<is_partitioner_v<std::decay_t<P>>, void>* = nullptr
+ >
+ Task transform_inclusive_scan(B first, E last, D d_first, BOP bop, UOP uop, P part = P());
/**
@brief creates an STL-styled parallel transform-inclusive scan task
@tparam BOP summation operator type
@tparam UOP transform operator type
@tparam T initial value type
+ @tparam P partitioner type (default tf::DefaultPartitioner)
@param first start of input range
@param last end of input range
@param bop function to perform summation
@param uop function to transform elements of the input range
@param init initial value
+ @param part partitioning algorithm to schedule parallel iterations
Write the cumulative sum (aka prefix sum, aka scan) of the input range
to the output range. Each element of the output range contains the
Please refer to @ref ParallelScan for details.
*/
- template <typename B, typename E, typename D, typename BOP, typename UOP, typename T>
- Task transform_inclusive_scan(B first, E last, D d_first, BOP bop, UOP uop, T init);
+ template <typename B, typename E, typename D, typename BOP, typename UOP, typename T, typename P = DefaultPartitioner,
+ std::enable_if_t<!is_partitioner_v<std::decay_t<T>>, void>* = nullptr
+ >
+ Task transform_inclusive_scan(B first, E last, D d_first, BOP bop, UOP uop, T init, P part = P());
/**
@brief creates an STL-styled parallel transform-exclusive scan task
@tparam BOP summation operator type
@tparam UOP transform operator type
@tparam T initial value type
+ @tparam P partitioner type (default tf::DefaultPartitioner)
@param first start of input range
@param last end of input range
@param bop function to perform summation
@param uop function to transform elements of the input range
@param init initial value
+ @param part partitioning algorithm to schedule parallel iterations
Write the cumulative sum (aka prefix sum, aka scan) of the input range
to the output range. Each element of the output range contains the
Please refer to @ref ParallelScan for details.
*/
- template <typename B, typename E, typename D, typename T, typename BOP, typename UOP>
- Task transform_exclusive_scan(B first, E last, D d_first, T init, BOP bop, UOP uop);
+ template <typename B, typename E, typename D, typename T, typename BOP, typename UOP, typename P = DefaultPartitioner>
+ Task transform_exclusive_scan(B first, E last, D d_first, T init, BOP bop, UOP uop, P part = P());
// ------------------------------------------------------------------------
// find
@param last end of the input range
@param result resulting iterator to the found element in the input range
@param predicate unary predicate which returns @c true for the required element
- @param part partitioning algorithm (default tf::GuidedPartitioner)
+ @param part partitioning algorithm (default tf::DefaultPartitioner)
Returns an iterator to the first element in the range <tt>[first, last)</tt>
that satisfies the given criteria (or last if there is no such iterator).
Iterators are templated to enable stateful range using std::reference_wrapper.
*/
- template <typename B, typename E, typename T, typename UOP, typename P = GuidedPartitioner>
- Task find_if(B first, E last, T& result, UOP predicate, P&& part = P());
-
+ template <typename B, typename E, typename T, typename UOP, typename P = DefaultPartitioner>
+ Task find_if(B first, E last, T &result, UOP predicate, P part = P());
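+
+  // A minimal sketch (illustrative): `result` receives an iterator to the
+  // first match, or vec.end() if no element satisfies the predicate.
+  //
+  //   std::vector<int>::iterator result;
+  //   taskflow.find_if(vec.begin(), vec.end(), result,
+  //                    [](int x){ return x == 42; });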
+
/**
@brief constructs a task to perform STL-styled find-if-not algorithm
@param last end of the input range
@param result resulting iterator to the found element in the input range
@param predicate unary predicate which returns @c false for the required element
- @param part partitioning algorithm (default tf::GuidedPartitioner)
+ @param part partitioning algorithm (default tf::DefaultPartitioner)
Returns an iterator to the first element in the range <tt>[first, last)</tt>
that satisfies the given criteria (or last if there is no such iterator).
Iterators are templated to enable stateful range using std::reference_wrapper.
*/
- template <typename B, typename E, typename T, typename UOP,typename P = GuidedPartitioner>
- Task find_if_not(B first, E last, T& result, UOP predicate, P&& part = P());
+ template <typename B, typename E, typename T, typename UOP, typename P = DefaultPartitioner>
+ Task find_if_not(B first, E last, T &result, UOP predicate, P part = P());
/**
@brief constructs a task to perform STL-styled min-element algorithm
@param last end of the input range
@param result resulting iterator to the found element in the input range
@param comp comparison function object
- @param part partitioning algorithm (default tf::GuidedPartitioner)
+ @param part partitioning algorithm (default tf::DefaultPartitioner)
Finds the smallest element in the <tt>[first, last)</tt>
using the given comparison function object.
Iterators are templated to enable stateful range using std::reference_wrapper.
*/
template <typename B, typename E, typename T, typename C, typename P>
- Task min_element(B first, E last, T& result, C comp, P&& part);
+ Task min_element(B first, E last, T& result, C comp, P part);
/**
@brief constructs a task to perform STL-styled max-element algorithm
@param last end of the input range
@param result resulting iterator to the found element in the input range
@param comp comparison function object
- @param part partitioning algorithm (default tf::GuidedPartitioner)
+ @param part partitioning algorithm (default tf::DefaultPartitioner)
Finds the largest element in the <tt>[first, last)</tt>
using the given comparison function object.
Iterators are templated to enable stateful range using std::reference_wrapper.
*/
template <typename B, typename E, typename T, typename C, typename P>
- Task max_element(B first, E last, T& result, C comp, P&& part);
+ Task max_element(B first, E last, T& result, C comp, P part);
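+
+  // A minimal sketch (illustrative; these overloads take an explicit partitioner):
+  //
+  //   std::vector<int>::iterator smallest, largest;
+  //   taskflow.min_element(vec.begin(), vec.end(), smallest, std::less<int>{},
+  //                        tf::StaticPartitioner());
+  //   taskflow.max_element(vec.begin(), vec.end(), largest, std::less<int>{},
+  //                        tf::StaticPartitioner());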
// ------------------------------------------------------------------------
// sort
@param first iterator to the beginning (inclusive)
@param last iterator to the end (exclusive)
- The task spawns asynchronous tasks to parallelly sort elements in the range
+ The task spawns asynchronous tasks to sort elements in parallel in the range
<tt>[first, last)</tt> using the @c std::less<T> comparator,
where @c T is the dereferenced iterator type.
}
// Function: emplace
-template <typename C, std::enable_if_t<is_dynamic_task_v<C>, void>*>
+template <typename C, std::enable_if_t<is_subflow_task_v<C>, void>*>
Task FlowBuilder::emplace(C&& c) {
return Task(_graph._emplace_back("", 0, nullptr, nullptr, 0,
- std::in_place_type_t<Node::Dynamic>{}, std::forward<C>(c)
+ std::in_place_type_t<Node::Subflow>{}, std::forward<C>(c)
));
}
// Class: Graph
// ----------------------------------------------------------------------------
+
/**
@class Graph
friend class FlowBuilder;
public:
+
+ /**
+ @brief destroys the runtime object
+
+ Issues a tf::Runtime::corun_all to finish all spawned asynchronous tasks
+ and then destroys the runtime object.
+ */
+ ~Runtime();
/**
@brief obtains the running executor
The method creates an asynchronous task to launch the given
function on the given arguments.
The difference to tf::Executor::async is that the created asynchronous task
- pertains to the runtime.
- When the runtime joins, all asynchronous tasks created from the runtime
- are guaranteed to finish after the join returns.
+ pertains to the runtime object.
+ Applications can explicitly issue tf::Runtime::corun_all
+ to wait for all spawned asynchronous tasks to finish.
For example:
@code{.cpp}
rt.async([&](){ counter++; });
}
- // explicit join 100 asynchronous tasks
- rt.join();
+ // wait for the 100 asynchronous tasks to finish
+ rt.corun_all();
assert(counter == 102);
});
@endcode
});
}
- // explicit join 100 asynchronous tasks
- rt.join();
+ // wait for the 200 asynchronous tasks to finish
+ rt.corun_all();
assert(counter == 200);
});
@endcode
auto async(F&& f);
/**
- @brief similar to tf::Runtime::async but assigns the task a name
+ @brief runs the given callable asynchronously
@tparam F callable type
+ @tparam P task parameters type
- @param name assigned name to the task
+ @param params task parameters
@param f callable
@code{.cpp}
@endcode
*/
- template <typename F>
- auto async(const std::string& name, F&& f);
+ template <typename P, typename F>
+ auto async(P&& params, F&& f);
/**
@brief runs the given function asynchronously without returning any future object
for(int i=0; i<100; i++) {
rt.silent_async([&](){ counter++; });
}
- rt.join();
+ rt.corun_all();
assert(counter == 100);
});
@endcode
void silent_async(F&& f);
/**
- @brief similar to tf::Runtime::silent_async but assigns the task a name
+ @brief runs the given function asynchronously without returning any future object
@tparam F callable type
- @param name assigned name to the task
+ @param params task parameters
@param f callable
@code{.cpp}
taskflow.emplace([&](tf::Runtime& rt){
rt.silent_async("my task", [](){});
- rt.join();
+ rt.corun_all();
+ });
+ @endcode
+ */
+ template <typename P, typename F>
+ void silent_async(P&& params, F&& f);
+
+ /**
+ @brief similar to tf::Runtime::silent_async but the caller must be the worker of the runtime
+
+ @tparam F callable type
+
+ @param f callable
+
+ The method bypasses the check of the caller worker from the executor
+ and thus can only be called by the worker of this runtime.
+
+ @code{.cpp}
+ taskflow.emplace([&](tf::Runtime& rt){
+ // running by the worker of this runtime
+ rt.silent_async_unchecked([](){});
+ rt.corun_all();
});
@endcode
*/
template <typename F>
- void silent_async(const std::string& name, F&& f);
+ void silent_async_unchecked(F&& f);
/**
@brief similar to tf::Runtime::silent_async but the caller must be the worker of the runtime
@tparam F callable type
+ @tparam P task parameters type
- @param name assigned name to the task
+ @param params task parameters
@param f callable
The method bypasses the check of the caller worker from the executor
taskflow.emplace([&](tf::Runtime& rt){
// running by the worker of this runtime
rt.silent_async_unchecked("my task", [](){});
- rt.join();
+ rt.corun_all();
});
@endcode
*/
- template <typename F>
- void silent_async_unchecked(const std::string& name, F&& f);
+ template <typename P, typename F>
+ void silent_async_unchecked(P&& params, F&& f);
/**
@brief co-runs the given target and waits until it completes
A target can be one of the following forms:
- + a dynamic task to spawn a subflow or
+ + a subflow task to spawn a subflow or
+ a composable graph object with `tf::Graph& T::graph()` defined
@code{.cpp}
the caller thread (worker) is not blocked (e.g., sleeping or holding any lock).
Instead, the caller thread joins the work-stealing loop of the executor
and returns when all tasks in the target complete.
+
+ @attention
+ Only the worker of this tf::Runtime can issue corun.
*/
template <typename T>
void corun(T&& target);
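
A hedged sketch of co-running a composable graph object (tf::Taskflow defines
tf::Graph& graph(); the executor name is assumed):

@code{.cpp}
tf::Taskflow a, b;
a.emplace([](){ std::printf("task in a\n"); });
b.emplace([&](tf::Runtime& rt){
  // the caller worker joins the work-stealing loop until a completes
  rt.corun(a);
});
executor.run(b).wait();
@endcode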
The method keeps the caller worker running in the work-stealing loop
until the stop predicate becomes true.
+
+ @attention
+ Only the worker of this tf::Runtime can issue corun.
*/
template <typename P>
void corun_until(P&& predicate);
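
A hedged sketch (the flag and the spawned task are illustrative):

@code{.cpp}
taskflow.emplace([&](tf::Runtime& rt){
  std::atomic<bool> done{false};
  rt.silent_async([&](){ done = true; });
  // keep this worker stealing and running tasks until the flag is set
  rt.corun_until([&](){ return done.load(std::memory_order_relaxed); });
});
@endcode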
/**
- @brief joins all asynchronous tasks spawned by this runtime
+ @brief coruns all asynchronous tasks spawned by this runtime with other workers
- Immediately joins all asynchronous tasks (tf::Runtime::async,
- tf::Runtime::silent_async).
- Unlike tf::Subflow::join, you can join multiples times from
- a tf::Runtime object.
+ Coruns all asynchronous tasks (tf::Runtime::async,
+ tf::Runtime::silent_async) with other workers until all those
+ asynchronous tasks finish.
@code{.cpp}
std::atomic<size_t> counter{0};
taskflow.emplace([&](tf::Runtime& rt){
- // spawn 100 async tasks and join
+ // spawn 100 async tasks and wait
for(int i=0; i<100; i++) {
rt.silent_async([&](){ counter++; });
}
- rt.join();
+ rt.corun_all();
assert(counter == 100);
- // spawn another 100 async tasks and join
+ // spawn another 100 async tasks and wait
for(int i=0; i<100; i++) {
rt.silent_async([&](){ counter++; });
}
- rt.join();
+ rt.corun_all();
assert(counter == 200);
});
@endcode
@attention
- Only the worker of this tf::Runtime can issue join.
+ Only the worker of this tf::Runtime can issue tf::Runtime::corun_all.
*/
- inline void join();
+ inline void corun_all();
/**
@brief acquire a reference to the underlying worker
/**
@private
*/
- template <typename F>
- auto _async(Worker& w, const std::string& name, F&& f);
+ template <typename P, typename F>
+ auto _async(Worker& w, P&& params, F&& f);
/**
@private
*/
- template <typename F>
- void _silent_async(Worker& w, const std::string& name, F&& f);
+ template <typename P, typename F>
+ void _silent_async(Worker& w, P&& params, F&& f);
};
// constructor
return _worker;
}
+// ----------------------------------------------------------------------------
+// TaskParams
+// ----------------------------------------------------------------------------
+
+/**
+@struct TaskParams
+
+@brief task parameters to use when creating an asynchronous task
+*/
+struct TaskParams {
+ /**
+ @brief name of the task
+ */
+ std::string name;
+
+ /**
+ @brief priority of the task
+ */
+ unsigned priority {0};
+
+ /**
+ @brief C-styled pointer to user data
+ */
+ void* data {nullptr};
+};
+
+/**
+@struct DefaultTaskParams
+
+@brief empty task parameter type for compile-time optimization
+*/
+struct DefaultTaskParams {
+};
+
+/**
+@brief determines if the given type is a task parameter type
+
+Task parameters can be specified in one of the following types:
+ + tf::TaskParams: assign the struct of defined parameters
+ + tf::DefaultTaskParams: assign nothing
+ + std::string: assign a name to the task
+*/
+template <typename P>
+constexpr bool is_task_params_v =
+ std::is_same_v<std::decay_t<P>, TaskParams> ||
+ std::is_same_v<std::decay_t<P>, DefaultTaskParams> ||
+ std::is_constructible_v<std::string, P>;
+
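A hedged sketch of the three accepted parameter forms, assuming tf::Executor
provides matching silent_async(P&&, F&&) overloads as tf::Runtime does above:

@code{.cpp}
tf::Executor executor;

// full parameter struct: name, priority, and user data
tf::TaskParams params;
params.name = "my task";
executor.silent_async(params, [](){});

// a plain string assigns only a name
executor.silent_async("named task", [](){});

// DefaultTaskParams assigns nothing (compile-time optimization)
executor.silent_async(tf::DefaultTaskParams{}, [](){});

executor.wait_for_all();
@endcode
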
// ----------------------------------------------------------------------------
// Node
// ----------------------------------------------------------------------------
friend class Graph;
friend class Task;
+ friend class AsyncTask;
friend class TaskView;
friend class Taskflow;
friend class Executor;
constexpr static int DETACHED = 2;
constexpr static int ACQUIRED = 4;
constexpr static int READY = 8;
+ constexpr static int EXCEPTION = 16;
using Placeholder = std::monostate;
> work;
};
- // dynamic work handle
- struct Dynamic {
+ // subflow work handle
+ struct Subflow {
template <typename C>
- Dynamic(C&&);
+ Subflow(C&&);
- std::function<void(Subflow&)> work;
+ std::function<void(tf::Subflow&)> work;
Graph subgraph;
};
template <typename T>
Async(T&&);
- std::function<void()> work;
+ std::variant<
+ std::function<void()>, std::function<void(Runtime&)>
+ > work;
};
// silent dependent async
template <typename C>
DependentAsync(C&&);
- std::function<void()> work;
-
+ std::variant<
+ std::function<void()>, std::function<void(Runtime&)>
+ > work;
+
+ std::atomic<size_t> use_count {1};
std::atomic<AsyncState> state {AsyncState::UNFINISHED};
};
using handle_t = std::variant<
Placeholder, // placeholder
Static, // static tasking
- Dynamic, // dynamic tasking
+ Subflow, // subflow tasking
Condition, // conditional tasking
MultiCondition, // multi-conditional tasking
Module, // composable tasking
Async, // async tasking
- DependentAsync // dependent async tasking (no future)
+ DependentAsync // dependent async tasking
>;
struct Semaphores {
// variant index
constexpr static auto PLACEHOLDER = get_index_v<Placeholder, handle_t>;
constexpr static auto STATIC = get_index_v<Static, handle_t>;
- constexpr static auto DYNAMIC = get_index_v<Dynamic, handle_t>;
+ constexpr static auto SUBFLOW = get_index_v<Subflow, handle_t>;
constexpr static auto CONDITION = get_index_v<Condition, handle_t>;
constexpr static auto MULTI_CONDITION = get_index_v<MultiCondition, handle_t>;
constexpr static auto MODULE = get_index_v<Module, handle_t>;
Node() = default;
template <typename... Args>
- Node(const std::string&, unsigned, Topology*, Node*, size_t, Args&&... args);
+ Node(const std::string&, unsigned, Topology*, Node*, size_t, Args&&...);
+
+ template <typename... Args>
+ Node(const std::string&, Topology*, Node*, size_t, Args&&...);
+
+ template <typename... Args>
+ Node(const TaskParams&, Topology*, Node*, size_t, Args&&...);
+
+ template <typename... Args>
+ Node(const DefaultTaskParams&, Topology*, Node*, size_t, Args&&...);
~Node();
unsigned _priority {0};
+ void* _data {nullptr};
+
Topology* _topology {nullptr};
Node* _parent {nullptr};
- void* _data {nullptr};
-
SmallVector<Node*> _successors;
SmallVector<Node*> _dependents;
std::atomic<size_t> _join_counter {0};
std::unique_ptr<Semaphores> _semaphores;
+ std::exception_ptr _exception_ptr {nullptr};
handle_t _handle;
void _precede(Node*);
void _set_up_join_counter();
+ void _process_exception();
bool _is_cancelled() const;
bool _is_conditioner() const;
}
// ----------------------------------------------------------------------------
-// Definition for Node::Dynamic
+// Definition for Node::Subflow
// ----------------------------------------------------------------------------
// Constructor
template <typename C>
-Node::Dynamic::Dynamic(C&& c) : work {std::forward<C>(c)} {
+Node::Subflow::Subflow(C&& c) : work {std::forward<C>(c)} {
}
// ----------------------------------------------------------------------------
_handle {std::forward<Args>(args)...} {
}
-//Node::Node(Args&&... args): _handle{std::forward<Args>(args)...} {
-//}
+// Constructor
+template <typename... Args>
+Node::Node(
+ const std::string& name,
+ Topology* topology,
+ Node* parent,
+ size_t join_counter,
+ Args&&... args
+) :
+ _name {name},
+ _topology {topology},
+ _parent {parent},
+ _join_counter {join_counter},
+ _handle {std::forward<Args>(args)...} {
+}
+
+// Constructor
+template <typename... Args>
+Node::Node(
+ const TaskParams& params,
+ Topology* topology,
+ Node* parent,
+ size_t join_counter,
+ Args&&... args
+) :
+ _name {params.name},
+ _priority {params.priority},
+ _data {params.data},
+ _topology {topology},
+ _parent {parent},
+ _join_counter {join_counter},
+ _handle {std::forward<Args>(args)...} {
+}
+
+// Constructor
+template <typename... Args>
+Node::Node(
+ const DefaultTaskParams&,
+ Topology* topology,
+ Node* parent,
+ size_t join_counter,
+ Args&&... args
+) :
+ _topology {topology},
+ _parent {parent},
+ _join_counter {join_counter},
+ _handle {std::forward<Args>(args)...} {
+}
// Destructor
inline Node::~Node() {
// this is to avoid stack overflow
- if(_handle.index() == DYNAMIC) {
+ if(_handle.index() == SUBFLOW) {
// using std::get_if instead of std::get makes this compatible
// with older macOS versions
// the result of std::get_if is guaranteed to be non-null
// due to the index check above
- auto& subgraph = std::get_if<Dynamic>(&_handle)->subgraph;
+ auto& subgraph = std::get_if<Subflow>(&_handle)->subgraph;
std::vector<Node*> nodes;
nodes.reserve(subgraph.size());
while(i < nodes.size()) {
- if(nodes[i]->_handle.index() == DYNAMIC) {
- auto& sbg = std::get_if<Dynamic>(&(nodes[i]->_handle))->subgraph;
+ if(nodes[i]->_handle.index() == SUBFLOW) {
+ auto& sbg = std::get_if<Subflow>(&(nodes[i]->_handle))->subgraph;
std::move(
sbg._nodes.begin(), sbg._nodes.end(), std::back_inserter(nodes)
);
}
// Function: _is_cancelled
+// we currently only support cancellation of taskflow (no async task)
inline bool Node::_is_cancelled() const {
- return _topology && _topology->_is_cancelled.load(std::memory_order_relaxed);
+ //return _topology && _topology->_is_cancelled.load(std::memory_order_relaxed);
+ return _topology &&
+ (_topology->_state.load(std::memory_order_relaxed) & Topology::CANCELLED);
}
// Procedure: _set_up_join_counter
c++;
}
}
- _join_counter.store(c, std::memory_order_release);
+ _join_counter.store(c, std::memory_order_relaxed);
}
+// Procedure: _process_exception
+inline void Node::_process_exception() {
+ if(_exception_ptr) {
+ auto e = _exception_ptr;
+ _exception_ptr = nullptr;
+ std::rethrow_exception(e);
+ }
+}
// Function: _acquire_all
inline bool Node::_acquire_all(SmallVector<Node*>& nodes) {
return _nodes.back();
}
+
} // end of namespace tf. ---------------------------------------------------
struct Waiter {
std::atomic<Waiter*> next;
- std::mutex mu;
- std::condition_variable cv;
uint64_t epoch;
- unsigned state;
- enum {
- kNotSignaled,
+ enum : unsigned {
+ kNotSignaled = 0,
kWaiting,
kSignaled,
};
+
+#ifdef __cpp_lib_atomic_wait
+ std::atomic<unsigned> state {0};
+#else
+ std::mutex mu;
+ std::condition_variable cv;
+ unsigned state;
+#endif
};
explicit Notifier(size_t N) : _waiters{N} {
}
// commit_wait commits waiting.
+ // only the waiter itself can call this method
void commit_wait(Waiter* w) {
+#ifdef __cpp_lib_atomic_wait
+ w->state.store(Waiter::kNotSignaled, std::memory_order_relaxed);
+#else
w->state = Waiter::kNotSignaled;
+#endif
// Modification epoch of this waiter.
uint64_t epoch =
(w->epoch & kEpochMask) +
// cancel_wait cancels effects of the previous prepare_wait call.
void cancel_wait(Waiter* w) {
uint64_t epoch =
- (w->epoch & kEpochMask) +
- (((w->epoch & kWaiterMask) >> kWaiterShift) << kEpochShift);
+ (w->epoch & kEpochMask) +
+ (((w->epoch & kWaiterMask) >> kWaiterShift) << kEpochShift);
uint64_t state = _state.load(std::memory_order_relaxed);
for (;;) {
if (int64_t((state & kEpochMask) - epoch) < 0) {
uint64_t state = _state.load(std::memory_order_acquire);
for (;;) {
// Easy case: no waiters.
- if ((state & kStackMask) == kStackMask && (state & kWaiterMask) == 0)
+ if ((state & kStackMask) == kStackMask && (state & kWaiterMask) == 0) {
return;
+ }
uint64_t waiters = (state & kWaiterMask) >> kWaiterShift;
uint64_t newstate;
if (all) {
Waiter* wnext = w->next.load(std::memory_order_relaxed);
uint64_t next = kStackMask;
//if (wnext != nullptr) next = wnext - &_waiters[0];
- if (wnext != nullptr) next = static_cast<uint64_t>(wnext - &_waiters[0]);
+ if (wnext != nullptr) {
+ next = static_cast<uint64_t>(wnext - &_waiters[0]);
+ }
// Note: we don't add kEpochInc here. ABA problem on the lock-free stack
// can't happen because a waiter is re-pushed onto the stack only after
// it was in the pre-wait state which inevitably leads to epoch
if (!all && waiters) return; // unblocked pre-wait thread
if ((state & kStackMask) == kStackMask) return;
Waiter* w = &_waiters[state & kStackMask];
- if (!all) w->next.store(nullptr, std::memory_order_relaxed);
+ if (!all) {
+ w->next.store(nullptr, std::memory_order_relaxed);
+ }
_unpark(w);
return;
}
std::vector<Waiter> _waiters;
void _park(Waiter* w) {
+#ifdef __cpp_lib_atomic_wait
+ unsigned target = Waiter::kNotSignaled;
+ if(w->state.compare_exchange_strong(target, Waiter::kWaiting,
+ std::memory_order_relaxed,
+ std::memory_order_relaxed)) {
+ w->state.wait(Waiter::kWaiting, std::memory_order_relaxed);
+ }
+#else
std::unique_lock<std::mutex> lock(w->mu);
while (w->state != Waiter::kSignaled) {
w->state = Waiter::kWaiting;
w->cv.wait(lock);
}
+#endif
}
void _unpark(Waiter* waiters) {
Waiter* next = nullptr;
for (Waiter* w = waiters; w; w = next) {
next = w->next.load(std::memory_order_relaxed);
+#ifdef __cpp_lib_atomic_wait
+ // We only notify if the other side is waiting - this is why we use a
+ // tri-state variable instead of a binary-state one (i.e., atomic_flag);
+ // performance is about 0.1% faster.
+ if(w->state.exchange(Waiter::kSignaled, std::memory_order_relaxed) ==
+ Waiter::kWaiting) {
+ w->state.notify_one();
+ }
+#else
unsigned state;
{
std::unique_lock<std::mutex> lock(w->mu);
}
// Avoid notifying if it wasn't waiting.
if (state == Waiter::kWaiting) w->cv.notify_one();
+#endif
}
}
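
A hedged sketch of the two-phase wait protocol these fragments implement
(prepare_wait is referenced by cancel_wait above; the waiter wiring and the
has_pending_work predicate are illustrative):

@code{.cpp}
Notifier notifier(1);
Notifier::Waiter* w = &notifier._waiters[0]; // illustrative: one waiter per thread

notifier.prepare_wait(w);    // announce the intent to sleep
if(has_pending_work()) {     // hypothetical re-check after announcing
  notifier.cancel_wait(w);   // undo prepare_wait and keep running
}
else {
  notifier.commit_wait(w);   // park until a notify wakes this waiter
}
@endcode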
/** @brief static task type */
STATIC,
/** @brief dynamic (subflow) task type */
- DYNAMIC,
+ SUBFLOW,
/** @brief condition task type */
CONDITION,
/** @brief module task type */
inline constexpr std::array<TaskType, 6> TASK_TYPES = {
TaskType::PLACEHOLDER,
TaskType::STATIC,
- TaskType::DYNAMIC,
+ TaskType::SUBFLOW,
TaskType::CONDITION,
TaskType::MODULE,
TaskType::ASYNC,
@code{.cpp}
TaskType::PLACEHOLDER -> "placeholder"
TaskType::STATIC -> "static"
-TaskType::DYNAMIC -> "subflow"
+TaskType::SUBFLOW -> "subflow"
TaskType::CONDITION -> "condition"
TaskType::MODULE -> "module"
TaskType::ASYNC -> "async"
switch(type) {
case TaskType::PLACEHOLDER: val = "placeholder"; break;
case TaskType::STATIC: val = "static"; break;
- case TaskType::DYNAMIC: val = "subflow"; break;
+ case TaskType::SUBFLOW: val = "subflow"; break;
case TaskType::CONDITION: val = "condition"; break;
case TaskType::MODULE: val = "module"; break;
case TaskType::ASYNC: val = "async"; break;
-A dynamic task is a callable object constructible from std::function<void(Subflow&)>.
+A subflow task is a callable object constructible from std::function<void(tf::Subflow&)>.
*/
template <typename C>
-constexpr bool is_dynamic_task_v =
+constexpr bool is_subflow_task_v =
std::is_invocable_r_v<void, C, Subflow&> &&
!std::is_invocable_r_v<void, C, Runtime&>;
template <typename C>
constexpr bool is_condition_task_v =
(std::is_invocable_r_v<int, C> || std::is_invocable_r_v<int, C, Runtime&>) &&
- !is_dynamic_task_v<C>;
+ !is_subflow_task_v<C>;
/**
@brief determines if a callable is a multi-condition task
constexpr bool is_multi_condition_task_v =
(std::is_invocable_r_v<SmallVector<int>, C> ||
std::is_invocable_r_v<SmallVector<int>, C, Runtime&>) &&
- !is_dynamic_task_v<C>;
+ !is_subflow_task_v<C>;
/**
@brief determines if a callable is a static task
(std::is_invocable_r_v<void, C> || std::is_invocable_r_v<void, C, Runtime&>) &&
!is_condition_task_v<C> &&
!is_multi_condition_task_v<C> &&
- !is_dynamic_task_v<C>;
+ !is_subflow_task_v<C>;
// ----------------------------------------------------------------------------
// Task
@return @c *this
*/
Task& data(void* data);
-
+
/**
@brief assigns a priority value to the task
switch(_node->_handle.index()) {
case Node::PLACEHOLDER: return TaskType::PLACEHOLDER;
case Node::STATIC: return TaskType::STATIC;
- case Node::DYNAMIC: return TaskType::DYNAMIC;
+ case Node::SUBFLOW: return TaskType::SUBFLOW;
case Node::CONDITION: return TaskType::CONDITION;
case Node::MULTI_CONDITION: return TaskType::CONDITION;
case Node::MODULE: return TaskType::MODULE;
if constexpr(is_static_task_v<C>) {
_node->_handle.emplace<Node::Static>(std::forward<C>(c));
}
- else if constexpr(is_dynamic_task_v<C>) {
- _node->_handle.emplace<Node::Dynamic>(std::forward<C>(c));
+ else if constexpr(is_subflow_task_v<C>) {
+ _node->_handle.emplace<Node::Subflow>(std::forward<C>(c));
}
else if constexpr(is_condition_task_v<C>) {
_node->_handle.emplace<Node::Condition>(std::forward<C>(c));
switch(_node._handle.index()) {
case Node::PLACEHOLDER: return TaskType::PLACEHOLDER;
case Node::STATIC: return TaskType::STATIC;
- case Node::DYNAMIC: return TaskType::DYNAMIC;
+ case Node::SUBFLOW: return TaskType::SUBFLOW;
case Node::CONDITION: return TaskType::CONDITION;
case Node::MULTI_CONDITION: return TaskType::CONDITION;
case Node::MODULE: return TaskType::MODULE;
}
}
-} // end of namespace tf. ---------------------------------------------------
+} // end of namespace tf. ----------------------------------------------------
namespace std {
1. static task : the callable constructible from
@c std::function<void()>
- 2. dynamic task : the callable constructible from
+ 2. subflow task : the callable constructible from
@c std::function<void(tf::Subflow&)>
3. condition task : the callable constructible from
@c std::function<int()>
template <typename V>
void for_each_task(V&& visitor) const;
+ /**
+ @brief removes dependencies that go from task @c from to task @c to
+
+ @param from from task (dependent)
+ @param to to task (successor)
+
+ @code{.cpp}
+ tf::Taskflow taskflow;
+ auto a = taskflow.placeholder().name("a");
+ auto b = taskflow.placeholder().name("b");
+ auto c = taskflow.placeholder().name("c");
+ auto d = taskflow.placeholder().name("d");
+
+ a.precede(b, c, d);
+ assert(a.num_successors() == 3);
+ assert(b.num_dependents() == 1);
+ assert(c.num_dependents() == 1);
+ assert(d.num_dependents() == 1);
+
+ taskflow.remove_dependency(a, b);
+ assert(a.num_successors() == 2);
+ assert(b.num_dependents() == 0);
+ @endcode
+ */
+ inline void remove_dependency(Task from, Task to);
+
/**
@brief returns a reference to the underlying graph object
Graph _graph;
std::queue<std::shared_ptr<Topology>> _topologies;
-
std::optional<std::list<Taskflow>::iterator> _satellite;
void _dump(std::ostream&, const Graph*) const;
}
}
+// Procedure: remove_dependency
+inline void Taskflow::remove_dependency(Task from, Task to) {
+ from._node->_successors.erase(std::remove_if(
+ from._node->_successors.begin(), from._node->_successors.end(), [&](Node* i){
+ return i == to._node;
+ }
+ ), from._node->_successors.end());
+
+ to._node->_dependents.erase(std::remove_if(
+ to._node->_dependents.begin(), to._node->_dependents.end(), [&](Node* i){
+ return i == from._node;
+ }
+ ), to._node->_dependents.end());
+}
+
// Procedure: dump
inline std::string Taskflow::dump() const {
std::ostringstream oss;
}
// subflow join node
- if(node->_parent && node->_parent->_handle.index() == Node::DYNAMIC &&
+ if(node->_parent && node->_parent->_handle.index() == Node::SUBFLOW &&
node->_successors.size() == 0
) {
os << 'p' << node << " -> p" << node->_parent << ";\n";
// node info
switch(node->_handle.index()) {
- case Node::DYNAMIC: {
- auto& sbg = std::get_if<Node::Dynamic>(&node->_handle)->subgraph;
+ case Node::SUBFLOW: {
+ auto& sbg = std::get_if<Node::Subflow>(&node->_handle)->subgraph;
if(!sbg.empty()) {
os << "subgraph cluster_p" << node << " {\nlabel=\"Subflow: ";
if(node->_name.empty()) os << 'p' << node;
tf::Future is a derived class from std::future that will eventually hold the
execution result of a submitted taskflow (tf::Executor::run)
-or an asynchronous task (tf::Executor::async, tf::Executor::silent_async).
In addition to the base methods inherited from std::future,
you can call tf::Future::cancel to cancel the execution of the running taskflow
associated with this future object.
friend class Subflow;
friend class Runtime;
- using handle_t = std::variant<
- std::monostate, std::weak_ptr<Topology>
- >;
-
public:
/**
bool cancel();
private:
+
+ std::weak_ptr<Topology> _topology;
- handle_t _handle;
-
- template <typename P>
- Future(std::future<T>&&, P&&);
+ Future(std::future<T>&&, std::weak_ptr<Topology> = std::weak_ptr<Topology>());
};
template <typename T>
-template <typename P>
-Future<T>::Future(std::future<T>&& fu, P&& p) :
- std::future<T> {std::move(fu)},
- _handle {std::forward<P>(p)} {
+Future<T>::Future(std::future<T>&& f, std::weak_ptr<Topology> p) :
+ std::future<T> {std::move(f)},
+ _topology {std::move(p)} {
}
// Function: cancel
template <typename T>
bool Future<T>::cancel() {
- return std::visit([](auto&& arg){
- using P = std::decay_t<decltype(arg)>;
- if constexpr(std::is_same_v<P, std::monostate>) {
- return false;
- }
- else {
- auto ptr = arg.lock();
- if(ptr) {
- ptr->_is_cancelled.store(true, std::memory_order_relaxed);
- return true;
- }
- return false;
- }
- }, _handle);
+ if(auto ptr = _topology.lock(); ptr) {
+ ptr->_state.fetch_or(Topology::CANCELLED, std::memory_order_relaxed);
+ return true;
+ }
+ return false;
}
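
A hedged sketch of the simplified cancellation path (the workload is
illustrative):

@code{.cpp}
tf::Executor executor;
tf::Taskflow taskflow;
for(int i=0; i<1000; i++) {
  taskflow.emplace([](){
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  });
}
tf::Future<void> fu = executor.run(taskflow);
fu.cancel();  // sets Topology::CANCELLED; unscheduled tasks are skipped
fu.get();     // waits until the cancellation takes effect
@endcode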
// ----------------------------------------------------------------------------
-// class: TopologyBase
class TopologyBase {
- friend class Executor;
- friend class Node;
-
- template <typename T>
- friend class Future;
-
- protected:
-
- std::atomic<bool> _is_cancelled { false };
};
-// ----------------------------------------------------------------------------
-
// class: Topology
-class Topology : public TopologyBase {
+class Topology {
friend class Executor;
friend class Runtime;
+ friend class Node;
+
+ template <typename T>
+ friend class Future;
+
+ constexpr static int CLEAN = 0;
+ constexpr static int CANCELLED = 1;
+ constexpr static int EXCEPTION = 2;
public:
template <typename P, typename C>
Topology(Taskflow&, P&&, C&&);
+ bool cancelled() const;
+
private:
Taskflow& _taskflow;
std::function<void()> _call;
std::atomic<size_t> _join_counter {0};
+ std::atomic<int> _state {CLEAN};
+
+ std::exception_ptr _exception_ptr {nullptr};
+
+ void _carry_out_promise();
};
// Constructor
_call {std::forward<C>(c)} {
}
+// Procedure
+inline void Topology::_carry_out_promise() {
+ if(_exception_ptr) {
+ auto e = _exception_ptr;
+ _exception_ptr = nullptr;
+ _promise.set_exception(e);
+ }
+ else {
+ _promise.set_value();
+ }
+}
+
+// Function: cancelled
+inline bool Topology::cancelled() const {
+ return _state.load(std::memory_order_relaxed) & CANCELLED;
+}
+
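A hedged sketch of the effect of _carry_out_promise: an exception stored in
the topology is rethrown from the future returned by run (the error message
is illustrative):

@code{.cpp}
tf::Executor executor;
tf::Taskflow taskflow;
taskflow.emplace([](){ throw std::runtime_error("boom"); });
try {
  executor.run(taskflow).get();  // the stored exception is rethrown here
}
catch(const std::runtime_error& e) {
  assert(std::string(e.what()) == "boom");
}
@endcode
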
} // end of namespace tf. ----------------------------------------------------
}
-// ----------------------------------------------------------------------------
-// Class Definition: WorkerInterface
-// ----------------------------------------------------------------------------
-
-/**
-@class WorkerInterface
-
-@brief class to configure worker behavior in an executor
-
-The tf::WorkerInterface class lets users interact with the executor
-to customize the worker behavior,
-such as calling custom methods before and after a worker enters and leaves
-the loop.
-When you create an executor, it spawns a set of workers to run tasks.
-The interaction between the executor and its spawned workers looks like
-the following:
-
-for(size_t n=0; n<num_workers; n++) {
- create_thread([](Worker& worker)
-
- // pre-processing executor-specific worker information
- // ...
-
- // enter the scheduling loop
- // Here, WorkerInterface::scheduler_prologue is invoked, if any
-
- while(1) {
- perform_work_stealing_algorithm();
- if(stop) {
- break;
- }
- }
-
- // leaves the scheduling loop and joins this worker thread
- // Here, WorkerInterface::scheduler_epilogue is invoked, if any
- );
-}
-
-@note
-Methods defined in tf::WorkerInterface are not thread-safe and may be
-be invoked by multiple workers concurrently.
-
-*/
-class WorkerInterface {
-
- public:
-
- /**
- @brief default destructor
- */
- virtual ~WorkerInterface() = default;
-
- /**
- @brief method to call before a worker enters the scheduling loop
- @param worker a reference to the worker
-
- The method is called by the constructor of an executor.
- */
- virtual void scheduler_prologue(Worker& worker) = 0;
-
- /**
- @brief method to call after a worker leaves the scheduling loop
- @param worker a reference to the worker
- @param ptr an pointer to the exception thrown by the scheduling loop
-
- The method is called by the constructor of an executor.
- */
- virtual void scheduler_epilogue(Worker& worker, std::exception_ptr ptr) = 0;
-
-};
-
-/**
-@brief helper function to create an instance derived from tf::WorkerInterface
-
-@tparam T type derived from tf::WorkerInterface
-@tparam ArgsT argument types to construct @c T
-
-@param args arguments to forward to the constructor of @c T
-*/
-template <typename T, typename... ArgsT>
-std::shared_ptr<T> make_worker_interface(ArgsT&&... args) {
- static_assert(
- std::is_base_of_v<WorkerInterface, T>,
- "T must be derived from WorkerInterface"
- );
- return std::make_shared<T>(std::forward<ArgsT>(args)...);
-}
-
} // end of namespace tf -----------------------------------------------------
// set the index to the maximum
cuda_single_task(p, [=] __device__ () { *idx = count; });
-ls
// launch the kernel to atomic-find the minimum
cuda_kernel<<<B, E::nt, 0, p.stream()>>>([=] __device__ (auto tid, auto bid) {
@param graph native CUDA graph
*/
template <typename T>
-void cuda_dump_graph(T& os, cudaGraph_t graph) {
+void cuda_dump_graph(T& os, cudaGraph_t g) {
os << "digraph cudaGraph {\n";
std::stack<std::tuple<cudaGraph_t, cudaGraphNode_t, int>> stack;
- stack.push(std::make_tuple(graph, nullptr, 1));
+ stack.push(std::make_tuple(g, nullptr, 1));
int pl = 0;
auto type = cuda_get_graph_node_type(node);
if(type == cudaGraphNodeTypeGraph) {
- cudaGraph_t graph;
- TF_CHECK_CUDA(cudaGraphChildGraphNodeGetGraph(node, &graph), "");
- stack.push(std::make_tuple(graph, node, l+1));
+ cudaGraph_t child_graph;
+ TF_CHECK_CUDA(cudaGraphChildGraphNodeGetGraph(node, &child_graph), "");
+ stack.push(std::make_tuple(child_graph, node, l+1));
os << 'p' << node << "["
<< "shape=folder, style=filled, fontcolor=white, fillcolor=purple, "
// Capture handle constructor
template <typename C>
-cudaFlowNode::Capture::Capture(C&& work) :
- work {std::forward<C>(work)} {
+cudaFlowNode::Capture::Capture(C&& c) :
+ work {std::forward<C>(c)} {
}
// Constructor
@param n number of elements (each of size sizeof(value_type)) to be allocated
@return a pointer to the initial element in the block of storage.
*/
- pointer allocate( size_type n, std::allocator<void>::const_pointer = 0 )
+ pointer allocate( size_type n, const void* = 0 )
{
void* ptr = NULL;
TF_CHECK_CUDA(
@param n number of elements (each of size sizeof(value_type)) to be allocated
@return a pointer to the initial element in the block of storage.
*/
- pointer allocate( size_type n, std::allocator<void>::const_pointer = 0 )
+ pointer allocate( size_type n, const void* = 0 )
{
void* ptr {nullptr};
TF_CHECK_CUDA(
*/
struct cudaEventDeleter {
void operator () (cudaEvent_t event) const {
- cudaEventDestroy(event);
+ if (event != nullptr) {
+ cudaEventDestroy(event);
+ }
}
};
// TF_VERSION / 100 % 1000 is the minor version
// TF_VERSION / 100000 is the major version
-// current version: 3.6.0
-#define TF_VERSION 300600
+// current version: 3.7.0
+#define TF_VERSION 300700
#define TF_MAJOR_VERSION TF_VERSION/100000
#define TF_MINOR_VERSION TF_VERSION/100%1000
Release notes are available here: https://taskflow.github.io/taskflow/Releases.html
*/
constexpr const char* version() {
- return "3.6.0";
+ return "3.7.0";
}
constexpr std::enable_if_t<std::is_integral<std::decay_t<T>>::value, bool>
is_range_invalid(T beg, T end, T step) {
return ((step == 0 && beg != end) ||
- (beg < end && step <= 0) ||
- (beg > end && step >= 0));
+ (beg < end && step <= 0) || // invalid forward (ascending) range
+ (beg > end && step >= 0)); // invalid backward (descending) range
}
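
A hedged compile-time sketch of the check (integral arguments):

@code{.cpp}
static_assert(!tf::is_range_invalid(0, 10, 1));   // valid forward range
static_assert(!tf::is_range_invalid(10, 0, -1));  // valid backward range
static_assert( tf::is_range_invalid(0, 10, -1));  // steps away from the end
static_assert( tf::is_range_invalid(0, 10, 0));   // zero step never terminates
@endcode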
template <typename T>
--- /dev/null
+#pragma once
+
+#if defined(_MSC_VER)
+ #define TF_FORCE_INLINE __forceinline
+#elif defined(__GNUC__) && __GNUC__ > 3
+ #define TF_FORCE_INLINE __attribute__((__always_inline__)) inline
+#else
+ #define TF_FORCE_INLINE inline
+#endif
+
+#if defined(_MSC_VER)
+ #define TF_NO_INLINE __declspec(noinline)
+#elif defined(__GNUC__) && __GNUC__ > 3
+ #define TF_NO_INLINE __attribute__((__noinline__))
+#else
+ #define TF_NO_INLINE
+#endif
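+
+// A hedged usage sketch of the two attributes:
+//
+//   TF_FORCE_INLINE int add_one(int x) { return x + 1; }  // hot path
+//   TF_NO_INLINE void report_error();                     // cold path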
+
+// ----------------------------------------------------------------------------
+
+#ifdef TF_DISABLE_EXCEPTION_HANDLING
+ #define TF_EXECUTOR_EXCEPTION_HANDLER(worker, node, code_block) \
+ code_block;
+#else
+ #define TF_EXECUTOR_EXCEPTION_HANDLER(worker, node, code_block) \
+ try { \
+ code_block; \
+ } catch(...) { \
+ _process_exception(worker, node); \
+ }
+#endif
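+
+// A hedged sketch of how the handler macro wraps a task invocation inside an
+// executor method (invoke_task is a hypothetical call that may throw):
+//
+//   TF_EXECUTOR_EXCEPTION_HANDLER(worker, node, {
+//     invoke_task(node);
+//   });
+//
+// With TF_DISABLE_EXCEPTION_HANDLING defined this expands to the bare call;
+// otherwise it expands to
+// try { invoke_task(node); } catch(...) { _process_exception(worker, node); }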
+
+// ----------------------------------------------------------------------------
#pragma once
#include <iostream>
+#include <sstream>
#include <string>
namespace tf {
#pragma once
+#if __has_include(<version>)
+# include <version>
+#endif
+
#include <type_traits>
#include <iterator>
#include <iostream>