From 3a344d5fa385713a4b831438f2428a2f8d56970e Mon Sep 17 00:00:00 2001
From: Martin Kronbichler
Date: Mon, 18 Apr 2016 11:59:14 +0200
Subject: [PATCH] Use the same affinity for inner products as for vector updates

---
 include/deal.II/lac/vector.h           |    2 +-
 include/deal.II/lac/vector.templates.h | 1109 +++++++++++++-----------
 2 files changed, 591 insertions(+), 520 deletions(-)

diff --git a/include/deal.II/lac/vector.h b/include/deal.II/lac/vector.h
index d87c63e26b..6123fe7a76 100644
--- a/include/deal.II/lac/vector.h
+++ b/include/deal.II/lac/vector.h
@@ -995,7 +995,7 @@ protected:
    * For parallel loops with TBB, this member variable stores the affinity
    * information of loops.
    */
-  std_cxx11::shared_ptr<parallel::internal::TBBPartitioner> thread_loop_partitioner;
+  mutable std_cxx11::shared_ptr<parallel::internal::TBBPartitioner> thread_loop_partitioner;

   /**
    * Make all other vector types friends.
diff --git a/include/deal.II/lac/vector.templates.h b/include/deal.II/lac/vector.templates.h
index 124587345e..d1b4a7f3a4 100644
--- a/include/deal.II/lac/vector.templates.h
+++ b/include/deal.II/lac/vector.templates.h
@@ -118,10 +118,57 @@ namespace internal
                        "into a vector of reals/doubles"));
   }

+
+
+#ifdef DEAL_II_WITH_THREADS
+  /**
+   * This struct takes the loop range from the tbb parallel for loop and
+   * translates it to the actual ranges of the for loop within the vector. It
+   * encodes the grain size but might choose larger values of chunks than the
+   * minimum grain size. The minimum grain size given to tbb is then simply
+   * 1. For affinity reasons, the layout in this loop must be kept in sync
+   * with the respective class for reductions further down.
+   */
   template <typename Functor>
-  void vectorized_transform(Functor &functor,
-                            size_type vec_size,
-                            std_cxx11::shared_ptr<parallel::internal::TBBPartitioner> &partitioner)
+  struct TBBForFunctor
+  {
+    TBBForFunctor(Functor &functor,
+                  const size_type vec_size)
+      :
+      functor(functor),
+      vec_size(vec_size)
+    {
+      // set chunk size for sub-tasks
+      const unsigned int gs = internal::Vector::minimum_parallel_grain_size;
+      n_chunks = std::min<size_type>(4*MultithreadInfo::n_threads(),
+                                     vec_size / gs);
+      chunk_size = vec_size / n_chunks;
+      // round to next multiple of 512 (or minimum grain size if that happens
+      // to be smaller)
+      if (chunk_size > 512)
+        chunk_size = ((chunk_size + 511)/512)*512;
+      n_chunks = (vec_size + chunk_size - 1) / chunk_size;
+      AssertIndexRange((n_chunks-1)*chunk_size, vec_size);
+      AssertIndexRange(vec_size, n_chunks*chunk_size+1);
+    };
+
+    void operator() (const tbb::blocked_range<size_type> &range) const
+    {
+      const size_type begin = range.begin()*chunk_size;
+      const size_type end = std::min(range.end()*chunk_size, vec_size);
+      functor(begin, end);
+    }
+
+    Functor &functor;
+    const size_type vec_size;
+    unsigned int n_chunks;
+    size_type chunk_size;
+  };
+#endif
+
+  template <typename Functor>
+  void parallel_for(Functor &functor,
+                    size_type vec_size,
+                    std_cxx11::shared_ptr<parallel::internal::TBBPartitioner> &partitioner)
   {
 #ifdef DEAL_II_WITH_THREADS
     // only go to the parallel function in case there are at least 4 parallel
     // items, otherwise the overhead is too large
     if (vec_size >= 4*internal::Vector::minimum_parallel_grain_size &&
         MultithreadInfo::n_threads() > 1)
       {
         Assert(partitioner.get() != NULL,
                ExcInternalError("Unexpected initialization of Vector that does "
                                 "not set the TBB partitioner to a usable state."));
         std_cxx11::shared_ptr<tbb::affinity_partitioner> tbb_partitioner =
           partitioner->acquire_one_partitioner();
+        TBBForFunctor<Functor> generic_functor(functor, vec_size);
         tbb::parallel_for (tbb::blocked_range<size_type> (0,
-                                                          vec_size,
-                                                          internal::Vector::minimum_parallel_grain_size),
-                           functor,
+                                                          generic_functor.n_chunks,
+                                                          1),
+                           generic_functor,
                            *tbb_partitioner);
         partitioner->release_one_partitioner(tbb_partitioner);
       }
@@ -159,13 +207,6 @@ namespace internal
     Number *dst;
     Number value;
-#ifdef DEAL_II_WITH_THREADS
-    void operator() (const tbb::blocked_range<size_type> &range) const
-    {
-
operator()(range.begin(),range.end()); - } -#endif - void operator() (const size_type begin, const size_type end) const { if (value == Number()) @@ -181,13 +222,6 @@ namespace internal const OtherNumber *src; Number *dst; -#ifdef DEAL_II_WITH_THREADS - void operator() (const tbb::blocked_range &range) const - { - operator()(range.begin(),range.end()); - } -#endif - void operator() (const size_type begin, const size_type end) const { if (types_are_equal::value) @@ -207,13 +241,6 @@ namespace internal Number *val; Number factor; -#ifdef DEAL_II_WITH_THREADS - void operator() (const tbb::blocked_range &range) const - { - operator()(range.begin(),range.end()); - } -#endif - void operator() (const size_type begin, const size_type end) const { if (parallel::internal::EnableOpenMPSimdFor::value) @@ -236,12 +263,6 @@ namespace internal Number *val; Number *v_val; Number factor; -#ifdef DEAL_II_WITH_THREADS - void operator() (const tbb::blocked_range &range) const - { - operator()(range.begin(),range.end()); - } -#endif void operator() (const size_type begin, const size_type end) const { @@ -266,12 +287,6 @@ namespace internal Number *v_val; Number a; Number x; -#ifdef DEAL_II_WITH_THREADS - void operator() (const tbb::blocked_range &range) const - { - operator()(range.begin(),range.end()); - } -#endif void operator() (const size_type begin, const size_type end) const { @@ -294,12 +309,6 @@ namespace internal { Number *val; Number *v_val; -#ifdef DEAL_II_WITH_THREADS - void operator() (const tbb::blocked_range &range) const - { - operator()(range.begin(),range.end()); - } -#endif void operator() (const size_type begin, const size_type end) const { @@ -322,12 +331,6 @@ namespace internal { Number *val; Number factor; -#ifdef DEAL_II_WITH_THREADS - void operator() (const tbb::blocked_range &range) const - { - operator()(range.begin(),range.end()); - } -#endif void operator() (const size_type begin, const size_type end) const { @@ -350,12 +353,6 @@ namespace internal { Number *val; Number *v_val; -#ifdef DEAL_II_WITH_THREADS - void operator() (const tbb::blocked_range &range) const - { - operator()(range.begin(),range.end()); - } -#endif void operator() (const size_type begin, const size_type end) const { @@ -381,12 +378,6 @@ namespace internal Number *w_val; Number a; Number b; -#ifdef DEAL_II_WITH_THREADS - void operator() (const tbb::blocked_range &range) const - { - operator()(range.begin(),range.end()); - } -#endif void operator() (const size_type begin, const size_type end) const { @@ -410,12 +401,6 @@ namespace internal Number *val; Number *v_val; Number x; -#ifdef DEAL_II_WITH_THREADS - void operator() (const tbb::blocked_range &range) const - { - operator()(range.begin(),range.end()); - } -#endif void operator() (const size_type begin, const size_type end) const { @@ -442,12 +427,6 @@ namespace internal Number x; Number a; Number b; -#ifdef DEAL_II_WITH_THREADS - void operator() (const tbb::blocked_range &range) const - { - operator()(range.begin(),range.end()); - } -#endif void operator() (const size_type begin, const size_type end) const { @@ -470,12 +449,6 @@ namespace internal { Number *val; Number *v_val; -#ifdef DEAL_II_WITH_THREADS - void operator() (const tbb::blocked_range &range) const - { - operator()(range.begin(),range.end()); - } -#endif void operator() (const size_type begin, const size_type end) const { @@ -499,12 +472,6 @@ namespace internal Number *val; Number *u_val; Number a; -#ifdef DEAL_II_WITH_THREADS - void operator() (const tbb::blocked_range &range) const - { - 
operator()(range.begin(),range.end()); - } -#endif void operator() (const size_type begin, const size_type end) const { @@ -530,12 +497,6 @@ namespace internal Number *v_val; Number a; Number b; -#ifdef DEAL_II_WITH_THREADS - void operator() (const tbb::blocked_range &range) const - { - operator()(range.begin(),range.end()); - } -#endif void operator() (const size_type begin, const size_type end) const { @@ -563,12 +524,6 @@ namespace internal Number a; Number b; Number c; -#ifdef DEAL_II_WITH_THREADS - void operator() (const tbb::blocked_range &range) const - { - operator()(range.begin(),range.end()); - } -#endif void operator() (const size_type begin, const size_type end) const { @@ -592,12 +547,6 @@ namespace internal Number *val; Number *a_val; Number *b_val; -#ifdef DEAL_II_WITH_THREADS - void operator() (const tbb::blocked_range &range) const - { - operator()(range.begin(),range.end()); - } -#endif void operator() (const size_type begin, const size_type end) const { @@ -614,6 +563,498 @@ namespace internal } } }; + + + + // All sums over all the vector entries (l2-norm, inner product, etc.) are + // performed with the same code, using a templated operation defined + // here. There are always two versions defined, a standard one that covers + // most cases and a vectorized one which is only for equal types and float + // and double. + template + struct Dot + { + static const bool vectorizes = types_are_equal::value && + (types_are_equal::value || + types_are_equal::value); + + Number + operator() (size_type &i) const + { + Number xi = X[i]; + return xi * Number(numbers::NumberTraits::conjugate(Y[i++])); + } + + VectorizedArray + do_vectorized(size_type &i) const + { + VectorizedArray x, y; + x.load(X+i); + y.load(Y+i); + i += VectorizedArray::n_array_elements; + return x * y; + } + + const Number *X; + const Number2 *Y; + }; + + template + struct Norm2 + { + static const bool vectorizes = types_are_equal::value || + types_are_equal::value; + + RealType + operator() (size_type &i) const + { + return numbers::NumberTraits::abs_square(X[i++]); + } + + VectorizedArray + do_vectorized(size_type &i) const + { + VectorizedArray x; + x.load(X+i); + i += VectorizedArray::n_array_elements; + return x * x; + } + + const Number *X; + }; + + template + struct Norm1 + { + static const bool vectorizes = types_are_equal::value || + types_are_equal::value; + + RealType + operator() (size_type &i) const + { + return numbers::NumberTraits::abs(X[i++]); + } + + VectorizedArray + do_vectorized(size_type &i) const + { + VectorizedArray x; + x.load(X+i); + i += VectorizedArray::n_array_elements; + return std::abs(x); + } + + const Number *X; + }; + + template + struct NormP + { + static const bool vectorizes = types_are_equal::value || + types_are_equal::value; + + RealType + operator() (size_type &i) const + { + return std::pow(numbers::NumberTraits::abs(X[i++]), p); + } + + VectorizedArray + do_vectorized(size_type &i) const + { + VectorizedArray x; + x.load(X+i); + i += VectorizedArray::n_array_elements; + return std::pow(std::abs(x),p); + } + + const Number *X; + RealType p; + }; + + template + struct MeanValue + { + static const bool vectorizes = types_are_equal::value || + types_are_equal::value; + + Number + operator() (size_type &i) const + { + return X[i++]; + } + + VectorizedArray + do_vectorized(size_type &i) const + { + VectorizedArray x; + x.load(X+i); + i += VectorizedArray::n_array_elements; + return x; + } + + const Number *X; + }; + + template + struct AddAndDot + { + static const bool 
vectorizes = types_are_equal::value || + types_are_equal::value; + + Number + operator() (size_type &i) const + { + X[i] += a * V[i]; + Number xi = X[i]; + return xi * Number(numbers::NumberTraits::conjugate(W[i++])); + } + + VectorizedArray + do_vectorized(size_type &i) const + { + VectorizedArray x, w, v; + x.load(X+i); + v.load(V+i); + x += a * v; + x.store(X+i); + // may only load from W after storing in X because the pointers might + // point to the same memory + w.load(W+i); + i += VectorizedArray::n_array_elements; + return x * w; + } + + Number *X; + const Number *V, *W; + Number a; + }; + + + + // this is the inner working routine for the accumulation loops + // below. This is the standard case where the loop bounds are known. We + // pulled this function out of the regular accumulate routine because we + // might do this thing vectorized (see specialized function below) + template + void + accumulate_regular(const Operation &op, + size_type &n_chunks, + size_type &index, + ResultType (&outer_results)[128], + internal::bool2type, + const unsigned int start_chunk=0) + { + AssertIndexRange(start_chunk, n_chunks+1); + for (size_type i=start_chunk; i + void + accumulate_regular(const Operation &op, + size_type &n_chunks, + size_type &index, + Number (&outer_results)[128], + internal::bool2type) + { + const size_type regular_chunks = n_chunks/VectorizedArray::n_array_elements; + for (size_type i=0; i r0 = op.do_vectorized(index); + VectorizedArray r1 = op.do_vectorized(index); + VectorizedArray r2 = op.do_vectorized(index); + VectorizedArray r3 = op.do_vectorized(index); + for (size_type j=1; j<8; ++j) + { + r0 += op.do_vectorized(index); + r1 += op.do_vectorized(index); + r2 += op.do_vectorized(index); + r3 += op.do_vectorized(index); + } + r0 += r1; + r2 += r3; + r0 += r2; + r0.store(&outer_results[i*VectorizedArray::n_array_elements]); + } + + // If we are treating a case where the vector length is not divisible by + // the vectorization length, need a cleanup loop + AssertIndexRange(VectorizedArray::n_array_elements, + 17); + if (n_chunks % VectorizedArray::n_array_elements != 0) + { + VectorizedArray r0 = VectorizedArray(), + r1 = VectorizedArray(); + const size_type start_irreg = regular_chunks * VectorizedArray::n_array_elements; + for (size_type c=start_irreg; c::n_array_elements) + { + r0 += op.do_vectorized(index); + r1 += op.do_vectorized(index); + } + r0 += r1; + r0.store(&outer_results[start_irreg]); + n_chunks = start_irreg + VectorizedArray::n_array_elements; + } + } + + + + // this is the main working loop for all vector sums using the templated + // operation above. it accumulates the sums using a block-wise summation + // algorithm with post-update. this blocked algorithm has been proposed in + // a similar form by Castaldo, Whaley and Chronopoulos (SIAM + // J. Sci. Comput. 31, 1156-1174, 2008) and we use the smallest possible + // block size, 2. Sometimes it is referred to as pairwise summation. The + // worst case error made by this algorithm is on the order O(eps * + // log2(vec_size)), whereas a naive summation is O(eps * vec_size). Even + // though the Kahan summation is even more accurate with an error O(eps) + // by carrying along remainders not captured by the main sum, that involves + // additional costs which are not worthwhile. See the Wikipedia article on + // the Kahan summation algorithm. 
+ + // The algorithm implemented here has the additional benefit that it is + // easily parallelized without changing the order of how the elements are + // added (floating point addition is not associative). For the same vector + // size and minimum_parallel_grainsize, the blocks are always the + // same and added pairwise. At the innermost level, eight values are added + // consecutively in order to better balance multiplications and additions. + + // The code returns the result as the last argument in order to make + // spawning tasks simpler and use automatic template deduction. + template + void accumulate_recursive (const Operation &op, + const size_type first, + const size_type last, + ResultType &result) + { + const size_type vec_size = last - first; + if (vec_size <= 4096) + { + // the vector is short enough so we perform the summation. first + // work on the regular part. The innermost 32 values are expanded in + // order to obtain known loop bounds for most of the work. + size_type index = first; + ResultType outer_results [128]; + size_type n_chunks = vec_size / 32; + const size_type remainder = vec_size % 32; + Assert (remainder == 0 || n_chunks < 128, ExcInternalError()); + + // Select between the regular version and vectorized version based + // on the number types we are given. To choose the vectorized + // version often enough, we need to have all tasks but the last one + // to be divisible by the vectorization length + accumulate_regular(op, n_chunks, index, outer_results, + internal::bool2type()); + + // now work on the remainder, i.e., the last up to 32 values. Use + // switch statement with fall-through to work on these values. + if (remainder > 0) + { + AssertIndexRange(n_chunks, 129); + const size_type inner_chunks = remainder / 8; + Assert (inner_chunks <= 3, ExcInternalError()); + const size_type remainder_inner = remainder % 8; + ResultType r0 = ResultType(), r1 = ResultType(), + r2 = ResultType(); + switch (inner_chunks) + { + case 3: + r2 = op(index); + for (size_type j=1; j<8; ++j) + r2 += op(index); + // no break + case 2: + r1 = op(index); + for (size_type j=1; j<8; ++j) + r1 += op(index); + r1 += r2; + // no break + case 1: + r2 = op(index); + for (size_type j=1; j<8; ++j) + r2 += op(index); + // no break + default: + for (size_type j=0; j 1) + { + if (n_chunks % 2 == 1) + outer_results[n_chunks++] = ResultType(); + for (size_type i=0; i + struct TBBReduceFunctor + { + TBBReduceFunctor(const Operation &op, + const size_type vec_size) + : + op(op), + vec_size(vec_size) + { + // set chunk size for sub-tasks + const unsigned int gs = internal::Vector::minimum_parallel_grain_size; + n_chunks = std::min(4*MultithreadInfo::n_threads(), vec_size / gs); + chunk_size = vec_size / n_chunks; + + // round to next multiple of 512 (or leave it at the minimum grain size + // if that happens to be smaller) + if (chunk_size > 512) + chunk_size = ((chunk_size + 511)/512)*512; + n_chunks = (vec_size + chunk_size - 1) / chunk_size; + AssertIndexRange((n_chunks-1)*chunk_size, vec_size); + AssertIndexRange(vec_size, n_chunks*chunk_size+1); + + if (n_chunks > 512) + { + large_array.resize(n_chunks); + array_ptr = &large_array[0]; + } + else + array_ptr = &small_array[0]; + }; + + void operator() (const tbb::blocked_range &range) const + { + for (size_type i = range.begin(); i < range.end(); ++i) + accumulate_recursive(op, i*chunk_size, std::min((i+1)*chunk_size, vec_size), + array_ptr[i]); + } + + ResultType do_sum() const + { + while (n_chunks > 1) + { + if (n_chunks % 2 == 1) + 
array_ptr[n_chunks++] = ResultType(); + for (size_type i=0; i large_array; + mutable ResultType *array_ptr; + }; +#endif + + + + /** + * This is the general caller for parallel reduction operations that work in + * parallel. + */ + template + void parallel_reduce (const Operation &op, + const size_type vec_size, + ResultType &result, + std_cxx11::shared_ptr &partitioner) + { +#ifdef DEAL_II_WITH_THREADS + // only go to the parallel function in case there are at least 4 parallel + // items, otherwise the overhead is too large + if (vec_size >= 4*internal::Vector::minimum_parallel_grain_size && + MultithreadInfo::n_threads() > 1) + { + Assert(partitioner.get() != NULL, + ExcInternalError("Unexpected initialization of Vector that does " + "not set the TBB partitioner to a usable state.")); + std_cxx11::shared_ptr tbb_partitioner = + partitioner->acquire_one_partitioner(); + + TBBReduceFunctor generic_functor(op, vec_size); + tbb::parallel_for (tbb::blocked_range (0, + generic_functor.n_chunks, + 1), + generic_functor, + *tbb_partitioner); + partitioner->release_one_partitioner(tbb_partitioner); + result = generic_functor.do_sum(); + } + else if (vec_size > 0) + accumulate_recursive(op,0,vec_size,result); +#else + accumulate_recursive(op,0,vec_size,result); +#endif + } } @@ -800,7 +1241,7 @@ Vector::operator= (const Vector &v) dealii::internal::Vector_copy copier; copier.dst = val; copier.src = v.val; - internal::vectorized_transform(copier,vec_size,thread_loop_partitioner); + internal::parallel_for(copier,vec_size,thread_loop_partitioner); return *this; } @@ -845,7 +1286,7 @@ Vector::operator= (const Vector &v) dealii::internal::Vector_copy copier; copier.dst = val; copier.src = v.val; - internal::vectorized_transform(copier,vec_size,thread_loop_partitioner); + internal::parallel_for(copier,vec_size,thread_loop_partitioner); return *this; } @@ -958,7 +1399,7 @@ Vector::operator= (const Number s) setter.dst = val; setter.value = s; - internal::vectorized_transform(setter,vec_size,thread_loop_partitioner); + internal::parallel_for(setter,vec_size,thread_loop_partitioner); return *this; } @@ -993,7 +1434,7 @@ Vector &Vector::operator *= (const Number factor) vector_multiply.val = val; vector_multiply.factor = factor; - internal::vectorized_transform(vector_multiply,vec_size,thread_loop_partitioner); + internal::parallel_for(vector_multiply,vec_size,thread_loop_partitioner); return *this; } @@ -1014,7 +1455,7 @@ Vector::add (const Number a, vector_add_av.val = val; vector_add_av.v_val = v.val; vector_add_av.factor = a; - internal::vectorized_transform(vector_add_av,vec_size,thread_loop_partitioner); + internal::parallel_for(vector_add_av,vec_size,thread_loop_partitioner); } @@ -1036,7 +1477,7 @@ Vector::sadd (const Number x, vector_sadd_xav.v_val = v.val; vector_sadd_xav.a = a; vector_sadd_xav.x = x; - internal::vectorized_transform(vector_sadd_xav,vec_size,thread_loop_partitioner); + internal::parallel_for(vector_sadd_xav,vec_size,thread_loop_partitioner); } @@ -1045,389 +1486,6 @@ namespace internal { namespace Vector { - // All sums over all the vector entries (l2-norm, inner product, etc.) are - // performed with the same code, using a templated operation defined - // here. There are always two versions defined, a standard one that covers - // most cases and a vectorized one which is only for equal types and float - // and double. 
- template - struct Dot - { - Number - operator() (const Number *&X, const Number2 *&Y, const Number &, Number *&) const - { - return *X++ * Number(numbers::NumberTraits::conjugate(*Y++)); - } - - VectorizedArray - do_vectorized(const Number *&X, const Number *&Y, const Number &, Number *&) const - { - VectorizedArray x, y; - x.load(X); - y.load(Y); - X += VectorizedArray::n_array_elements; - Y += VectorizedArray::n_array_elements; - return x * y; - } - }; - - template - struct Norm2 - { - RealType - operator() (const Number *&X, const Number *&, const RealType &, Number *&) const - { - return numbers::NumberTraits::abs_square(*X++); - } - - VectorizedArray - do_vectorized(const Number *&X, const Number *&, const Number &, Number *&) const - { - VectorizedArray x; - x.load(X); - X += VectorizedArray::n_array_elements; - return x * x; - } - }; - - template - struct Norm1 - { - RealType - operator() (const Number *&X, const Number *&, const RealType &, Number *&) const - { - return numbers::NumberTraits::abs(*X++); - } - - VectorizedArray - do_vectorized(const Number *&X, const Number *&, const Number &, Number *&) const - { - VectorizedArray x; - x.load(X); - X += VectorizedArray::n_array_elements; - return std::abs(x); - } - }; - - template - struct NormP - { - RealType - operator() (const Number *&X, const Number *&, const RealType &p, Number *&) const - { - return std::pow(numbers::NumberTraits::abs(*X++), p); - } - - VectorizedArray - do_vectorized(const Number *&X, const Number *&, const Number &p, Number *&) const - { - VectorizedArray x; - x.load(X); - X += VectorizedArray::n_array_elements; - return std::pow(std::abs(x),p); - } - }; - - template - struct MeanValue - { - Number - operator() (const Number *&X, const Number *&, const Number &, Number *&) const - { - return *X++; - } - - VectorizedArray - do_vectorized(const Number *&X, const Number *&, const Number &, Number *&) const - { - VectorizedArray x; - x.load(X); - X += VectorizedArray::n_array_elements; - return x; - } - }; - - template - struct AddAndDot - { - Number - operator() (const Number *&V, const Number *&W, const Number &a, - Number *&X) const - { - *X += a **V++; - return *X++ * Number(numbers::NumberTraits::conjugate(*W++)); - } - - VectorizedArray - do_vectorized(const Number *&V, const Number *&W, const Number &a, - Number *&X) const - { - VectorizedArray x, w, v; - x.load(X); - v.load(V); - x += a * v; - x.store(X); - // may only load from W after storing in X because the pointers might - // point to the same memory - w.load(W); - X += VectorizedArray::n_array_elements; - V += VectorizedArray::n_array_elements; - W += VectorizedArray::n_array_elements; - return x * w; - } - }; - - - - // this is the inner working routine for the accumulation loops - // below. This is the standard case where the loop bounds are known. 
We - // pulled this function out of the regular accumulate routine because we - // might do this thing vectorized (see specialized function below) - template - void - accumulate_regular(const Operation &op, - const Number *&X, - const Number2 *&Y, - const ResultType power, - const size_type n_chunks, - Number *&Z, - ResultType (&outer_results)[128], - internal::bool2type, - const unsigned int start_chunk=0) - { - AssertIndexRange(start_chunk, n_chunks+1); - for (size_type i=start_chunk; i - void - accumulate_regular(const Operation &op, - const Number *&X, - const Number *&Y, - const Number power, - const size_type n_chunks, - Number *&Z, - Number (&outer_results)[128], - internal::bool2type) - { - for (size_type i=0; i::n_array_elements; ++i) - { - VectorizedArray r0 = op.do_vectorized(X, Y, power, Z); - VectorizedArray r1 = op.do_vectorized(X, Y, power, Z); - VectorizedArray r2 = op.do_vectorized(X, Y, power, Z); - VectorizedArray r3 = op.do_vectorized(X, Y, power, Z); - for (size_type j=1; j<8; ++j) - { - r0 += op.do_vectorized(X, Y, power, Z); - r1 += op.do_vectorized(X, Y, power, Z); - r2 += op.do_vectorized(X, Y, power, Z); - r3 += op.do_vectorized(X, Y, power, Z); - } - r0 += r1; - r2 += r3; - r0 += r2; - r0.store(&outer_results[i*VectorizedArray::n_array_elements]); - } - - // If we are treating a case where the vector length is not divisible by - // the vectorization length, need the other routine for the cleanup work - if (n_chunks % VectorizedArray::n_array_elements != 0) - accumulate_regular(op, X, Y, power, n_chunks, Z, outer_results, - internal::bool2type(), - (n_chunks/VectorizedArray::n_array_elements) - * VectorizedArray::n_array_elements); - } - - - - // this is the main working loop for all vector sums using the templated - // operation above. it accumulates the sums using a block-wise summation - // algorithm with post-update. this blocked algorithm has been proposed in - // a similar form by Castaldo, Whaley and Chronopoulos (SIAM - // J. Sci. Comput. 31, 1156-1174, 2008) and we use the smallest possible - // block size, 2. Sometimes it is referred to as pairwise summation. The - // worst case error made by this algorithm is on the order O(eps * - // log2(vec_size)), whereas a naive summation is O(eps * vec_size). Even - // though the Kahan summation is even more accurate with an error O(eps) - // by carrying along remainders not captured by the main sum, that involves - // additional costs which are not worthwhile. See the Wikipedia article on - // the Kahan summation algorithm. - - // The algorithm implemented here has the additional benefit that it is - // easily parallelized without changing the order of how the elements are - // added (floating point addition is not associative). For the same vector - // size and minimum_parallel_grainsize, the blocks are always the - // same and added pairwise. At the innermost level, eight values are added - // consecutively in order to better balance multiplications and additions. - - // The code returns the result as the last argument in order to make - // spawning tasks simpler and use automatic template deduction. - template - void accumulate (const Operation &op, - const Number *X, - const Number2 *Y, - const ResultType power, - const size_type vec_size, - Number *Z, - ResultType &result, - const int depth = -1) - { - (void)depth; - - if (vec_size <= 4096) - { - // the vector is short enough so we perform the summation. first - // work on the regular part. 
The innermost 32 values are expanded in - // order to obtain known loop bounds for most of the work. - const Number *X_original = X; - (void)X_original; - ResultType outer_results [128]; - size_type n_chunks = vec_size / 32; - const size_type remainder = vec_size % 32; - Assert (remainder == 0 || n_chunks < 128, ExcInternalError()); - - // Select between the regular version and vectorized version based - // on the number types we are given. To choose the vectorized - // version often enough, we need to have all tasks but the last one - // to be divisible by the vectorization length - accumulate_regular(op, X, Y, power, n_chunks, Z, outer_results, - internal::bool2type<(types_are_equal::value && - (types_are_equal::value || - types_are_equal::value))>()); - - // now work on the remainder, i.e., the last up to 32 values. Use - // switch statement with fall-through to work on these values. - if (remainder > 0) - { - const size_type inner_chunks = remainder / 8; - Assert (inner_chunks <= 3, ExcInternalError()); - const size_type remainder_inner = remainder % 8; - ResultType r0 = ResultType(), r1 = ResultType(), - r2 = ResultType(); - switch (inner_chunks) - { - case 3: - r2 = op(X, Y, power, Z); - for (size_type j=1; j<8; ++j) - r2 += op(X, Y, power, Z); - // no break - case 2: - r1 = op(X, Y, power, Z); - for (size_type j=1; j<8; ++j) - r1 += op(X, Y, power, Z); - r1 += r2; - // no break - case 1: - r2 = op(X, Y, power, Z); - for (size_type j=1; j<8; ++j) - r2 += op(X, Y, power, Z); - // no break - default: - for (size_type j=0; j (X - X_original), vec_size); - - // now sum the results from the chunks - // recursively - while (n_chunks > 1) - { - if (n_chunks % 2 == 1) - outer_results[n_chunks++] = ResultType(); - for (size_type i=0; i 1 && - vec_size > 4 * internal::Vector::minimum_parallel_grain_size && - depth != 0) - { - // split the vector into smaller pieces to be worked on recursively - // and create tasks for them. Make pieces divisible by 1024. - const size_type new_size = (vec_size / 4096) * 1024; - ResultType r0, r1, r2, r3; - - // find out how many recursions we should make (avoid too deep - // hierarchies of tasks on large vectors), max use 8 * - // MultithreadInfo::n_threads() - int next_depth = depth; - if (depth == -1) - next_depth = 8 * MultithreadInfo::n_threads(); - next_depth /= 4; - - Threads::TaskGroup<> task_group; - task_group += Threads::new_task(&accumulate, - op, X, Y, power, new_size, Z, r0, next_depth); - task_group += Threads::new_task(&accumulate, - op, X+new_size, Y+new_size, power, - new_size, Z+new_size, r1, next_depth); - task_group += Threads::new_task(&accumulate, - op, X+2*new_size, Y+2*new_size, power, - new_size, Z+2*new_size, r2, next_depth); - task_group += Threads::new_task(&accumulate, - op, X+3*new_size, Y+3*new_size, power, - vec_size-3*new_size, Z+3*new_size, r3, - next_depth); - task_group.join_all(); - r0 += r1; - r2 += r3; - result = r0 + r2; - } -#endif - else - { - // split vector into four pieces and work on the pieces - // recursively. Make pieces (except last) divisible by 1024. 
- const size_type new_size = (vec_size / 4096) * 1024; - ResultType r0, r1, r2, r3; - accumulate (op, X, Y, power, new_size, Z, r0); - accumulate (op, X+new_size, Y+new_size, power, new_size, Z+new_size, r1); - accumulate (op, X+2*new_size, Y+2*new_size, power, new_size, Z+2*new_size, r2); - accumulate (op, X+3*new_size, Y+3*new_size, power, vec_size-3*new_size, - Z+3*new_size, r3); - r0 += r1; - r2 += r3; - result = r0 + r2; - } - } } } @@ -1446,8 +1504,10 @@ Number Vector::operator * (const Vector &v) const ExcDimensionMismatch(vec_size, v.size())); Number sum; - internal::Vector::accumulate (internal::Vector::Dot(), - val, v.val, Number(), vec_size, val, sum); + internal::Dot dot; + dot.X = val; + dot.Y = v.val; + internal::parallel_reduce (dot, vec_size, sum, thread_loop_partitioner); AssertIsFinite(sum); return sum; @@ -1462,8 +1522,9 @@ Vector::norm_sqr () const Assert (vec_size!=0, ExcEmptyObject()); real_type sum; - internal::Vector::accumulate (internal::Vector::Norm2(), - val, val, real_type(), vec_size, val, sum); + internal::Norm2 norm2; + norm2.X = val; + internal::parallel_reduce (norm2, vec_size, sum, thread_loop_partitioner); AssertIsFinite(sum); @@ -1478,8 +1539,9 @@ Number Vector::mean_value () const Assert (vec_size!=0, ExcEmptyObject()); Number sum; - internal::Vector::accumulate (internal::Vector::MeanValue(), - val, val, Number(), vec_size, val, sum); + internal::MeanValue mean; + mean.X = val; + internal::parallel_reduce (mean, vec_size, sum, thread_loop_partitioner); return sum / real_type(size()); } @@ -1493,8 +1555,9 @@ Vector::l1_norm () const Assert (vec_size!=0, ExcEmptyObject()); real_type sum; - internal::Vector::accumulate (internal::Vector::Norm1(), - val, val, real_type(), vec_size, val, sum); + internal::Norm1 norm1; + norm1.X = val; + internal::parallel_reduce (norm1, vec_size, sum, thread_loop_partitioner); return sum; } @@ -1513,8 +1576,10 @@ Vector::l2_norm () const Assert (vec_size!=0, ExcEmptyObject()); real_type norm_square; - internal::Vector::accumulate (internal::Vector::Norm2(), - val, val, real_type(), vec_size, val, norm_square); + internal::Norm2 norm2; + norm2.X = val; + internal::parallel_reduce (norm2, vec_size, norm_square, + thread_loop_partitioner); if (numbers::is_finite(norm_square) && norm_square >= std::numeric_limits::min()) return std::sqrt(norm_square); @@ -1556,8 +1621,10 @@ Vector::lp_norm (const real_type p) const return l2_norm(); real_type sum; - internal::Vector::accumulate (internal::Vector::NormP(), - val, val, p, vec_size, val, sum); + internal::NormP normp; + normp.X = val; + normp.p = p; + internal::parallel_reduce (normp, vec_size, sum, thread_loop_partitioner); if (numbers::is_finite(sum) && sum >= std::numeric_limits::min()) return std::pow(sum, static_cast(1./p)); @@ -1623,8 +1690,12 @@ Vector::add_and_dot (const Number a, AssertDimension (vec_size, W.size()); Number sum; - internal::Vector::accumulate (internal::Vector::AddAndDot(), - V.val, W.val, a, vec_size, val, sum); + internal::AddAndDot adder; + adder.X = val; + adder.a = a; + adder.V = V.val; + adder.W = W.val; + internal::parallel_reduce (adder, vec_size, sum, thread_loop_partitioner); AssertIsFinite(sum); return sum; @@ -1652,7 +1723,7 @@ Vector &Vector::operator -= (const Vector &v) internal::Vectorization_subtract_v vector_subtract; vector_subtract.val = val; vector_subtract.v_val = v.val; - internal::vectorized_transform(vector_subtract,vec_size,thread_loop_partitioner); + internal::parallel_for(vector_subtract,vec_size,thread_loop_partitioner); 
return *this; } @@ -1667,7 +1738,7 @@ void Vector::add (const Number v) internal::Vectorization_add_factor vector_add; vector_add.val = val; vector_add.factor = v; - internal::vectorized_transform(vector_add,vec_size,thread_loop_partitioner); + internal::parallel_for(vector_add,vec_size,thread_loop_partitioner); } @@ -1681,7 +1752,7 @@ void Vector::add (const Vector &v) internal::Vectorization_add_v vector_add; vector_add.val = val; vector_add.v_val = v.val; - internal::vectorized_transform(vector_add,vec_size,thread_loop_partitioner); + internal::parallel_for(vector_add,vec_size,thread_loop_partitioner); } @@ -1703,7 +1774,7 @@ void Vector::add (const Number a, const Vector &v, vector_add.w_val = w.val; vector_add.a = a; vector_add.b = b; - internal::vectorized_transform(vector_add,vec_size,thread_loop_partitioner); + internal::parallel_for(vector_add,vec_size,thread_loop_partitioner); } @@ -1721,7 +1792,7 @@ void Vector::sadd (const Number x, vector_sadd.val = val; vector_sadd.v_val = v.val; vector_sadd.x = x; - internal::vectorized_transform(vector_sadd,vec_size,thread_loop_partitioner); + internal::parallel_for(vector_sadd,vec_size,thread_loop_partitioner); } @@ -1746,7 +1817,7 @@ void Vector::sadd (const Number x, const Number a, vector_sadd.x = x; vector_sadd.a = a; vector_sadd.b = b; - internal::vectorized_transform(vector_sadd,vec_size,thread_loop_partitioner); + internal::parallel_for(vector_sadd,vec_size,thread_loop_partitioner); } @@ -1771,7 +1842,7 @@ void Vector::scale (const Vector &s) internal::Vectorization_scale vector_scale; vector_scale.val = val; vector_scale.v_val = s.val; - internal::vectorized_transform(vector_scale,vec_size,thread_loop_partitioner); + internal::parallel_for(vector_scale,vec_size,thread_loop_partitioner); } @@ -1802,7 +1873,7 @@ void Vector::equ (const Number a, vector_equ.val = val; vector_equ.u_val = u.val; vector_equ.a = a; - internal::vectorized_transform(vector_equ,vec_size,thread_loop_partitioner); + internal::parallel_for(vector_equ,vec_size,thread_loop_partitioner); } @@ -1846,7 +1917,7 @@ void Vector::equ (const Number a, const Vector &u, vector_equ.v_val = v.val; vector_equ.a = a; vector_equ.b = b; - internal::vectorized_transform(vector_equ,vec_size,thread_loop_partitioner); + internal::parallel_for(vector_equ,vec_size,thread_loop_partitioner); } @@ -1868,7 +1939,7 @@ void Vector::equ (const Number a, const Vector &u, vector_equ.a = a; vector_equ.b = b; vector_equ.c = c; - internal::vectorized_transform(vector_equ,vec_size,thread_loop_partitioner); + internal::parallel_for(vector_equ,vec_size,thread_loop_partitioner); } @@ -1888,7 +1959,7 @@ void Vector::ratio (const Vector &a, vector_ratio.val = val; vector_ratio.a_val = a.val; vector_ratio.b_val = b.val; - internal::vectorized_transform(vector_ratio,vec_size,thread_loop_partitioner); + internal::parallel_for(vector_ratio,vec_size,thread_loop_partitioner); } -- 2.39.5
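
The chunk layout computed in TBBForFunctor, and repeated in TBBReduceFunctor, is what makes the affinity reuse work: both kinds of loops must cut the vector into identical chunks so that the affinity_partitioner can assign chunk i to the same thread in a vector update and in a subsequent inner product. The following standalone sketch of that arithmetic is not part of the patch; n_threads and grain_size stand in for MultithreadInfo::n_threads() and internal::Vector::minimum_parallel_grain_size, and the worked numbers in the comments are illustrative only.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iostream>

// mirrors the constructor logic of TBBForFunctor/TBBReduceFunctor;
// assumes vec_size >= 4*grain_size, which the callers in the patch guarantee
void compute_chunks(const std::size_t vec_size,
                    const std::size_t n_threads,
                    const std::size_t grain_size,
                    std::size_t &n_chunks,
                    std::size_t &chunk_size)
{
  // at most 4 chunks per thread, but no chunk smaller than the grain size
  n_chunks   = std::min(4 * n_threads, vec_size / grain_size);
  chunk_size = vec_size / n_chunks;
  // round the chunk size to the next multiple of 512 so that chunk
  // boundaries are friendly to the vectorized inner loops
  if (chunk_size > 512)
    chunk_size = ((chunk_size + 511) / 512) * 512;
  n_chunks = (vec_size + chunk_size - 1) / chunk_size;
  assert((n_chunks - 1) * chunk_size < vec_size);
  assert(vec_size <= n_chunks * chunk_size);
}

int main()
{
  std::size_t n_chunks, chunk_size;
  // a vector of 10^6 entries on 8 threads with grain size 4096:
  compute_chunks(1000000, 8, 4096, n_chunks, chunk_size);
  // gives chunk_size = 31744 (31250 rounded up to a multiple of 512) and
  // n_chunks = 32; the last chunk is clipped to vec_size in operator()
  std::cout << n_chunks << " chunks of " << chunk_size << " entries\n";
}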
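
How such a functor reaches TBB is the same in parallel_for and parallel_reduce above: a blocked_range over the chunk indices [0, n_chunks) with grain size 1 is handed to tbb::parallel_for together with a tbb::affinity_partitioner that is kept alive between calls, so that repeated loops over the same vector land on the same threads. A minimal sketch assuming the TBB headers are available; the lambda plays the role of TBBForFunctor::operator(), and the doubling loop is an arbitrary stand-in for the vector-update functors.

#include <tbb/blocked_range.h>
#include <tbb/parallel_for.h>
#include <tbb/partitioner.h>
#include <algorithm>
#include <cstddef>
#include <vector>

int main()
{
  std::vector<double> v(100000, 1.0);
  const std::size_t chunk_size = 4096;
  const std::size_t n_chunks   = (v.size() + chunk_size - 1) / chunk_size;

  // must outlive a single call to carry affinity information across loops
  static tbb::affinity_partitioner partitioner;

  tbb::parallel_for(tbb::blocked_range<std::size_t>(0, n_chunks, 1),
                    [&](const tbb::blocked_range<std::size_t> &range)
  {
    // translate chunk indices back to vector entries, clipping the last
    // chunk, exactly as TBBForFunctor::operator() does
    const std::size_t begin = range.begin() * chunk_size;
    const std::size_t end   = std::min(range.end() * chunk_size, v.size());
    for (std::size_t i = begin; i < end; ++i)
      v[i] *= 2.0;
  },
  partitioner);
}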
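
The reduction operations (Dot, Norm2, Norm1, NormP, MeanValue, AddAndDot) now share one small interface: the data pointers are plain members, operator()(size_type &i) returns the i-th term and advances the index, and do_vectorized does the same for one SIMD lane width. A sketch of that contract, with a plain serial loop standing in for accumulate_recursive; SimpleDot and accumulate_serial are illustrative names, not the patch's code.

#include <cstddef>
#include <iostream>

struct SimpleDot
{
  // returns one term of the dot product and consumes one index,
  // like internal::Dot::operator() above (conjugation omitted)
  double operator() (std::size_t &i) const
  {
    const double xi = X[i];
    return xi * Y[i++];
  }
  const double *X;
  const double *Y;
};

template <typename Operation>
double accumulate_serial(const Operation &op, const std::size_t n)
{
  double sum = 0;
  std::size_t i = 0;
  while (i < n)
    sum += op(i);   // op advances i itself
  return sum;
}

int main()
{
  const double a[] = {1, 2, 3}, b[] = {4, 5, 6};
  SimpleDot dot;
  dot.X = a;
  dot.Y = b;
  std::cout << accumulate_serial(dot, 3) << '\n'; // prints 32
}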
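
AddAndDot is the one operation with a side effect, and its do_vectorized carries the comment about loading from W only after storing to X: add_and_dot may be called with W aliasing X (e.g. x.add_and_dot(a, v, x)), so the updated values must be written back before the dot-product operand is read. A scalar sketch of that ordering, with illustrative names:

#include <cstddef>
#include <iostream>

double add_and_dot(double *X, const double *V, const double *W,
                   const double a, const std::size_t n)
{
  double sum = 0;
  for (std::size_t i = 0; i < n; ++i)
    {
      X[i] += a * V[i];    // update first ...
      sum += X[i] * W[i];  // ... then read W, which may alias X
    }
  return sum;
}

int main()
{
  double x[] = {1, 2};
  const double v[] = {1, 1};
  // W aliases X: computes (x + a*v) . (x + a*v) = 2*2 + 3*3
  std::cout << add_and_dot(x, v, x, 1.0, 2) << '\n'; // prints 13
}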
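
The accumulation itself is the block-wise summation with post-update cited from Castaldo, Whaley and Chronopoulos: sums are formed over small blocks and combined pairwise, which bounds the worst-case rounding error by O(eps * log2(n)) instead of the O(eps * n) of a running sum, and makes the result independent of how the range is split across tasks. A minimal recursive sketch, without the 32-element unrolling, the vectorization, and the 4096-entry base case of accumulate_recursive:

#include <cstddef>
#include <iostream>
#include <vector>

double pairwise_sum(const double *x, const std::size_t n)
{
  if (n <= 8)
    {
      // small base case: plain running sum
      double s = 0;
      for (std::size_t i = 0; i < n; ++i)
        s += x[i];
      return s;
    }
  // the halves are summed independently and combined afterwards, so
  // splitting the work across threads does not change the order of additions
  const std::size_t half = n / 2;
  return pairwise_sum(x, half) + pairwise_sum(x + half, n - half);
}

int main()
{
  std::vector<double> v(1 << 20, 0.1);
  std::cout << pairwise_sum(v.data(), v.size()) << '\n'; // ~104857.6
}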
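
TBBReduceFunctor::do_sum finishes with the same pairwise idea across the per-chunk partial results: pad to an even count with a neutral element, add neighbors in place, halve, repeat. A sketch of that combination step under the assumption that the result array has spare room for the padding slots (the patch sizes small_array/large_array accordingly); the exact in-place indexing of do_sum may differ.

#include <cstddef>
#include <iostream>

double tree_sum(double *r, std::size_t n_chunks)
{
  while (n_chunks > 1)
    {
      if (n_chunks % 2 == 1)
        r[n_chunks++] = 0.;  // pad with a neutral element
      for (std::size_t i = 0; i < n_chunks / 2; ++i)
        r[i] = r[2 * i] + r[2 * i + 1];
      n_chunks /= 2;
    }
  return r[0];
}

int main()
{
  double partial[8] = {1, 2, 3, 4, 5}; // trailing slots left for padding
  std::cout << tree_sum(partial, 5) << '\n'; // prints 15
}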