From b19fd2c957d68d9186a7a5596f1aa2fbceeefc24 Mon Sep 17 00:00:00 2001 From: Bruno Turcksin Date: Tue, 3 May 2016 17:02:00 -0400 Subject: [PATCH] Move the functions of the internal namespace of vector.templates to their own file. --- include/deal.II/lac/vector.templates.h | 1048 +---------------------- include/deal.II/lac/vector_internal.h | 1068 ++++++++++++++++++++++++ 2 files changed, 1069 insertions(+), 1047 deletions(-) create mode 100644 include/deal.II/lac/vector_internal.h diff --git a/include/deal.II/lac/vector.templates.h b/include/deal.II/lac/vector.templates.h index 053dda0c78..78f89d2c8d 100644 --- a/include/deal.II/lac/vector.templates.h +++ b/include/deal.II/lac/vector.templates.h @@ -25,6 +25,7 @@ #include #include #include +#include #ifdef DEAL_II_WITH_PETSC # include @@ -44,1053 +45,6 @@ DEAL_II_NAMESPACE_OPEN - -namespace internal -{ - typedef types::global_dof_index size_type; - - template - bool is_non_negative (const T &t) - { - return t >= 0; - } - - - template - bool is_non_negative (const std::complex &) - { - Assert (false, - ExcMessage ("Complex numbers do not have an ordering.")); - - return false; - } - - - template - void print (const T &t, - const char *format) - { - if (format != 0) - std::printf (format, t); - else - std::printf (" %5.2f", double(t)); - } - - - - template - void print (const std::complex &t, - const char *format) - { - if (format != 0) - std::printf (format, t.real(), t.imag()); - else - std::printf (" %5.2f+%5.2fi", - double(t.real()), double(t.imag())); - } - - // call std::copy, except for in - // the case where we want to copy - // from std::complex to a - // non-complex type - template - void copy (const T *begin, - const T *end, - U *dest) - { - std::copy (begin, end, dest); - } - - template - void copy (const std::complex *begin, - const std::complex *end, - std::complex *dest) - { - std::copy (begin, end, dest); - } - - template - void copy (const std::complex *, - const std::complex *, - U *) - { - Assert (false, ExcMessage ("Can't convert a vector of complex numbers " - "into a vector of reals/doubles")); - } - - - -#ifdef DEAL_II_WITH_THREADS - /** - * This struct takes the loop range from the tbb parallel for loop and - * translates it to the actual ranges of the for loop within the vector. It - * encodes the grain size but might choose larger values of chunks than the - * minimum grain size. The minimum grain size given to tbb is then simple - * 1. For affinity reasons, the layout in this loop must be kept in sync - * with the respective class for reductions further down. - */ - template - struct TBBForFunctor - { - TBBForFunctor(Functor &functor, - const size_type vec_size) - : - functor(functor), - vec_size(vec_size) - { - // set chunk size for sub-tasks - const unsigned int gs = internal::Vector::minimum_parallel_grain_size; - n_chunks = std::min(static_cast(4*MultithreadInfo::n_threads()), - vec_size / gs); - chunk_size = vec_size / n_chunks; - - // round to next multiple of 512 (or minimum grain size if that happens - // to be smaller). 
this is advantageous because our accumulation - // algorithms favor lengths of a power of 2 due to pairwise summation -> - // at most one 'oddly' sized chunk - if (chunk_size > 512) - chunk_size = ((chunk_size + 511)/512)*512; - n_chunks = (vec_size + chunk_size - 1) / chunk_size; - AssertIndexRange((n_chunks-1)*chunk_size, vec_size); - AssertIndexRange(vec_size, n_chunks*chunk_size+1); - }; - - void operator() (const tbb::blocked_range &range) const - { - const size_type begin = range.begin()*chunk_size; - const size_type end = std::min(range.end()*chunk_size, vec_size); - functor(begin, end); - } - - Functor &functor; - const size_type vec_size; - unsigned int n_chunks; - size_type chunk_size; - }; -#endif - - template - void parallel_for(Functor &functor, - size_type vec_size, - std_cxx11::shared_ptr &partitioner) - { -#ifdef DEAL_II_WITH_THREADS - // only go to the parallel function in case there are at least 4 parallel - // items, otherwise the overhead is too large - if (vec_size >= 4*internal::Vector::minimum_parallel_grain_size && - MultithreadInfo::n_threads() > 1) - { - Assert(partitioner.get() != NULL, - ExcInternalError("Unexpected initialization of Vector that does " - "not set the TBB partitioner to a usable state.")); - std_cxx11::shared_ptr tbb_partitioner = - partitioner->acquire_one_partitioner(); - - TBBForFunctor generic_functor(functor, vec_size); - tbb::parallel_for (tbb::blocked_range (0, - generic_functor.n_chunks, - 1), - generic_functor, - *tbb_partitioner); - partitioner->release_one_partitioner(tbb_partitioner); - } - else if (vec_size > 0) - functor(0,vec_size); -#else - functor(0,vec_size); - (void)partitioner; -#endif - } - - - // Define the functors necessary to use SIMD with TBB. we also include the - // simple copy and set operations - - template - struct Vector_set - { - Number *dst; - Number value; - - void operator() (const size_type begin, const size_type end) const - { - if (value == Number()) - std::memset (dst+begin,0,(end-begin)*sizeof(Number)); - else - std::fill (dst+begin, dst+end, value); - } - }; - - template - struct Vector_copy - { - const OtherNumber *src; - Number *dst; - - void operator() (const size_type begin, const size_type end) const - { - if (types_are_equal::value) - std::memcpy(dst+begin, src+begin, (end-begin)*sizeof(Number)); - else - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (typename dealii::Vector::size_type i=begin; i - struct Vectorization_multiply_factor - { - Number *val; - Number factor; - - void operator() (const size_type begin, const size_type end) const - { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i - struct Vectorization_add_av - { - Number *val; - Number *v_val; - Number factor; - - void operator() (const size_type begin, const size_type end) const - { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i - struct Vectorization_sadd_xav - { - Number *val; - Number *v_val; - Number a; - Number x; - - void operator() (const size_type begin, const size_type end) const - { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i - struct Vectorization_subtract_v - { - Number *val; - Number *v_val; - - void operator() (const size_type begin, const size_type end) const - { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i - struct Vectorization_add_factor - { - Number *val; 
- Number factor; - - void operator() (const size_type begin, const size_type end) const - { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i - struct Vectorization_add_v - { - Number *val; - Number *v_val; - - void operator() (const size_type begin, const size_type end) const - { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i - struct Vectorization_add_avpbw - { - Number *val; - Number *v_val; - Number *w_val; - Number a; - Number b; - - void operator() (const size_type begin, const size_type end) const - { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i - struct Vectorization_sadd_xv - { - Number *val; - Number *v_val; - Number x; - - void operator() (const size_type begin, const size_type end) const - { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i - struct Vectorization_sadd_xavbw - { - Number *val; - Number *v_val; - Number *w_val; - Number x; - Number a; - Number b; - - void operator() (const size_type begin, const size_type end) const - { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i - struct Vectorization_scale - { - Number *val; - Number *v_val; - - void operator() (const size_type begin, const size_type end) const - { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i - struct Vectorization_equ_au - { - Number *val; - Number *u_val; - Number a; - - void operator() (const size_type begin, const size_type end) const - { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i - struct Vectorization_equ_aubv - { - Number *val; - Number *u_val; - Number *v_val; - Number a; - Number b; - - void operator() (const size_type begin, const size_type end) const - { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i - struct Vectorization_equ_aubvcw - { - Number *val; - Number *u_val; - Number *v_val; - Number *w_val; - Number a; - Number b; - Number c; - - void operator() (const size_type begin, const size_type end) const - { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i - struct Vectorization_ratio - { - Number *val; - Number *a_val; - Number *b_val; - - void operator() (const size_type begin, const size_type end) const - { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i - struct Dot - { - static const bool vectorizes = types_are_equal::value && - (VectorizedArray::n_array_elements > 1); - - Number - operator() (const size_type i) const - { - return X[i] * Number(numbers::NumberTraits::conjugate(Y[i])); - } - - VectorizedArray - do_vectorized(const size_type i) const - { - VectorizedArray x, y; - x.load(X+i); - y.load(Y+i); - return x * y; - } - - const Number *X; - const Number2 *Y; - }; - - template - struct Norm2 - { - static const bool vectorizes = VectorizedArray::n_array_elements > 1; - - RealType - operator() (const size_type i) const - { - return numbers::NumberTraits::abs_square(X[i]); - } - - VectorizedArray - do_vectorized(const size_type i) const - { - VectorizedArray x; - x.load(X+i); - return x * x; - } - - const Number *X; - }; 
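// Illustrative sketch of the contract all of these reduction kernels share
// and that accumulate_regular()/accumulate_recursive() further down rely on:
// a scalar operator()(i), a do_vectorized(i) returning a VectorizedArray,
// and a static 'vectorizes' flag. The kernel below is hypothetical and not
// part of the library; for a real-valued Number it would compute the squared
// distance between two vectors.
template <typename Number>
struct SquaredDistanceSketch
{
  static const bool vectorizes = VectorizedArray<Number>::n_array_elements > 1;

  Number
  operator() (const size_type i) const
  {
    const Number d = X[i] - Y[i];
    return d * d;
  }

  VectorizedArray<Number>
  do_vectorized (const size_type i) const
  {
    VectorizedArray<Number> x, y;
    x.load(X+i);
    y.load(Y+i);
    return (x - y) * (x - y);
  }

  const Number *X;
  const Number *Y;
};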
- - template - struct Norm1 - { - static const bool vectorizes = VectorizedArray::n_array_elements > 1; - - RealType - operator() (const size_type i) const - { - return numbers::NumberTraits::abs(X[i]); - } - - VectorizedArray - do_vectorized(const size_type i) const - { - VectorizedArray x; - x.load(X+i); - return std::abs(x); - } - - const Number *X; - }; - - template - struct NormP - { - static const bool vectorizes = VectorizedArray::n_array_elements > 1; - - RealType - operator() (const size_type i) const - { - return std::pow(numbers::NumberTraits::abs(X[i]), p); - } - - VectorizedArray - do_vectorized(const size_type i) const - { - VectorizedArray x; - x.load(X+i); - return std::pow(std::abs(x),p); - } - - const Number *X; - RealType p; - }; - - template - struct MeanValue - { - static const bool vectorizes = VectorizedArray::n_array_elements > 1; - - Number - operator() (const size_type i) const - { - return X[i]; - } - - VectorizedArray - do_vectorized(const size_type i) const - { - VectorizedArray x; - x.load(X+i); - return x; - } - - const Number *X; - }; - - template - struct AddAndDot - { - static const bool vectorizes = VectorizedArray::n_array_elements > 1; - - Number - operator() (const size_type i) const - { - X[i] += a * V[i]; - return X[i] * Number(numbers::NumberTraits::conjugate(W[i])); - } - - VectorizedArray - do_vectorized(const size_type i) const - { - VectorizedArray x, w, v; - x.load(X+i); - v.load(V+i); - x += a * v; - x.store(X+i); - // may only load from W after storing in X because the pointers might - // point to the same memory - w.load(W+i); - return x * w; - } - - Number *X; - const Number *V, *W; - Number a; - }; - - - - // this is the main working loop for all vector sums using the templated - // operation above. it accumulates the sums using a block-wise summation - // algorithm with post-update. this blocked algorithm has been proposed in - // a similar form by Castaldo, Whaley and Chronopoulos (SIAM - // J. Sci. Comput. 31, 1156-1174, 2008) and we use the smallest possible - // block size, 2. Sometimes it is referred to as pairwise summation. The - // worst case error made by this algorithm is on the order O(eps * - // log2(vec_size)), whereas a naive summation is O(eps * vec_size). Even - // though the Kahan summation is even more accurate with an error O(eps) - // by carrying along remainders not captured by the main sum, that involves - // additional costs which are not worthwhile. See the Wikipedia article on - // the Kahan summation algorithm. - - // The algorithm implemented here has the additional benefit that it is - // easily parallelized without changing the order of how the elements are - // added (floating point addition is not associative). For the same vector - // size and minimum_parallel_grainsize, the blocks are always the - // same and added pairwise. - - // The depth of recursion is controlled by the 'magic' parameter - // vector_accumulation_recursion_threshold: If the length is below - // vector_accumulation_recursion_threshold * 32 (32 is the part of code we - // unroll), a straight loop instead of recursion will be used. At the - // innermost level, eight values are added consecutively in order to better - // balance multiplications and additions. - - // The code returns the result as the last argument in order to make - // spawning tasks simpler and use automatic template deduction. 
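// As a minimal illustration of the pairwise idea described above (a
// hypothetical helper on plain doubles, without the chunking, unrolling and
// vectorization that the actual accumulate_recursive() below adds):
inline double pairwise_sum_sketch (const double *x, const size_type n)
{
  // short base case: a straight loop
  if (n <= 8)
    {
      double s = 0.;
      for (size_type i = 0; i < n; ++i)
        s += x[i];
      return s;
    }
  // split in half and add the two partial sums; the rounding error of the
  // result grows like O(eps * log2(n)) instead of O(eps * n)
  const size_type half = n / 2;
  return pairwise_sum_sketch (x, half) +
         pairwise_sum_sketch (x + half, n - half);
}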
- - const unsigned int vector_accumulation_recursion_threshold = 128; - - template - void accumulate_recursive (const Operation &op, - const size_type first, - const size_type last, - ResultType &result) - { - const size_type vec_size = last - first; - if (vec_size <= vector_accumulation_recursion_threshold * 32) - { - // the vector is short enough so we perform the summation. first - // work on the regular part. The innermost 32 values are expanded in - // order to obtain known loop bounds for most of the work. - size_type index = first; - ResultType outer_results [vector_accumulation_recursion_threshold]; - size_type n_chunks = vec_size / 32; - const size_type remainder = vec_size % 32; - Assert (remainder == 0 || n_chunks < vector_accumulation_recursion_threshold, - ExcInternalError()); - - // Select between the regular version and vectorized version based - // on the number types we are given. To choose the vectorized - // version often enough, we need to have all tasks but the last one - // to be divisible by the vectorization length - accumulate_regular(op, n_chunks, index, outer_results, - internal::bool2type()); - - // now work on the remainder, i.e., the last up to 32 values. Use - // switch statement with fall-through to work on these values. - if (remainder > 0) - { - AssertIndexRange(n_chunks, vector_accumulation_recursion_threshold+1); - const size_type inner_chunks = remainder / 8; - Assert (inner_chunks <= 3, ExcInternalError()); - const size_type remainder_inner = remainder % 8; - ResultType r0 = ResultType(), r1 = ResultType(), - r2 = ResultType(); - switch (inner_chunks) - { - case 3: - r2 = op(index++); - for (size_type j=1; j<8; ++j) - r2 += op(index++); - // no break - case 2: - r1 = op(index++); - for (size_type j=1; j<8; ++j) - r1 += op(index++); - r1 += r2; - // no break - case 1: - r2 = op(index++); - for (size_type j=1; j<8; ++j) - r2 += op(index++); - // no break - default: - for (size_type j=0; j 1) - { - if (n_chunks % 2 == 1) - outer_results[n_chunks++] = ResultType(); - for (size_type i=0; i - void - accumulate_regular(const Operation &op, - size_type &n_chunks, - size_type &index, - ResultType (&outer_results)[vector_accumulation_recursion_threshold], - internal::bool2type) - { - for (size_type i=0; i - void - accumulate_regular(const Operation &op, - size_type &n_chunks, - size_type &index, - Number (&outer_results)[vector_accumulation_recursion_threshold], - internal::bool2type) - { - const unsigned int nvecs = VectorizedArray::n_array_elements; - const size_type regular_chunks = n_chunks/nvecs; - for (size_type i=0; i r0 = op.do_vectorized(index); - VectorizedArray r1 = op.do_vectorized(index+nvecs); - VectorizedArray r2 = op.do_vectorized(index+2*nvecs); - VectorizedArray r3 = op.do_vectorized(index+3*nvecs); - index += nvecs*4; - for (size_type j=1; j<8; ++j, index += nvecs*4) - { - r0 += op.do_vectorized(index); - r1 += op.do_vectorized(index+nvecs); - r2 += op.do_vectorized(index+2*nvecs); - r3 += op.do_vectorized(index+3*nvecs); - } - r0 += r1; - r2 += r3; - r0 += r2; - r0.store(&outer_results[i*VectorizedArray::n_array_elements]); - } - - // If we are treating a case where the vector length is not divisible by - // the vectorization length, need a cleanup loop - AssertIndexRange(VectorizedArray::n_array_elements, - 17); - if (n_chunks % VectorizedArray::n_array_elements != 0) - { - VectorizedArray r0 = VectorizedArray(), - r1 = VectorizedArray(); - const size_type start_irreg = regular_chunks * nvecs; - for (size_type c=start_irreg; 
c::n_array_elements; - } - } - - - -#ifdef DEAL_II_WITH_THREADS - /** - * This struct takes the loop range from the tbb parallel for loop and - * translates it to the actual ranges of the reduction loop inside the - * vector. It encodes the grain size but might choose larger values of - * chunks than the minimum grain size. The minimum grain size given to tbb - * is 1. For affinity reasons, the layout in this loop must be kept in sync - * with the respective class for plain for loops further up. - * - * Due to this construction, TBB usually only sees a loop of length - * 4*num_threads with grain size 1. The actual ranges inside the vector are - * computed outside of TBB because otherwise TBB would split the ranges in - * some unpredictable position which destroys exact bitwise - * reproducibility. An important part of this is that inside - * TBBReduceFunctor::operator() the recursive calls to accumulate are done - * sequentially on one item a time (even though we could directly run it on - * the whole range given through the tbb::blocked_range times the chunk size - * - but that would be unpredictable). Thus, the values we cannot control - * are the positions in the array that gets filled - but up to that point - * the algorithm TBB sees is just a parallel for and nothing unpredictable - * can happen. - * - * To sum up: Once the number of threads and the vector size are fixed, we - * have an exact layout of how the calls into the recursive function will - * happen. Inside the recursive function, we again only depend on the - * length. Finally, the concurrent threads write into different positions in - * a result vector in a thread-safe way and the addition in the short array - * is again serial. - */ - template - struct TBBReduceFunctor - { - static const unsigned int threshold_array_allocate = 512; - - TBBReduceFunctor(const Operation &op, - const size_type vec_size) - : - op(op), - vec_size(vec_size) - { - // set chunk size for sub-tasks - const unsigned int gs = internal::Vector::minimum_parallel_grain_size; - n_chunks = std::min(static_cast(4*MultithreadInfo::n_threads()), - vec_size / gs); - chunk_size = vec_size / n_chunks; - - // round to next multiple of 512 (or leave it at the minimum grain size - // if that happens to be smaller). this is advantageous because our - // algorithm favors lengths of a power of 2 due to pairwise summation -> - // at most one 'oddly' sized chunk - if (chunk_size > 512) - chunk_size = ((chunk_size + 511)/512)*512; - n_chunks = (vec_size + chunk_size - 1) / chunk_size; - AssertIndexRange((n_chunks-1)*chunk_size, vec_size); - AssertIndexRange(vec_size, n_chunks*chunk_size+1); - - if (n_chunks > threshold_array_allocate) - { - large_array.resize(n_chunks); - array_ptr = &large_array[0]; - } - else - array_ptr = &small_array[0]; - }; - - void operator() (const tbb::blocked_range &range) const - { - for (size_type i = range.begin(); i < range.end(); ++i) - accumulate_recursive(op, i*chunk_size, std::min((i+1)*chunk_size, vec_size), - array_ptr[i]); - } - - ResultType do_sum() const - { - while (n_chunks > 1) - { - if (n_chunks % 2 == 1) - array_ptr[n_chunks++] = ResultType(); - for (size_type i=0; i large_array; - // this variable either points to small_array or large_array depending on - // the number of threads we want to feed - mutable ResultType *array_ptr; - }; -#endif - - - - /** - * This is the general caller for parallel reduction operations that work in - * parallel. 
- */ - template - void parallel_reduce (const Operation &op, - const size_type vec_size, - ResultType &result, - std_cxx11::shared_ptr &partitioner) - { -#ifdef DEAL_II_WITH_THREADS - // only go to the parallel function in case there are at least 4 parallel - // items, otherwise the overhead is too large - if (vec_size >= 4*internal::Vector::minimum_parallel_grain_size && - MultithreadInfo::n_threads() > 1) - { - Assert(partitioner.get() != NULL, - ExcInternalError("Unexpected initialization of Vector that does " - "not set the TBB partitioner to a usable state.")); - std_cxx11::shared_ptr tbb_partitioner = - partitioner->acquire_one_partitioner(); - - TBBReduceFunctor generic_functor(op, vec_size); - tbb::parallel_for (tbb::blocked_range (0, - generic_functor.n_chunks, - 1), - generic_functor, - *tbb_partitioner); - partitioner->release_one_partitioner(tbb_partitioner); - result = generic_functor.do_sum(); - } - else if (vec_size > 0) - accumulate_recursive(op,0,vec_size,result); -#else - accumulate_recursive(op,0,vec_size,result); - (void)partitioner; -#endif - } -} - - - template Vector::Vector (const Vector &v) : diff --git a/include/deal.II/lac/vector_internal.h b/include/deal.II/lac/vector_internal.h new file mode 100644 index 0000000000..eeedf23be4 --- /dev/null +++ b/include/deal.II/lac/vector_internal.h @@ -0,0 +1,1068 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2016 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. 
+// +// --------------------------------------------------------------------- + + +#ifndef dealii__vector_internal_h +#define dealii__vector_internal_h + +DEAL_II_NAMESPACE_OPEN + +namespace internal +{ + typedef types::global_dof_index size_type; + + template + bool is_non_negative (const T &t) + { + return t >= 0; + } + + + template + bool is_non_negative (const std::complex &) + { + Assert (false, + ExcMessage ("Complex numbers do not have an ordering.")); + + return false; + } + + + template + void print (const T &t, + const char *format) + { + if (format != 0) + std::printf (format, t); + else + std::printf (" %5.2f", double(t)); + } + + + + template + void print (const std::complex &t, + const char *format) + { + if (format != 0) + std::printf (format, t.real(), t.imag()); + else + std::printf (" %5.2f+%5.2fi", + double(t.real()), double(t.imag())); + } + + // call std::copy, except for in + // the case where we want to copy + // from std::complex to a + // non-complex type + template + void copy (const T *begin, + const T *end, + U *dest) + { + std::copy (begin, end, dest); + } + + template + void copy (const std::complex *begin, + const std::complex *end, + std::complex *dest) + { + std::copy (begin, end, dest); + } + + template + void copy (const std::complex *, + const std::complex *, + U *) + { + Assert (false, ExcMessage ("Can't convert a vector of complex numbers " + "into a vector of reals/doubles")); + } + + + +#ifdef DEAL_II_WITH_THREADS + /** + * This struct takes the loop range from the tbb parallel for loop and + * translates it to the actual ranges of the for loop within the vector. It + * encodes the grain size but might choose larger values of chunks than the + * minimum grain size. The minimum grain size given to tbb is then simple + * 1. For affinity reasons, the layout in this loop must be kept in sync + * with the respective class for reductions further down. + */ + template + struct TBBForFunctor + { + TBBForFunctor(Functor &functor, + const size_type vec_size) + : + functor(functor), + vec_size(vec_size) + { + // set chunk size for sub-tasks + const unsigned int gs = internal::Vector::minimum_parallel_grain_size; + n_chunks = std::min(static_cast(4*MultithreadInfo::n_threads()), + vec_size / gs); + chunk_size = vec_size / n_chunks; + + // round to next multiple of 512 (or minimum grain size if that happens + // to be smaller). 
this is advantageous because our accumulation + // algorithms favor lengths of a power of 2 due to pairwise summation -> + // at most one 'oddly' sized chunk + if (chunk_size > 512) + chunk_size = ((chunk_size + 511)/512)*512; + n_chunks = (vec_size + chunk_size - 1) / chunk_size; + AssertIndexRange((n_chunks-1)*chunk_size, vec_size); + AssertIndexRange(vec_size, n_chunks*chunk_size+1); + }; + + void operator() (const tbb::blocked_range &range) const + { + const size_type begin = range.begin()*chunk_size; + const size_type end = std::min(range.end()*chunk_size, vec_size); + functor(begin, end); + } + + Functor &functor; + const size_type vec_size; + unsigned int n_chunks; + size_type chunk_size; + }; +#endif + + template + void parallel_for(Functor &functor, + size_type vec_size, + std_cxx11::shared_ptr &partitioner) + { +#ifdef DEAL_II_WITH_THREADS + // only go to the parallel function in case there are at least 4 parallel + // items, otherwise the overhead is too large + if (vec_size >= 4*internal::Vector::minimum_parallel_grain_size && + MultithreadInfo::n_threads() > 1) + { + Assert(partitioner.get() != NULL, + ExcInternalError("Unexpected initialization of Vector that does " + "not set the TBB partitioner to a usable state.")); + std_cxx11::shared_ptr tbb_partitioner = + partitioner->acquire_one_partitioner(); + + TBBForFunctor generic_functor(functor, vec_size); + tbb::parallel_for (tbb::blocked_range (0, + generic_functor.n_chunks, + 1), + generic_functor, + *tbb_partitioner); + partitioner->release_one_partitioner(tbb_partitioner); + } + else if (vec_size > 0) + functor(0,vec_size); +#else + functor(0,vec_size); + (void)partitioner; +#endif + } + + + // Define the functors necessary to use SIMD with TBB. we also include the + // simple copy and set operations + + template + struct Vector_set + { + Number *dst; + Number value; + + void operator() (const size_type begin, const size_type end) const + { + if (value == Number()) + std::memset (dst+begin,0,(end-begin)*sizeof(Number)); + else + std::fill (dst+begin, dst+end, value); + } + }; + + template + struct Vector_copy + { + const OtherNumber *src; + Number *dst; + + void operator() (const size_type begin, const size_type end) const + { + if (types_are_equal::value) + std::memcpy(dst+begin, src+begin, (end-begin)*sizeof(Number)); + else + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (typename dealii::Vector::size_type i=begin; i + struct Vectorization_multiply_factor + { + Number *val; + Number factor; + + void operator() (const size_type begin, const size_type end) const + { + if (parallel::internal::EnableOpenMPSimdFor::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i + struct Vectorization_add_av + { + Number *val; + Number *v_val; + Number factor; + + void operator() (const size_type begin, const size_type end) const + { + if (parallel::internal::EnableOpenMPSimdFor::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i + struct Vectorization_sadd_xav + { + Number *val; + Number *v_val; + Number a; + Number x; + + void operator() (const size_type begin, const size_type end) const + { + if (parallel::internal::EnableOpenMPSimdFor::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i + struct Vectorization_subtract_v + { + Number *val; + Number *v_val; + + void operator() (const size_type begin, const size_type end) const + { + if (parallel::internal::EnableOpenMPSimdFor::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i + struct Vectorization_add_factor + { + Number *val; 
+ Number factor; + + void operator() (const size_type begin, const size_type end) const + { + if (parallel::internal::EnableOpenMPSimdFor::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i + struct Vectorization_add_v + { + Number *val; + Number *v_val; + + void operator() (const size_type begin, const size_type end) const + { + if (parallel::internal::EnableOpenMPSimdFor::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i + struct Vectorization_add_avpbw + { + Number *val; + Number *v_val; + Number *w_val; + Number a; + Number b; + + void operator() (const size_type begin, const size_type end) const + { + if (parallel::internal::EnableOpenMPSimdFor::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i + struct Vectorization_sadd_xv + { + Number *val; + Number *v_val; + Number x; + + void operator() (const size_type begin, const size_type end) const + { + if (parallel::internal::EnableOpenMPSimdFor::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i + struct Vectorization_sadd_xavbw + { + Number *val; + Number *v_val; + Number *w_val; + Number x; + Number a; + Number b; + + void operator() (const size_type begin, const size_type end) const + { + if (parallel::internal::EnableOpenMPSimdFor::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i + struct Vectorization_scale + { + Number *val; + Number *v_val; + + void operator() (const size_type begin, const size_type end) const + { + if (parallel::internal::EnableOpenMPSimdFor::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i + struct Vectorization_equ_au + { + Number *val; + Number *u_val; + Number a; + + void operator() (const size_type begin, const size_type end) const + { + if (parallel::internal::EnableOpenMPSimdFor::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i + struct Vectorization_equ_aubv + { + Number *val; + Number *u_val; + Number *v_val; + Number a; + Number b; + + void operator() (const size_type begin, const size_type end) const + { + if (parallel::internal::EnableOpenMPSimdFor::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i + struct Vectorization_equ_aubvcw + { + Number *val; + Number *u_val; + Number *v_val; + Number *w_val; + Number a; + Number b; + Number c; + + void operator() (const size_type begin, const size_type end) const + { + if (parallel::internal::EnableOpenMPSimdFor::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i + struct Vectorization_ratio + { + Number *val; + Number *a_val; + Number *b_val; + + void operator() (const size_type begin, const size_type end) const + { + if (parallel::internal::EnableOpenMPSimdFor::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i + struct Dot + { + static const bool vectorizes = types_are_equal::value && + (VectorizedArray::n_array_elements > 1); + + Number + operator() (const size_type i) const + { + return X[i] * Number(numbers::NumberTraits::conjugate(Y[i])); + } + + VectorizedArray + do_vectorized(const size_type i) const + { + VectorizedArray x, y; + x.load(X+i); + y.load(Y+i); + return x * y; + } + + const Number *X; + const Number2 *Y; + }; + + template + struct Norm2 + { + static const bool vectorizes = VectorizedArray::n_array_elements > 1; + + RealType + operator() (const size_type i) const + { + return numbers::NumberTraits::abs_square(X[i]); + } + + VectorizedArray + do_vectorized(const size_type i) const + { + VectorizedArray x; + x.load(X+i); + return x * x; + } + + const Number *X; + }; 
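// Illustrative sketch of the VectorizedArray mechanics that the
// do_vectorized() members above rely on: a single load() covers
// n_array_elements consecutive entries, arithmetic happens lane-wise, and
// the lanes are only reduced to a scalar at the very end. The helper is
// hypothetical and assumes n is a multiple of the SIMD width.
template <typename Number>
Number simd_sum_sketch (const Number *x, const size_type n)
{
  const unsigned int width = VectorizedArray<Number>::n_array_elements;
  VectorizedArray<Number> partial = VectorizedArray<Number>();
  for (size_type i = 0; i < n; i += width)
    {
      VectorizedArray<Number> v;
      v.load(x + i);      // processes 'width' entries in one step
      partial += v;
    }
  // reduce the SIMD lanes to a single scalar result
  Number sum = Number();
  for (unsigned int k = 0; k < width; ++k)
    sum += partial[k];
  return sum;
}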
+ + template + struct Norm1 + { + static const bool vectorizes = VectorizedArray::n_array_elements > 1; + + RealType + operator() (const size_type i) const + { + return numbers::NumberTraits::abs(X[i]); + } + + VectorizedArray + do_vectorized(const size_type i) const + { + VectorizedArray x; + x.load(X+i); + return std::abs(x); + } + + const Number *X; + }; + + template + struct NormP + { + static const bool vectorizes = VectorizedArray::n_array_elements > 1; + + RealType + operator() (const size_type i) const + { + return std::pow(numbers::NumberTraits::abs(X[i]), p); + } + + VectorizedArray + do_vectorized(const size_type i) const + { + VectorizedArray x; + x.load(X+i); + return std::pow(std::abs(x),p); + } + + const Number *X; + RealType p; + }; + + template + struct MeanValue + { + static const bool vectorizes = VectorizedArray::n_array_elements > 1; + + Number + operator() (const size_type i) const + { + return X[i]; + } + + VectorizedArray + do_vectorized(const size_type i) const + { + VectorizedArray x; + x.load(X+i); + return x; + } + + const Number *X; + }; + + template + struct AddAndDot + { + static const bool vectorizes = VectorizedArray::n_array_elements > 1; + + Number + operator() (const size_type i) const + { + X[i] += a * V[i]; + return X[i] * Number(numbers::NumberTraits::conjugate(W[i])); + } + + VectorizedArray + do_vectorized(const size_type i) const + { + VectorizedArray x, w, v; + x.load(X+i); + v.load(V+i); + x += a * v; + x.store(X+i); + // may only load from W after storing in X because the pointers might + // point to the same memory + w.load(W+i); + return x * w; + } + + Number *X; + const Number *V, *W; + Number a; + }; + + + + // this is the main working loop for all vector sums using the templated + // operation above. it accumulates the sums using a block-wise summation + // algorithm with post-update. this blocked algorithm has been proposed in + // a similar form by Castaldo, Whaley and Chronopoulos (SIAM + // J. Sci. Comput. 31, 1156-1174, 2008) and we use the smallest possible + // block size, 2. Sometimes it is referred to as pairwise summation. The + // worst case error made by this algorithm is on the order O(eps * + // log2(vec_size)), whereas a naive summation is O(eps * vec_size). Even + // though the Kahan summation is even more accurate with an error O(eps) + // by carrying along remainders not captured by the main sum, that involves + // additional costs which are not worthwhile. See the Wikipedia article on + // the Kahan summation algorithm. + + // The algorithm implemented here has the additional benefit that it is + // easily parallelized without changing the order of how the elements are + // added (floating point addition is not associative). For the same vector + // size and minimum_parallel_grainsize, the blocks are always the + // same and added pairwise. + + // The depth of recursion is controlled by the 'magic' parameter + // vector_accumulation_recursion_threshold: If the length is below + // vector_accumulation_recursion_threshold * 32 (32 is the part of code we + // unroll), a straight loop instead of recursion will be used. At the + // innermost level, eight values are added consecutively in order to better + // balance multiplications and additions. + + // The code returns the result as the last argument in order to make + // spawning tasks simpler and use automatic template deduction. 
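// Simplified scalar sketch of the blocking described above (a hypothetical
// helper without the SIMD path, the remainder handling and the recursive
// splitting of long vectors; it assumes n is a nonzero multiple of 32 and at
// most 128*32): the data is cut into chunks of 32 that are summed with fixed
// loop bounds, and the per-chunk results are then combined pairwise. Like the
// real code, it returns the result through the last argument.
template <typename Operation, typename ResultType>
void blocked_sum_sketch (const Operation &op, const size_type n,
                         ResultType &result)
{
  ResultType outer_results[128];   // mirrors vector_accumulation_recursion_threshold
  size_type n_chunks = n / 32;
  size_type index = 0;
  for (size_type i = 0; i < n_chunks; ++i)
    {
      ResultType r = op(index);
      for (size_type j = 1; j < 32; ++j)
        r += op(index + j);
      outer_results[i] = r;
      index += 32;
    }
  // pairwise combination of the chunk results, padding to an even count
  while (n_chunks > 1)
    {
      if (n_chunks % 2 == 1)
        outer_results[n_chunks++] = ResultType();
      for (size_type i = 0; i < n_chunks; i += 2)
        outer_results[i/2] = outer_results[i] + outer_results[i+1];
      n_chunks /= 2;
    }
  result = outer_results[0];
}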
+ + const unsigned int vector_accumulation_recursion_threshold = 128; + + template + void accumulate_recursive (const Operation &op, + const size_type first, + const size_type last, + ResultType &result) + { + const size_type vec_size = last - first; + if (vec_size <= vector_accumulation_recursion_threshold * 32) + { + // the vector is short enough so we perform the summation. first + // work on the regular part. The innermost 32 values are expanded in + // order to obtain known loop bounds for most of the work. + size_type index = first; + ResultType outer_results [vector_accumulation_recursion_threshold]; + size_type n_chunks = vec_size / 32; + const size_type remainder = vec_size % 32; + Assert (remainder == 0 || n_chunks < vector_accumulation_recursion_threshold, + ExcInternalError()); + + // Select between the regular version and vectorized version based + // on the number types we are given. To choose the vectorized + // version often enough, we need to have all tasks but the last one + // to be divisible by the vectorization length + accumulate_regular(op, n_chunks, index, outer_results, + internal::bool2type()); + + // now work on the remainder, i.e., the last up to 32 values. Use + // switch statement with fall-through to work on these values. + if (remainder > 0) + { + AssertIndexRange(n_chunks, vector_accumulation_recursion_threshold+1); + const size_type inner_chunks = remainder / 8; + Assert (inner_chunks <= 3, ExcInternalError()); + const size_type remainder_inner = remainder % 8; + ResultType r0 = ResultType(), r1 = ResultType(), + r2 = ResultType(); + switch (inner_chunks) + { + case 3: + r2 = op(index++); + for (size_type j=1; j<8; ++j) + r2 += op(index++); + // no break + case 2: + r1 = op(index++); + for (size_type j=1; j<8; ++j) + r1 += op(index++); + r1 += r2; + // no break + case 1: + r2 = op(index++); + for (size_type j=1; j<8; ++j) + r2 += op(index++); + // no break + default: + for (size_type j=0; j 1) + { + if (n_chunks % 2 == 1) + outer_results[n_chunks++] = ResultType(); + for (size_type i=0; i + void + accumulate_regular(const Operation &op, + size_type &n_chunks, + size_type &index, + ResultType (&outer_results)[vector_accumulation_recursion_threshold], + internal::bool2type) + { + for (size_type i=0; i + void + accumulate_regular(const Operation &op, + size_type &n_chunks, + size_type &index, + Number (&outer_results)[vector_accumulation_recursion_threshold], + internal::bool2type) + { + const unsigned int nvecs = VectorizedArray::n_array_elements; + const size_type regular_chunks = n_chunks/nvecs; + for (size_type i=0; i r0 = op.do_vectorized(index); + VectorizedArray r1 = op.do_vectorized(index+nvecs); + VectorizedArray r2 = op.do_vectorized(index+2*nvecs); + VectorizedArray r3 = op.do_vectorized(index+3*nvecs); + index += nvecs*4; + for (size_type j=1; j<8; ++j, index += nvecs*4) + { + r0 += op.do_vectorized(index); + r1 += op.do_vectorized(index+nvecs); + r2 += op.do_vectorized(index+2*nvecs); + r3 += op.do_vectorized(index+3*nvecs); + } + r0 += r1; + r2 += r3; + r0 += r2; + r0.store(&outer_results[i*VectorizedArray::n_array_elements]); + } + + // If we are treating a case where the vector length is not divisible by + // the vectorization length, need a cleanup loop + AssertIndexRange(VectorizedArray::n_array_elements, + 17); + if (n_chunks % VectorizedArray::n_array_elements != 0) + { + VectorizedArray r0 = VectorizedArray(), + r1 = VectorizedArray(); + const size_type start_irreg = regular_chunks * nvecs; + for (size_type c=start_irreg; 
c::n_array_elements; + } + } + + + +#ifdef DEAL_II_WITH_THREADS + /** + * This struct takes the loop range from the tbb parallel for loop and + * translates it to the actual ranges of the reduction loop inside the + * vector. It encodes the grain size but might choose larger values of + * chunks than the minimum grain size. The minimum grain size given to tbb + * is 1. For affinity reasons, the layout in this loop must be kept in sync + * with the respective class for plain for loops further up. + * + * Due to this construction, TBB usually only sees a loop of length + * 4*num_threads with grain size 1. The actual ranges inside the vector are + * computed outside of TBB because otherwise TBB would split the ranges in + * some unpredictable position which destroys exact bitwise + * reproducibility. An important part of this is that inside + * TBBReduceFunctor::operator() the recursive calls to accumulate are done + * sequentially on one item a time (even though we could directly run it on + * the whole range given through the tbb::blocked_range times the chunk size + * - but that would be unpredictable). Thus, the values we cannot control + * are the positions in the array that gets filled - but up to that point + * the algorithm TBB sees is just a parallel for and nothing unpredictable + * can happen. + * + * To sum up: Once the number of threads and the vector size are fixed, we + * have an exact layout of how the calls into the recursive function will + * happen. Inside the recursive function, we again only depend on the + * length. Finally, the concurrent threads write into different positions in + * a result vector in a thread-safe way and the addition in the short array + * is again serial. + */ + template + struct TBBReduceFunctor + { + static const unsigned int threshold_array_allocate = 512; + + TBBReduceFunctor(const Operation &op, + const size_type vec_size) + : + op(op), + vec_size(vec_size) + { + // set chunk size for sub-tasks + const unsigned int gs = internal::Vector::minimum_parallel_grain_size; + n_chunks = std::min(static_cast(4*MultithreadInfo::n_threads()), + vec_size / gs); + chunk_size = vec_size / n_chunks; + + // round to next multiple of 512 (or leave it at the minimum grain size + // if that happens to be smaller). this is advantageous because our + // algorithm favors lengths of a power of 2 due to pairwise summation -> + // at most one 'oddly' sized chunk + if (chunk_size > 512) + chunk_size = ((chunk_size + 511)/512)*512; + n_chunks = (vec_size + chunk_size - 1) / chunk_size; + AssertIndexRange((n_chunks-1)*chunk_size, vec_size); + AssertIndexRange(vec_size, n_chunks*chunk_size+1); + + if (n_chunks > threshold_array_allocate) + { + large_array.resize(n_chunks); + array_ptr = &large_array[0]; + } + else + array_ptr = &small_array[0]; + }; + + void operator() (const tbb::blocked_range &range) const + { + for (size_type i = range.begin(); i < range.end(); ++i) + accumulate_recursive(op, i*chunk_size, std::min((i+1)*chunk_size, vec_size), + array_ptr[i]); + } + + ResultType do_sum() const + { + while (n_chunks > 1) + { + if (n_chunks % 2 == 1) + array_ptr[n_chunks++] = ResultType(); + for (size_type i=0; i large_array; + // this variable either points to small_array or large_array depending on + // the number of threads we want to feed + mutable ResultType *array_ptr; + }; +#endif + + + + /** + * This is the general caller for parallel reduction operations that work in + * parallel. 
+ */ + template + void parallel_reduce (const Operation &op, + const size_type vec_size, + ResultType &result, + std_cxx11::shared_ptr &partitioner) + { +#ifdef DEAL_II_WITH_THREADS + // only go to the parallel function in case there are at least 4 parallel + // items, otherwise the overhead is too large + if (vec_size >= 4*internal::Vector::minimum_parallel_grain_size && + MultithreadInfo::n_threads() > 1) + { + Assert(partitioner.get() != NULL, + ExcInternalError("Unexpected initialization of Vector that does " + "not set the TBB partitioner to a usable state.")); + std_cxx11::shared_ptr tbb_partitioner = + partitioner->acquire_one_partitioner(); + + TBBReduceFunctor generic_functor(op, vec_size); + tbb::parallel_for (tbb::blocked_range (0, + generic_functor.n_chunks, + 1), + generic_functor, + *tbb_partitioner); + partitioner->release_one_partitioner(tbb_partitioner); + result = generic_functor.do_sum(); + } + else if (vec_size > 0) + accumulate_recursive(op,0,vec_size,result); +#else + accumulate_recursive(op,0,vec_size,result); + (void)partitioner; +#endif + } +} + +DEAL_II_NAMESPACE_CLOSE + +#endif -- 2.39.5
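For context, a hypothetical caller illustrating how the vector operations in
vector.templates.h are expected to drive these functors: the real code hands
the functor to internal::parallel_for() together with the partitioner stored
in the Vector object, while the serial fallback shown here simply applies it
to the whole range.

#include <deal.II/lac/vector.templates.h>   // also pulls in the new vector_internal.h

// hypothetical helper: fill a raw array with a constant via Vector_set
void fill_with (double *values, const dealii::types::global_dof_index n,
                const double s)
{
  dealii::internal::Vector_set<double> setter;
  setter.dst   = values;
  setter.value = s;
  setter(0, n);   // serial path: one call over the full range [0, n)
}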