From: Bruno Turcksin Date: Sun, 8 Jan 2017 20:26:44 +0000 (-0500) Subject: Encapsulate functions from vector_operations_internal in VectorOperations namespace. X-Git-Tag: v8.5.0-rc1~281^2 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F3748%2Fhead;p=dealii.git Encapsulate functions from vector_operations_internal in VectorOperations namespace. --- diff --git a/include/deal.II/lac/la_parallel_vector.templates.h b/include/deal.II/lac/la_parallel_vector.templates.h index 37d8433162..28b648dd69 100644 --- a/include/deal.II/lac/la_parallel_vector.templates.h +++ b/include/deal.II/lac/la_parallel_vector.templates.h @@ -237,9 +237,9 @@ namespace LinearAlgebra reinit (v, true); thread_loop_partitioner = v.thread_loop_partitioner; - dealii::internal::Vector_copy copier(v.val, val); - internal::parallel_for(copier, partitioner->local_size(), - thread_loop_partitioner); + dealii::internal::VectorOperations::Vector_copy copier(v.val, val); + internal::VectorOperations::parallel_for(copier, partitioner->local_size(), + thread_loop_partitioner); zero_out_ghosts(); } @@ -380,9 +380,9 @@ namespace LinearAlgebra must_update_ghost_values |= vector_is_ghosted; thread_loop_partitioner = c.thread_loop_partitioner; - dealii::internal::Vector_copy copier(c.val, val); - internal::parallel_for(copier, partitioner->local_size(), - thread_loop_partitioner); + dealii::internal::VectorOperations::Vector_copy copier(c.val, val); + internal::VectorOperations::parallel_for(copier, partitioner->local_size(), + thread_loop_partitioner); if (must_update_ghost_values) update_ghost_values(); @@ -854,9 +854,9 @@ namespace LinearAlgebra tmp_vector.compress(operation); - dealii::internal::Vector_copy copier(tmp_vector.val, val); - internal::parallel_for(copier, partitioner->local_size(), - thread_loop_partitioner); + dealii::internal::VectorOperations::Vector_copy copier(tmp_vector.val, val); + internal::VectorOperations::parallel_for(copier, partitioner->local_size(), + thread_loop_partitioner); } @@ -913,10 +913,10 @@ namespace LinearAlgebra Vector & Vector::operator = (const Number s) { - internal::Vector_set setter(s, val); + internal::VectorOperations::Vector_set setter(s, val); - internal::parallel_for(setter, partitioner->local_size(), - thread_loop_partitioner); + internal::VectorOperations::parallel_for(setter, partitioner->local_size(), + thread_loop_partitioner); // if we call Vector::operator=0, we want to zero out all the entries // plus ghosts. 
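Every hunk in this commit follows the same pattern: a helper functor or loop driver that used to live directly in namespace internal (or dealii::internal) is now nested one level deeper in internal::VectorOperations, and each call site gains the extra qualifier. The following is a minimal, standalone sketch of that pattern, using a simplified Vector_set functor and a serial stand-in for parallel_for; it is an illustration only, not the actual deal.II implementations.

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

namespace internal
{
  namespace VectorOperations   // the new enclosing namespace introduced by this commit
  {
    // simplified analogue of the Vector_set functor: fills [begin,end) with a value
    template <typename Number>
    struct Vector_set
    {
      Vector_set(Number value, Number *dst) : value(value), dst(dst) {}

      void operator()(const std::size_t begin, const std::size_t end) const
      {
        std::fill(dst + begin, dst + end, value);
      }

      Number  value;
      Number *dst;
    };

    // serial stand-in for the loop driver; the real one dispatches to TBB
    template <typename Functor>
    void parallel_for(Functor &functor, const std::size_t size)
    {
      functor(0, size);
    }
  }
}

int main()
{
  std::vector<double> v(8);
  internal::VectorOperations::Vector_set<double> setter(3.14, v.data());
  internal::VectorOperations::parallel_for(setter, v.size());   // fully qualified call, as in the hunks above
  std::cout << v[0] << ' ' << v[7] << '\n';                     // prints 3.14 3.14
}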
@@ -939,9 +939,9 @@ namespace LinearAlgebra AssertDimension (local_size(), v.local_size()); - internal::Vectorization_add_v vector_add(val, v.val); - internal::parallel_for(vector_add, partitioner->local_size(), - thread_loop_partitioner); + internal::VectorOperations::Vectorization_add_v vector_add(val, v.val); + internal::VectorOperations::parallel_for(vector_add, partitioner->local_size(), + thread_loop_partitioner); if (vector_is_ghosted) update_ghost_values(); @@ -962,9 +962,9 @@ namespace LinearAlgebra AssertDimension (local_size(), v.local_size()); - internal::Vectorization_subtract_v vector_subtract(val, v.val); - internal::parallel_for(vector_subtract, partitioner->local_size(), - thread_loop_partitioner); + internal::VectorOperations::Vectorization_subtract_v vector_subtract(val, v.val); + internal::VectorOperations::parallel_for(vector_subtract, partitioner->local_size(), + thread_loop_partitioner); if (vector_is_ghosted) update_ghost_values(); @@ -980,9 +980,9 @@ namespace LinearAlgebra { AssertIsFinite(a); - internal::Vectorization_add_factor vector_add(val, a); - internal::parallel_for(vector_add, partitioner->local_size(), - thread_loop_partitioner); + internal::VectorOperations::Vectorization_add_factor vector_add(val, a); + internal::VectorOperations::parallel_for(vector_add, partitioner->local_size(), + thread_loop_partitioner); if (vector_is_ghosted) update_ghost_values(); @@ -1003,9 +1003,9 @@ namespace LinearAlgebra AssertIsFinite(a); AssertDimension (local_size(), v.local_size()); - internal::Vectorization_add_av vector_add(val, v.val, a); - internal::parallel_for(vector_add, partitioner->local_size(), - thread_loop_partitioner); + internal::VectorOperations::Vectorization_add_av vector_add(val, v.val, a); + internal::VectorOperations::parallel_for(vector_add, partitioner->local_size(), + thread_loop_partitioner); if (vector_is_ghosted) update_ghost_values(); @@ -1034,9 +1034,10 @@ namespace LinearAlgebra AssertDimension (local_size(), v.local_size()); AssertDimension (local_size(), w.local_size()); - internal::Vectorization_add_avpbw vector_add(val, v.val, w.val, a, b); - internal::parallel_for(vector_add, partitioner->local_size(), - thread_loop_partitioner); + internal::VectorOperations::Vectorization_add_avpbw vector_add(val, v.val, + w.val, a, b); + internal::VectorOperations::parallel_for(vector_add, partitioner->local_size(), + thread_loop_partitioner); if (vector_is_ghosted) update_ghost_values(); @@ -1052,9 +1053,9 @@ namespace LinearAlgebra AssertIsFinite(x); AssertDimension (local_size(), v.local_size()); - internal::Vectorization_sadd_xv vector_sadd(val, v.val, x); - internal::parallel_for(vector_sadd, partitioner->local_size(), - thread_loop_partitioner); + internal::VectorOperations::Vectorization_sadd_xv vector_sadd(val, v.val, x); + internal::VectorOperations::parallel_for(vector_sadd, partitioner->local_size(), + thread_loop_partitioner); if (vector_is_ghosted) update_ghost_values(); @@ -1077,9 +1078,9 @@ namespace LinearAlgebra AssertIsFinite(a); AssertDimension (local_size(), v.local_size()); - internal::Vectorization_sadd_xav vector_sadd(val, v.val, a, x); - internal::parallel_for(vector_sadd, partitioner->local_size(), - thread_loop_partitioner); + internal::VectorOperations::Vectorization_sadd_xav vector_sadd(val, v.val, a, x); + internal::VectorOperations::parallel_for(vector_sadd, partitioner->local_size(), + thread_loop_partitioner); if (vector_is_ghosted) update_ghost_values(); @@ -1102,10 +1103,10 @@ namespace LinearAlgebra AssertDimension 
(local_size(), v.local_size()); AssertDimension (local_size(), w.local_size()); - internal::Vectorization_sadd_xavbw vector_sadd(val, v.val, w.val, - x, a, b); - internal::parallel_for(vector_sadd, partitioner->local_size(), - thread_loop_partitioner); + internal::VectorOperations::Vectorization_sadd_xavbw vector_sadd(val, v.val, w.val, + x, a, b); + internal::VectorOperations::parallel_for(vector_sadd, partitioner->local_size(), + thread_loop_partitioner); if (vector_is_ghosted) update_ghost_values(); @@ -1118,10 +1119,11 @@ namespace LinearAlgebra Vector::operator *= (const Number factor) { AssertIsFinite(factor); - internal::Vectorization_multiply_factor vector_multiply(val, factor); + internal::VectorOperations::Vectorization_multiply_factor vector_multiply(val, + factor); - internal::parallel_for(vector_multiply, partitioner->local_size(), - thread_loop_partitioner); + internal::VectorOperations::parallel_for(vector_multiply, partitioner->local_size(), + thread_loop_partitioner); if (vector_is_ghosted) update_ghost_values(); @@ -1152,9 +1154,9 @@ namespace LinearAlgebra AssertDimension (local_size(), v.local_size()); - internal::Vectorization_scale vector_scale(val, v.val); - internal::parallel_for(vector_scale, partitioner->local_size(), - thread_loop_partitioner); + internal::VectorOperations::Vectorization_scale vector_scale(val, v.val); + internal::VectorOperations::parallel_for(vector_scale, partitioner->local_size(), + thread_loop_partitioner); if (vector_is_ghosted) update_ghost_values(); @@ -1175,9 +1177,9 @@ namespace LinearAlgebra AssertIsFinite(a); AssertDimension (local_size(), v.local_size()); - internal::Vectorization_equ_au vector_equ(val, v.val, a); - internal::parallel_for(vector_equ, partitioner->local_size(), - thread_loop_partitioner); + internal::VectorOperations::Vectorization_equ_au vector_equ(val, v.val, a); + internal::VectorOperations::parallel_for(vector_equ, partitioner->local_size(), + thread_loop_partitioner); if (vector_is_ghosted) update_ghost_values(); @@ -1198,9 +1200,10 @@ namespace LinearAlgebra AssertDimension (local_size(), v.local_size()); AssertDimension (local_size(), w.local_size()); - internal::Vectorization_equ_aubv vector_equ(val, v.val, w.val, a, b); - internal::parallel_for(vector_equ, partitioner->local_size(), - thread_loop_partitioner); + internal::VectorOperations::Vectorization_equ_aubv vector_equ(val, v.val, + w.val, a, b); + internal::VectorOperations::parallel_for(vector_equ, partitioner->local_size(), + thread_loop_partitioner); if (vector_is_ghosted) update_ghost_values(); @@ -1251,9 +1254,9 @@ namespace LinearAlgebra AssertDimension (partitioner->local_size(), v.partitioner->local_size()); Number sum; - internal::Dot dot(val, v.val); - internal::parallel_reduce (dot, partitioner->local_size(), sum, - thread_loop_partitioner); + internal::VectorOperations::Dot dot(val, v.val); + internal::VectorOperations::parallel_reduce (dot, partitioner->local_size(), sum, + thread_loop_partitioner); AssertIsFinite(sum); return sum; @@ -1285,9 +1288,9 @@ namespace LinearAlgebra Vector::norm_sqr_local () const { real_type sum; - internal::Norm2 norm2(val); - internal::parallel_reduce (norm2, partitioner->local_size(), sum, - thread_loop_partitioner); + internal::VectorOperations::Norm2 norm2(val); + internal::VectorOperations::parallel_reduce (norm2, partitioner->local_size(), sum, + thread_loop_partitioner); AssertIsFinite(sum); return sum; @@ -1305,9 +1308,9 @@ namespace LinearAlgebra return Number(); Number sum; - internal::MeanValue 
mean(val); - internal::parallel_reduce (mean, partitioner->local_size(), sum, - thread_loop_partitioner); + internal::VectorOperations::MeanValue mean(val); + internal::VectorOperations::parallel_reduce (mean, partitioner->local_size(), sum, + thread_loop_partitioner); return sum / real_type(partitioner->local_size()); } @@ -1335,9 +1338,9 @@ namespace LinearAlgebra Vector::l1_norm_local () const { real_type sum; - internal::Norm1 norm1(val); - internal::parallel_reduce (norm1, partitioner->local_size(), sum, - thread_loop_partitioner); + internal::VectorOperations::Norm1 norm1(val); + internal::VectorOperations::parallel_reduce (norm1, partitioner->local_size(), sum, + thread_loop_partitioner); return sum; } @@ -1377,9 +1380,9 @@ namespace LinearAlgebra Vector::lp_norm_local (const real_type p) const { real_type sum; - internal::NormP normp(val, p); - internal::parallel_reduce (normp, partitioner->local_size(), sum, - thread_loop_partitioner); + internal::VectorOperations::NormP normp(val, p); + internal::VectorOperations::parallel_reduce (normp, partitioner->local_size(), sum, + thread_loop_partitioner); return std::pow(sum, 1./p); } @@ -1441,8 +1444,8 @@ namespace LinearAlgebra AssertDimension (vec_size, w.local_size()); Number sum; - internal::AddAndDot adder(this->val, v.val, w.val, a); - internal::parallel_reduce (adder, vec_size, sum, thread_loop_partitioner); + internal::VectorOperations::AddAndDot adder(this->val, v.val, w.val, a); + internal::VectorOperations::parallel_reduce (adder, vec_size, sum, thread_loop_partitioner); AssertIsFinite(sum); return sum; } diff --git a/include/deal.II/lac/la_vector.templates.h b/include/deal.II/lac/la_vector.templates.h index d9430dc62c..09d2e022ba 100644 --- a/include/deal.II/lac/la_vector.templates.h +++ b/include/deal.II/lac/la_vector.templates.h @@ -35,8 +35,8 @@ namespace LinearAlgebra if (this->size() != in_vector.size()) this->reinit(in_vector, true); - dealii::internal::Vector_copy copier(in_vector.val, this->val); - internal::parallel_for(copier, this->size(), this->thread_loop_partitioner); + dealii::internal::VectorOperations::Vector_copy copier(in_vector.val, this->val); + internal::VectorOperations::parallel_for(copier, this->size(), this->thread_loop_partitioner); return *this; } @@ -51,8 +51,8 @@ namespace LinearAlgebra if (this->size() != in_vector.size()) this->reinit(in_vector, true); - dealii::internal::Vector_copy copier(in_vector.val, this->val); - internal::parallel_for(copier, this->size(), this->thread_loop_partitioner); + dealii::internal::VectorOperations::Vector_copy copier(in_vector.val, this->val); + internal::VectorOperations::parallel_for(copier, this->size(), this->thread_loop_partitioner); return *this; } @@ -65,8 +65,8 @@ namespace LinearAlgebra Assert(s==static_cast(0), ExcMessage("Only 0 can be assigned to a vector.")); (void) s; - internal::Vector_set setter(Number(), this->val); - internal::parallel_for(setter, this->size(), this->thread_loop_partitioner); + internal::VectorOperations::Vector_set setter(Number(), this->val); + internal::VectorOperations::parallel_for(setter, this->size(), this->thread_loop_partitioner); return *this; } @@ -78,8 +78,8 @@ namespace LinearAlgebra { AssertIsFinite(factor); - internal::Vectorization_multiply_factor vector_multiply(this->val, factor); - internal::parallel_for(vector_multiply, this->size(), this->thread_loop_partitioner); + internal::VectorOperations::Vectorization_multiply_factor vector_multiply(this->val, factor); + 
internal::VectorOperations::parallel_for(vector_multiply, this->size(), this->thread_loop_partitioner); return *this; } @@ -109,8 +109,8 @@ namespace LinearAlgebra Assert(down_V.size()==this->size(), ExcMessage("Cannot add two vectors with different numbers of elements")); - internal::Vectorization_add_v vector_add(this->val, down_V.val); - internal::parallel_for(vector_add, this->size(), this->thread_loop_partitioner); + internal::VectorOperations::Vectorization_add_v vector_add(this->val, down_V.val); + internal::VectorOperations::parallel_for(vector_add, this->size(), this->thread_loop_partitioner); return *this; } @@ -127,8 +127,8 @@ namespace LinearAlgebra const Vector &down_V = dynamic_cast&>(V); Assert(down_V.size()==this->size(), ExcMessage("Cannot subtract two vectors with different numbers of elements")); - internal::Vectorization_subtract_v vector_subtract(this->val, down_V.val); - internal::parallel_for(vector_subtract, this->size(), this->thread_loop_partitioner); + internal::VectorOperations::Vectorization_subtract_v vector_subtract(this->val, down_V.val); + internal::VectorOperations::parallel_for(vector_subtract, this->size(), this->thread_loop_partitioner); return *this; } @@ -148,8 +148,8 @@ namespace LinearAlgebra ExcMessage("Cannot compute the scalar product " "of two vectors with different numbers of elements")); Number sum; - internal::Dot dot(this->val, down_V.val); - internal::parallel_reduce(dot, this->size(), sum, this->thread_loop_partitioner); + internal::VectorOperations::Dot dot(this->val, down_V.val); + internal::VectorOperations::parallel_reduce(dot, this->size(), sum, this->thread_loop_partitioner); return sum; } @@ -172,8 +172,8 @@ namespace LinearAlgebra { AssertIsFinite(a); - internal::Vectorization_add_factor vector_add(this->val, a); - internal::parallel_for(vector_add, this->size(), this->thread_loop_partitioner); + internal::VectorOperations::Vectorization_add_factor vector_add(this->val, a); + internal::VectorOperations::parallel_for(vector_add, this->size(), this->thread_loop_partitioner); } @@ -191,8 +191,8 @@ namespace LinearAlgebra Assert(down_V.size()==this->size(), ExcMessage("Cannot add two vectors with different numbers of elements")); - internal::Vectorization_add_av vector_add_av(this->val, down_V.val, a); - internal::parallel_for(vector_add_av, this->size(), this->thread_loop_partitioner); + internal::VectorOperations::Vectorization_add_av vector_add_av(this->val, down_V.val, a); + internal::VectorOperations::parallel_for(vector_add_av, this->size(), this->thread_loop_partitioner); } @@ -219,9 +219,9 @@ namespace LinearAlgebra Assert(down_W.size()==this->size(), ExcMessage("Cannot add two vectors with different numbers of elements")); - internal::Vectorization_add_avpbw vector_add(this->val, down_V.val, - down_W.val, a, b); - internal::parallel_for(vector_add, this->size(), this->thread_loop_partitioner); + internal::VectorOperations::Vectorization_add_avpbw vector_add(this->val, down_V.val, + down_W.val, a, b); + internal::VectorOperations::parallel_for(vector_add, this->size(), this->thread_loop_partitioner); } @@ -239,9 +239,9 @@ namespace LinearAlgebra // Downcast V. It fails, throws an exception. 
const Vector &down_V = dynamic_cast&>(V); - internal::Vectorization_sadd_xav vector_sadd_xav(this->val, down_V.val, - a, s); - internal::parallel_for(vector_sadd_xav, this->size(), this->thread_loop_partitioner); + internal::VectorOperations::Vectorization_sadd_xav vector_sadd_xav(this->val, down_V.val, + a, s); + internal::VectorOperations::parallel_for(vector_sadd_xav, this->size(), this->thread_loop_partitioner); } @@ -259,8 +259,8 @@ namespace LinearAlgebra Assert(down_scaling_factors.size()==this->size(), ExcMessage("Cannot add two vectors with different numbers of elements")); - internal::Vectorization_scale vector_scale(this->val, down_scaling_factors.val); - internal::parallel_for(vector_scale, this->size(), this->thread_loop_partitioner); + internal::VectorOperations::Vectorization_scale vector_scale(this->val, down_scaling_factors.val); + internal::VectorOperations::parallel_for(vector_scale, this->size(), this->thread_loop_partitioner); } @@ -276,8 +276,8 @@ namespace LinearAlgebra // Downcast V. If fails, throws an exception. const Vector &down_V = dynamic_cast&>(V); - internal::Vectorization_equ_au vector_equ(this->val, down_V.val, a); - internal::parallel_for(vector_equ, this->size(), this->thread_loop_partitioner); + internal::VectorOperations::Vectorization_equ_au vector_equ(this->val, down_V.val, a); + internal::VectorOperations::parallel_for(vector_equ, this->size(), this->thread_loop_partitioner); } @@ -289,8 +289,8 @@ namespace LinearAlgebra typedef typename VectorSpaceVector::real_type real_type; value_type sum; - internal::MeanValue mean_value(this->val); - internal::parallel_reduce(mean_value, this->size(), sum, this->thread_loop_partitioner); + internal::VectorOperations::MeanValue mean_value(this->val); + internal::VectorOperations::parallel_reduce(mean_value, this->size(), sum, this->thread_loop_partitioner); return sum/static_cast(this->size()); } @@ -304,8 +304,8 @@ namespace LinearAlgebra typedef typename VectorSpaceVector::real_type real_type; real_type sum; - internal::Norm1 norm1(this->val); - internal::parallel_reduce(norm1, this->size(), sum, this->thread_loop_partitioner); + internal::VectorOperations::Norm1 norm1(this->val); + internal::VectorOperations::parallel_reduce(norm1, this->size(), sum, this->thread_loop_partitioner); return sum; } @@ -325,9 +325,9 @@ namespace LinearAlgebra // precision) using the BLAS approach with a weight, see e.g. dnrm2.f. 
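The comment above refers to the classic dnrm2-style rescaling: when the plain sum of squares would overflow or underflow, the norm is recomputed by carrying a running scale so that only ratios bounded by one are ever squared. The sketch below shows that fallback idea as a self-contained function, independent of the deal.II vector classes; it illustrates the algorithm the comment points to, not the code path in this file.

#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

// overflow/underflow-safe Euclidean norm in the style of BLAS dnrm2:
// keep a scale and a sum of squares of entries divided by that scale.
double safe_l2_norm(const std::vector<double> &x)
{
  double scale = 0.0, ssq = 1.0;
  for (std::size_t i = 0; i < x.size(); ++i)
    {
      const double absxi = std::abs(x[i]);
      if (absxi == 0.0)
        continue;
      if (scale < absxi)
        {
          // rescale the accumulated sum to the new, larger scale
          ssq   = 1.0 + ssq * (scale / absxi) * (scale / absxi);
          scale = absxi;
        }
      else
        ssq += (absxi / scale) * (absxi / scale);
    }
  return scale * std::sqrt(ssq);
}

int main()
{
  std::vector<double> x = {3e+200, 4e+200};   // the naive sum of squares overflows here
  std::cout << safe_l2_norm(x) << '\n';       // prints 5e+200
}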
typedef typename VectorSpaceVector::real_type real_type; real_type norm_square; - internal::Norm2 norm2(this->val); - internal::parallel_reduce(norm2, this->size(), norm_square, - this->thread_loop_partitioner); + internal::VectorOperations::Norm2 norm2(this->val); + internal::VectorOperations::parallel_reduce(norm2, this->size(), norm_square, + this->thread_loop_partitioner); if (numbers::is_finite(norm_square) && norm_square>=std::numeric_limits::min()) return std::sqrt(norm_square); @@ -394,8 +394,8 @@ namespace LinearAlgebra ExcMessage("Cannot add two vectors with different numbers of elements")); Number sum; - internal::AddAndDot adder(this->val, down_V.val, down_W.val, a); - internal::parallel_reduce(adder, this->size(), sum, this->thread_loop_partitioner); + internal::VectorOperations::AddAndDot adder(this->val, down_V.val, down_W.val, a); + internal::VectorOperations::parallel_reduce(adder, this->size(), sum, this->thread_loop_partitioner); AssertIsFinite(sum); return sum; diff --git a/include/deal.II/lac/read_write_vector.templates.h b/include/deal.II/lac/read_write_vector.templates.h index 9ba53fe3c4..f24c76ced1 100644 --- a/include/deal.II/lac/read_write_vector.templates.h +++ b/include/deal.II/lac/read_write_vector.templates.h @@ -140,7 +140,7 @@ namespace LinearAlgebra ReadWriteVector::apply(const Functor &func) { FunctorTemplate functor(*this, func); - internal::parallel_for(functor, n_elements(), thread_loop_partitioner); + internal::VectorOperations::parallel_for(functor, n_elements(), thread_loop_partitioner); } #endif @@ -157,8 +157,8 @@ namespace LinearAlgebra if (n_elements() != in_vector.n_elements()) reinit(in_vector, true); - dealii::internal::Vector_copy copier(in_vector.val, val); - internal::parallel_for(copier, n_elements(), thread_loop_partitioner); + dealii::internal::VectorOperations::Vector_copy copier(in_vector.val, val); + internal::VectorOperations::parallel_for(copier, n_elements(), thread_loop_partitioner); return *this; } @@ -174,8 +174,8 @@ namespace LinearAlgebra if (n_elements() != in_vector.n_elements()) reinit(in_vector, true); - dealii::internal::Vector_copy copier(in_vector.val, val); - internal::parallel_for(copier, n_elements(), thread_loop_partitioner); + dealii::internal::VectorOperations::Vector_copy copier(in_vector.val, val); + internal::VectorOperations::parallel_for(copier, n_elements(), thread_loop_partitioner); return *this; } @@ -189,8 +189,8 @@ namespace LinearAlgebra Assert(s==static_cast(0), ExcMessage("Only 0 can be assigned to a vector.")); (void)s; - internal::Vector_set setter(Number(), val); - internal::parallel_for(setter, n_elements(), thread_loop_partitioner); + internal::VectorOperations::Vector_set setter(Number(), val); + internal::VectorOperations::parallel_for(setter, n_elements(), thread_loop_partitioner); return *this; } diff --git a/include/deal.II/lac/vector.templates.h b/include/deal.II/lac/vector.templates.h index 8975c13558..e710484248 100644 --- a/include/deal.II/lac/vector.templates.h +++ b/include/deal.II/lac/vector.templates.h @@ -116,7 +116,7 @@ Vector::Vector (const PETScWrappers::Vector &v) PetscErrorCode ierr = VecGetArray (static_cast(v), &start_ptr); AssertThrow (ierr == 0, ExcPETScError(ierr)); - internal::copy (start_ptr, start_ptr+vec_size, begin()); + internal::VectorOperations::copy (start_ptr, start_ptr+vec_size, begin()); // restore the representation of the // vector @@ -221,8 +221,8 @@ Vector::operator= (const Vector &v) if (vec_size != v.vec_size) reinit (v, true); - 
dealii::internal::Vector_copy copier(v.val, val); - internal::parallel_for(copier,vec_size,thread_loop_partitioner); + dealii::internal::VectorOperations::Vector_copy copier(v.val, val); + internal::VectorOperations::parallel_for(copier,vec_size,thread_loop_partitioner); return *this; } @@ -264,8 +264,8 @@ Vector::operator= (const Vector &v) if (vec_size != v.vec_size) reinit (v, true); - dealii::internal::Vector_copy copier(v.val, val); - internal::parallel_for(copier,vec_size,thread_loop_partitioner); + dealii::internal::VectorOperations::Vector_copy copier(v.val, val); + internal::VectorOperations::parallel_for(copier,vec_size,thread_loop_partitioner); return *this; } @@ -358,7 +358,7 @@ Vector::is_non_negative () const Assert (vec_size!=0, ExcEmptyObject()); for (size_type i=0; i::operator= (const Number s) if (s != Number()) Assert (vec_size!=0, ExcEmptyObject()); - internal::Vector_set setter(s, val); + internal::VectorOperations::Vector_set setter(s, val); - internal::parallel_for(setter,vec_size,thread_loop_partitioner); + internal::VectorOperations::parallel_for(setter,vec_size,thread_loop_partitioner); return *this; } @@ -390,9 +390,9 @@ Vector &Vector::operator *= (const Number factor) Assert (vec_size!=0, ExcEmptyObject()); - internal::Vectorization_multiply_factor vector_multiply(val, factor); + internal::VectorOperations::Vectorization_multiply_factor vector_multiply(val, factor); - internal::parallel_for(vector_multiply,vec_size,thread_loop_partitioner); + internal::VectorOperations::parallel_for(vector_multiply,vec_size,thread_loop_partitioner); return *this; } @@ -409,8 +409,8 @@ Vector::add (const Number a, Assert (vec_size!=0, ExcEmptyObject()); Assert (vec_size == v.vec_size, ExcDimensionMismatch(vec_size, v.vec_size)); - internal::Vectorization_add_av vector_add_av(val, v.val, a); - internal::parallel_for(vector_add_av,vec_size,thread_loop_partitioner); + internal::VectorOperations::Vectorization_add_av vector_add_av(val, v.val, a); + internal::VectorOperations::parallel_for(vector_add_av,vec_size,thread_loop_partitioner); } @@ -427,8 +427,8 @@ Vector::sadd (const Number x, Assert (vec_size!=0, ExcEmptyObject()); Assert (vec_size == v.vec_size, ExcDimensionMismatch(vec_size, v.vec_size)); - internal::Vectorization_sadd_xav vector_sadd_xav(val, v.val, a, x); - internal::parallel_for(vector_sadd_xav,vec_size,thread_loop_partitioner); + internal::VectorOperations::Vectorization_sadd_xav vector_sadd_xav(val, v.val, a, x); + internal::VectorOperations::parallel_for(vector_sadd_xav,vec_size,thread_loop_partitioner); } @@ -446,8 +446,8 @@ Number Vector::operator * (const Vector &v) const ExcDimensionMismatch(vec_size, v.size())); Number sum; - internal::Dot dot(val, v.val); - internal::parallel_reduce (dot, vec_size, sum, thread_loop_partitioner); + internal::VectorOperations::Dot dot(val, v.val); + internal::VectorOperations::parallel_reduce (dot, vec_size, sum, thread_loop_partitioner); AssertIsFinite(sum); return sum; @@ -462,8 +462,8 @@ Vector::norm_sqr () const Assert (vec_size!=0, ExcEmptyObject()); real_type sum; - internal::Norm2 norm2(val); - internal::parallel_reduce (norm2, vec_size, sum, thread_loop_partitioner); + internal::VectorOperations::Norm2 norm2(val); + internal::VectorOperations::parallel_reduce (norm2, vec_size, sum, thread_loop_partitioner); AssertIsFinite(sum); @@ -478,8 +478,8 @@ Number Vector::mean_value () const Assert (vec_size!=0, ExcEmptyObject()); Number sum; - internal::MeanValue mean(val); - internal::parallel_reduce (mean, vec_size, sum, 
thread_loop_partitioner); + internal::VectorOperations::MeanValue mean(val); + internal::VectorOperations::parallel_reduce (mean, vec_size, sum, thread_loop_partitioner); return sum / real_type(size()); } @@ -493,8 +493,8 @@ Vector::l1_norm () const Assert (vec_size!=0, ExcEmptyObject()); real_type sum; - internal::Norm1 norm1(val); - internal::parallel_reduce (norm1, vec_size, sum, thread_loop_partitioner); + internal::VectorOperations::Norm1 norm1(val); + internal::VectorOperations::parallel_reduce (norm1, vec_size, sum, thread_loop_partitioner); return sum; } @@ -513,9 +513,9 @@ Vector::l2_norm () const Assert (vec_size!=0, ExcEmptyObject()); real_type norm_square; - internal::Norm2 norm2(val); - internal::parallel_reduce (norm2, vec_size, norm_square, - thread_loop_partitioner); + internal::VectorOperations::Norm2 norm2(val); + internal::VectorOperations::parallel_reduce (norm2, vec_size, norm_square, + thread_loop_partitioner); if (numbers::is_finite(norm_square) && norm_square >= std::numeric_limits::min()) return std::sqrt(norm_square); @@ -557,8 +557,8 @@ Vector::lp_norm (const real_type p) const return l2_norm(); real_type sum; - internal::NormP normp(val, p); - internal::parallel_reduce (normp, vec_size, sum, thread_loop_partitioner); + internal::VectorOperations::NormP normp(val, p); + internal::VectorOperations::parallel_reduce (normp, vec_size, sum, thread_loop_partitioner); if (numbers::is_finite(sum) && sum >= std::numeric_limits::min()) return std::pow(sum, static_cast(1./p)); @@ -614,8 +614,8 @@ Vector::add_and_dot (const Number a, AssertDimension (vec_size, W.size()); Number sum; - internal::AddAndDot adder(this->val, V.val, W.val, a); - internal::parallel_reduce (adder, vec_size, sum, thread_loop_partitioner); + internal::VectorOperations::AddAndDot adder(this->val, V.val, W.val, a); + internal::VectorOperations::parallel_reduce (adder, vec_size, sum, thread_loop_partitioner); AssertIsFinite(sum); return sum; @@ -640,8 +640,8 @@ Vector &Vector::operator -= (const Vector &v) Assert (vec_size!=0, ExcEmptyObject()); Assert (vec_size == v.vec_size, ExcDimensionMismatch(vec_size, v.vec_size)); - internal::Vectorization_subtract_v vector_subtract(val, v.val); - internal::parallel_for(vector_subtract,vec_size,thread_loop_partitioner); + internal::VectorOperations::Vectorization_subtract_v vector_subtract(val, v.val); + internal::VectorOperations::parallel_for(vector_subtract,vec_size,thread_loop_partitioner); return *this; } @@ -653,8 +653,8 @@ void Vector::add (const Number v) { Assert (vec_size!=0, ExcEmptyObject()); - internal::Vectorization_add_factor vector_add(val, v); - internal::parallel_for(vector_add,vec_size,thread_loop_partitioner); + internal::VectorOperations::Vectorization_add_factor vector_add(val, v); + internal::VectorOperations::parallel_for(vector_add,vec_size,thread_loop_partitioner); } @@ -665,8 +665,8 @@ void Vector::add (const Vector &v) Assert (vec_size!=0, ExcEmptyObject()); Assert (vec_size == v.vec_size, ExcDimensionMismatch(vec_size, v.vec_size)); - internal::Vectorization_add_v vector_add(val, v.val); - internal::parallel_for(vector_add,vec_size,thread_loop_partitioner); + internal::VectorOperations::Vectorization_add_v vector_add(val, v.val); + internal::VectorOperations::parallel_for(vector_add,vec_size,thread_loop_partitioner); } @@ -682,8 +682,8 @@ void Vector::add (const Number a, const Vector &v, Assert (vec_size == v.vec_size, ExcDimensionMismatch(vec_size, v.vec_size)); Assert (vec_size == w.vec_size, ExcDimensionMismatch(vec_size, 
w.vec_size)); - internal::Vectorization_add_avpbw vector_add(val, v.val, w.val, a, b); - internal::parallel_for(vector_add,vec_size,thread_loop_partitioner); + internal::VectorOperations::Vectorization_add_avpbw vector_add(val, v.val, w.val, a, b); + internal::VectorOperations::parallel_for(vector_add,vec_size,thread_loop_partitioner); } @@ -697,8 +697,8 @@ void Vector::sadd (const Number x, Assert (vec_size!=0, ExcEmptyObject()); Assert (vec_size == v.vec_size, ExcDimensionMismatch(vec_size, v.vec_size)); - internal::Vectorization_sadd_xv vector_sadd(val, v.val, x); - internal::parallel_for(vector_sadd,vec_size,thread_loop_partitioner); + internal::VectorOperations::Vectorization_sadd_xv vector_sadd(val, v.val, x); + internal::VectorOperations::parallel_for(vector_sadd,vec_size,thread_loop_partitioner); } @@ -716,9 +716,9 @@ void Vector::sadd (const Number x, const Number a, Assert (vec_size == v.vec_size, ExcDimensionMismatch(vec_size, v.vec_size)); Assert (vec_size == w.vec_size, ExcDimensionMismatch(vec_size, w.vec_size)); - internal::Vectorization_sadd_xavbw vector_sadd(val, v.val, w.val, x, - a, b); - internal::parallel_for(vector_sadd,vec_size,thread_loop_partitioner); + internal::VectorOperations::Vectorization_sadd_xavbw vector_sadd(val, v.val, w.val, x, + a, b); + internal::VectorOperations::parallel_for(vector_sadd,vec_size,thread_loop_partitioner); } @@ -740,8 +740,8 @@ void Vector::scale (const Vector &s) Assert (vec_size!=0, ExcEmptyObject()); Assert (vec_size == s.vec_size, ExcDimensionMismatch(vec_size, s.vec_size)); - internal::Vectorization_scale vector_scale(val, s.val); - internal::parallel_for(vector_scale,vec_size,thread_loop_partitioner); + internal::VectorOperations::Vectorization_scale vector_scale(val, s.val); + internal::VectorOperations::parallel_for(vector_scale,vec_size,thread_loop_partitioner); } @@ -768,8 +768,8 @@ void Vector::equ (const Number a, Assert (vec_size!=0, ExcEmptyObject()); Assert (vec_size == u.vec_size, ExcDimensionMismatch(vec_size, u.vec_size)); - internal::Vectorization_equ_au vector_equ(val, u.val, a); - internal::parallel_for(vector_equ,vec_size,thread_loop_partitioner); + internal::VectorOperations::Vectorization_equ_au vector_equ(val, u.val, a); + internal::VectorOperations::parallel_for(vector_equ,vec_size,thread_loop_partitioner); } @@ -807,8 +807,8 @@ void Vector::equ (const Number a, const Vector &u, Assert (vec_size == u.vec_size, ExcDimensionMismatch(vec_size, u.vec_size)); Assert (vec_size == v.vec_size, ExcDimensionMismatch(vec_size, v.vec_size)); - internal::Vectorization_equ_aubv vector_equ(val, u.val, v.val, a, b); - internal::parallel_for(vector_equ,vec_size,thread_loop_partitioner); + internal::VectorOperations::Vectorization_equ_aubv vector_equ(val, u.val, v.val, a, b); + internal::VectorOperations::parallel_for(vector_equ,vec_size,thread_loop_partitioner); } @@ -822,9 +822,9 @@ void Vector::equ (const Number a, const Vector &u, Assert (vec_size == v.vec_size, ExcDimensionMismatch(vec_size, v.vec_size)); Assert (vec_size == w.vec_size, ExcDimensionMismatch(vec_size, w.vec_size)); - internal::Vectorization_equ_aubvcw vector_equ(val, u.val, v.val, w.val, - a, b, c); - internal::parallel_for(vector_equ,vec_size,thread_loop_partitioner); + internal::VectorOperations::Vectorization_equ_aubvcw vector_equ(val, u.val, v.val, w.val, + a, b, c); + internal::VectorOperations::parallel_for(vector_equ,vec_size,thread_loop_partitioner); } @@ -840,8 +840,8 @@ void Vector::ratio (const Vector &a, // we overwrite them anyway reinit 
(a.size(), true); - internal::Vectorization_ratio vector_ratio(val, a.val, b.val); - internal::parallel_for(vector_ratio,vec_size,thread_loop_partitioner); + internal::VectorOperations::Vectorization_ratio vector_ratio(val, a.val, b.val); + internal::VectorOperations::parallel_for(vector_ratio,vec_size,thread_loop_partitioner); } @@ -879,7 +879,7 @@ Vector::operator= (const PETScWrappers::Vector &v) PetscErrorCode ierr = VecGetArray (static_cast(v), &start_ptr); AssertThrow (ierr == 0, ExcPETScError(ierr)); - internal::copy (start_ptr, start_ptr+vec_size, begin()); + internal::VectorOperations::copy (start_ptr, start_ptr+vec_size, begin()); // restore the representation of the // vector @@ -976,7 +976,7 @@ void Vector::print (const char *format) const Assert (vec_size!=0, ExcEmptyObject()); for (size_type j=0; j - bool is_non_negative (const T &t) - { - return t >= 0; - } - - - template - bool is_non_negative (const std::complex &) - { - Assert (false, - ExcMessage ("Complex numbers do not have an ordering.")); - - return false; - } - - - template - void print (const T &t, - const char *format) + namespace VectorOperations { - if (format != 0) - std::printf (format, t); - else - std::printf (" %5.2f", double(t)); - } - - - - template - void print (const std::complex &t, - const char *format) - { - if (format != 0) - std::printf (format, t.real(), t.imag()); - else - std::printf (" %5.2f+%5.2fi", - double(t.real()), double(t.imag())); - } - - // call std::copy, except for in - // the case where we want to copy - // from std::complex to a - // non-complex type - template - void copy (const T *begin, - const T *end, - U *dest) - { - std::copy (begin, end, dest); - } - - template - void copy (const std::complex *begin, - const std::complex *end, - std::complex *dest) - { - std::copy (begin, end, dest); - } - - template - void copy (const std::complex *, - const std::complex *, - U *) - { - Assert (false, ExcMessage ("Can't convert a vector of complex numbers " - "into a vector of reals/doubles")); - } - - + typedef types::global_dof_index size_type; -#ifdef DEAL_II_WITH_THREADS - /** - * This struct takes the loop range from the tbb parallel for loop and - * translates it to the actual ranges of the for loop within the vector. It - * encodes the grain size but might choose larger values of chunks than the - * minimum grain size. The minimum grain size given to tbb is then simple - * 1. For affinity reasons, the layout in this loop must be kept in sync - * with the respective class for reductions further down. - */ - template - struct TBBForFunctor - { - TBBForFunctor(Functor &functor, - const size_type vec_size) - : - functor(functor), - vec_size(vec_size) + template + bool is_non_negative (const T &t) { - // set chunk size for sub-tasks - const unsigned int gs = internal::Vector::minimum_parallel_grain_size; - n_chunks = std::min(static_cast(4*MultithreadInfo::n_threads()), - vec_size / gs); - chunk_size = vec_size / n_chunks; - - // round to next multiple of 512 (or minimum grain size if that happens - // to be smaller). 
this is advantageous because our accumulation - // algorithms favor lengths of a power of 2 due to pairwise summation -> - // at most one 'oddly' sized chunk - if (chunk_size > 512) - chunk_size = ((chunk_size + 511)/512)*512; - n_chunks = (vec_size + chunk_size - 1) / chunk_size; - AssertIndexRange((n_chunks-1)*chunk_size, vec_size); - AssertIndexRange(vec_size, n_chunks*chunk_size+1); - }; - - void operator() (const tbb::blocked_range &range) const - { - const size_type begin = range.begin()*chunk_size; - const size_type end = std::min(range.end()*chunk_size, vec_size); - functor(begin, end); + return t >= 0; } - Functor &functor; - const size_type vec_size; - unsigned int n_chunks; - size_type chunk_size; - }; -#endif - - template - void parallel_for(Functor &functor, - size_type vec_size, - std_cxx11::shared_ptr &partitioner) - { -#ifdef DEAL_II_WITH_THREADS - // only go to the parallel function in case there are at least 4 parallel - // items, otherwise the overhead is too large - if (vec_size >= 4*internal::Vector::minimum_parallel_grain_size && - MultithreadInfo::n_threads() > 1) - { - Assert(partitioner.get() != NULL, - ExcInternalError("Unexpected initialization of Vector that does " - "not set the TBB partitioner to a usable state.")); - std_cxx11::shared_ptr tbb_partitioner = - partitioner->acquire_one_partitioner(); - - TBBForFunctor generic_functor(functor, vec_size); - tbb::parallel_for (tbb::blocked_range (0, - generic_functor.n_chunks, - 1), - generic_functor, - *tbb_partitioner); - partitioner->release_one_partitioner(tbb_partitioner); - } - else if (vec_size > 0) - functor(0,vec_size); -#else - functor(0,vec_size); - (void)partitioner; -#endif - } + template + bool is_non_negative (const std::complex &) + { + Assert (false, + ExcMessage ("Complex numbers do not have an ordering.")); - // Define the functors necessary to use SIMD with TBB. 
we also include the - // simple copy and set operations + return false; + } - template - struct Vector_set - { - Vector_set(Number value, Number *dst) - : - value(value), - dst(dst) - {} - void operator() (const size_type begin, const size_type end) const + template + void print (const T &t, + const char *format) { - if (value == Number()) - std::memset (dst+begin,0,(end-begin)*sizeof(Number)); + if (format != 0) + std::printf (format, t); else - std::fill (dst+begin, dst+end, value); + std::printf (" %5.2f", double(t)); } - Number value; - Number *dst; - }; - template - struct Vector_copy - { - Vector_copy(const OtherNumber *src, Number *dst) - : - src(src), - dst(dst) - {} - void operator() (const size_type begin, const size_type end) const + template + void print (const std::complex &t, + const char *format) { - if (types_are_equal::value) - std::memcpy(dst+begin, src+begin, (end-begin)*sizeof(Number)); + if (format != 0) + std::printf (format, t.real(), t.imag()); else - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i + void copy (const T *begin, + const T *end, + U *dest) + { + std::copy (begin, end, dest); + } - template - struct Vectorization_multiply_factor - { - Vectorization_multiply_factor(Number *val, Number factor) - : - val(val), - factor(factor) - {} + template + void copy (const std::complex *begin, + const std::complex *end, + std::complex *dest) + { + std::copy (begin, end, dest); + } - void operator() (const size_type begin, const size_type end) const + template + void copy (const std::complex *, + const std::complex *, + U *) { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i - struct Vectorization_add_av - { - Vectorization_add_av(Number *val, Number *v_val, Number factor) - : - val(val), - v_val(v_val), - factor(factor) - {} - - void operator() (const size_type begin, const size_type end) const + +#ifdef DEAL_II_WITH_THREADS + /** + * This struct takes the loop range from the tbb parallel for loop and + * translates it to the actual ranges of the for loop within the vector. It + * encodes the grain size but might choose larger values of chunks than the + * minimum grain size. The minimum grain size given to tbb is then simple + * 1. For affinity reasons, the layout in this loop must be kept in sync + * with the respective class for reductions further down. + */ + template + struct TBBForFunctor { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i(4*MultithreadInfo::n_threads()), + vec_size / gs); + chunk_size = vec_size / n_chunks; + + // round to next multiple of 512 (or minimum grain size if that happens + // to be smaller). 
this is advantageous because our accumulation + // algorithms favor lengths of a power of 2 due to pairwise summation -> + // at most one 'oddly' sized chunk + if (chunk_size > 512) + chunk_size = ((chunk_size + 511)/512)*512; + n_chunks = (vec_size + chunk_size - 1) / chunk_size; + AssertIndexRange((n_chunks-1)*chunk_size, vec_size); + AssertIndexRange(vec_size, n_chunks*chunk_size+1); + }; + + void operator() (const tbb::blocked_range &range) const + { + const size_type begin = range.begin()*chunk_size; + const size_type end = std::min(range.end()*chunk_size, vec_size); + functor(begin, end); + } - Number *val; - Number *v_val; - Number factor; - }; + Functor &functor; + const size_type vec_size; + unsigned int n_chunks; + size_type chunk_size; + }; +#endif - template - struct Vectorization_sadd_xav - { - Vectorization_sadd_xav(Number *val, Number *v_val, Number a, Number x) - : - val(val), - v_val(v_val), - a(a), - x(x) - {} - - void operator() (const size_type begin, const size_type end) const + template + void parallel_for(Functor &functor, + size_type vec_size, + std_cxx11::shared_ptr &partitioner) { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i= 4*internal::Vector::minimum_parallel_grain_size && + MultithreadInfo::n_threads() > 1) { - for (size_type i=begin; i tbb_partitioner = + partitioner->acquire_one_partitioner(); + + TBBForFunctor generic_functor(functor, vec_size); + tbb::parallel_for (tbb::blocked_range (0, + generic_functor.n_chunks, + 1), + generic_functor, + *tbb_partitioner); + partitioner->release_one_partitioner(tbb_partitioner); } + else if (vec_size > 0) + functor(0,vec_size); +#else + functor(0,vec_size); + (void)partitioner; +#endif } - Number *val; - Number *v_val; - Number a; - Number x; - }; - template - struct Vectorization_subtract_v - { - Vectorization_subtract_v(Number *val, Number *v_val) - : - val(val), - v_val(v_val) - {} + // Define the functors necessary to use SIMD with TBB. 
we also include the + // simple copy and set operations - void operator() (const size_type begin, const size_type end) const + template + struct Vector_set { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i - struct Vectorization_add_factor - { - Vectorization_add_factor(Number *val, Number factor) - : - val(val), - factor(factor) - {} + Number value; + Number *dst; + }; - void operator() (const size_type begin, const size_type end) const + template + struct Vector_copy { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i::value) + std::memcpy(dst+begin, src+begin, (end-begin)*sizeof(Number)); + else + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i - struct Vectorization_add_v - { - Vectorization_add_v(Number *val, Number *v_val) - : - val(val), - v_val(v_val) - {} + const OtherNumber *src; + Number *dst; + }; - void operator() (const size_type begin, const size_type end) const + template + struct Vectorization_multiply_factor { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i - struct Vectorization_add_avpbw - { - Vectorization_add_avpbw(Number *val, Number *v_val, Number *w_val, Number a, Number b) - : - val(val), - v_val(v_val), - w_val(w_val), - a(a), - b(b) - {} - - void operator() (const size_type begin, const size_type end) const + template + struct Vectorization_add_av { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i - struct Vectorization_sadd_xv - { - Vectorization_sadd_xv(Number *val, Number *v_val, Number x) - : - val(val), - v_val(v_val), - x(x) - {} - - void operator() (const size_type begin, const size_type end) const + template + struct Vectorization_sadd_xav { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i - struct Vectorization_sadd_xavbw - { - Vectorization_sadd_xavbw(Number *val, Number *v_val, Number *w_val, - Number x, Number a, Number b) - : - val(val), - v_val(v_val), - w_val(w_val), - x(x), - a(a), - b(b) - {} - - void operator() (const size_type begin, const size_type end) const + template + struct Vectorization_subtract_v { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i - struct Vectorization_scale - { - Vectorization_scale(Number *val, Number *v_val) - : - val(val), - v_val(v_val) - {} + Number *val; + Number *v_val; + }; - void operator() (const size_type begin, const size_type end) const + template + struct Vectorization_add_factor { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i - struct Vectorization_equ_au - { - Vectorization_equ_au(Number *val, Number *u_val, Number a) - : - val(val), - u_val(u_val), - a(a) - {} - - void operator() (const size_type begin, const size_type end) const - { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i - struct Vectorization_equ_aubv - { - 
Vectorization_equ_aubv(Number *val, Number *u_val, Number *v_val, - Number a, Number b) - : - val(val), - u_val(u_val), - v_val(v_val), - a(a), - b(b) - {} - - void operator() (const size_type begin, const size_type end) const + template + struct Vectorization_add_v { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i - struct Vectorization_equ_aubvcw - { - Vectorization_equ_aubvcw(Number *val, Number *u_val, Number *v_val, - Number *w_val, Number a, Number b, Number c) - : - val(val), - u_val(u_val), - v_val(v_val), - w_val(w_val), - a(a), - b(b), - c(c) - {} - - void operator() (const size_type begin, const size_type end) const - { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i - struct Vectorization_ratio - { - Vectorization_ratio(Number *val, Number *a_val, Number *b_val) - : - val(val), - a_val(a_val), - b_val(b_val) - {} - - void operator() (const size_type begin, const size_type end) const + template + struct Vectorization_add_avpbw { - if (parallel::internal::EnableOpenMPSimdFor::value) - { - DEAL_II_OPENMP_SIMD_PRAGMA - for (size_type i=begin; i::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i + struct Vectorization_sadd_xv + { + Vectorization_sadd_xv(Number *val, Number *v_val, Number x) + : + val(val), + v_val(v_val), + x(x) + {} + + void operator() (const size_type begin, const size_type end) const + { + if (parallel::internal::EnableOpenMPSimdFor::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i - struct Dot - { - static const bool vectorizes = types_are_equal::value && - (VectorizedArray::n_array_elements > 1); + template + struct Vectorization_sadd_xavbw + { + Vectorization_sadd_xavbw(Number *val, Number *v_val, Number *w_val, + Number x, Number a, Number b) + : + val(val), + v_val(v_val), + w_val(w_val), + x(x), + a(a), + b(b) + {} + + void operator() (const size_type begin, const size_type end) const + { + if (parallel::internal::EnableOpenMPSimdFor::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i + struct Vectorization_scale { - return X[i] * Number(numbers::NumberTraits::conjugate(Y[i])); - } + Vectorization_scale(Number *val, Number *v_val) + : + val(val), + v_val(v_val) + {} - VectorizedArray - do_vectorized(const size_type i) const + void operator() (const size_type begin, const size_type end) const + { + if (parallel::internal::EnableOpenMPSimdFor::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i + struct Vectorization_equ_au { - VectorizedArray x, y; - x.load(X+i); - y.load(Y+i); - return x * y; - } + Vectorization_equ_au(Number *val, Number *u_val, Number a) + : + val(val), + u_val(u_val), + a(a) + {} + + void operator() (const size_type begin, const size_type end) const + { + if (parallel::internal::EnableOpenMPSimdFor::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i - struct Norm2 - { - static const bool vectorizes = VectorizedArray::n_array_elements > 1; + template + struct Vectorization_equ_aubv + { + Vectorization_equ_aubv(Number *val, Number *u_val, Number *v_val, + Number a, Number b) + : + val(val), + u_val(u_val), + v_val(v_val), + a(a), + b(b) + {} + + void operator() (const size_type begin, const size_type end) const + { + if (parallel::internal::EnableOpenMPSimdFor::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i + struct 
Vectorization_equ_aubvcw { - return numbers::NumberTraits::abs_square(X[i]); - } + Vectorization_equ_aubvcw(Number *val, Number *u_val, Number *v_val, + Number *w_val, Number a, Number b, Number c) + : + val(val), + u_val(u_val), + v_val(v_val), + w_val(w_val), + a(a), + b(b), + c(c) + {} + + void operator() (const size_type begin, const size_type end) const + { + if (parallel::internal::EnableOpenMPSimdFor::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i - do_vectorized(const size_type i) const + template + struct Vectorization_ratio { - VectorizedArray x; - x.load(X+i); - return x * x; - } + Vectorization_ratio(Number *val, Number *a_val, Number *b_val) + : + val(val), + a_val(a_val), + b_val(b_val) + {} + + void operator() (const size_type begin, const size_type end) const + { + if (parallel::internal::EnableOpenMPSimdFor::value) + { + DEAL_II_OPENMP_SIMD_PRAGMA + for (size_type i=begin; i - struct Norm1 - { - static const bool vectorizes = VectorizedArray::n_array_elements > 1; - Norm1(const Number *X) - : - X(X) - {} - RealType - operator() (const size_type i) const + // All sums over all the vector entries (l2-norm, inner product, etc.) are + // performed with the same code, using a templated operation defined + // here. There are always two versions defined, a standard one that covers + // most cases and a vectorized one which is only for equal types and float + // and double. + template + struct Dot { - return numbers::NumberTraits::abs(X[i]); - } + static const bool vectorizes = types_are_equal::value && + (VectorizedArray::n_array_elements > 1); - VectorizedArray - do_vectorized(const size_type i) const - { - VectorizedArray x; - x.load(X+i); - return std::abs(x); - } + Dot(const Number *X, const Number2 *Y) + : + X(X), + Y(Y) + {} - const Number *X; - }; + Number + operator() (const size_type i) const + { + return X[i] * Number(numbers::NumberTraits::conjugate(Y[i])); + } - template - struct NormP - { - static const bool vectorizes = VectorizedArray::n_array_elements > 1; + VectorizedArray + do_vectorized(const size_type i) const + { + VectorizedArray x, y; + x.load(X+i); + y.load(Y+i); + return x * y; + } - NormP(const Number *X, RealType p) - : - X(X), - p(p) - {} + const Number *X; + const Number2 *Y; + }; - RealType - operator() (const size_type i) const + template + struct Norm2 { - return std::pow(numbers::NumberTraits::abs(X[i]), p); - } + static const bool vectorizes = VectorizedArray::n_array_elements > 1; - VectorizedArray - do_vectorized(const size_type i) const - { - VectorizedArray x; - x.load(X+i); - return std::pow(std::abs(x),p); - } + Norm2(const Number *X) + : + X(X) + {} - const Number *X; - RealType p; - }; + RealType + operator() (const size_type i) const + { + return numbers::NumberTraits::abs_square(X[i]); + } - template - struct MeanValue - { - static const bool vectorizes = VectorizedArray::n_array_elements > 1; + VectorizedArray + do_vectorized(const size_type i) const + { + VectorizedArray x; + x.load(X+i); + return x * x; + } - MeanValue(const Number *X) - : - X(X) - {} + const Number *X; + }; - Number - operator() (const size_type i) const + template + struct Norm1 { - return X[i]; - } + static const bool vectorizes = VectorizedArray::n_array_elements > 1; - VectorizedArray - do_vectorized(const size_type i) const - { - VectorizedArray x; - x.load(X+i); - return x; - } + Norm1(const Number *X) + : + X(X) + {} + + RealType + operator() (const size_type i) const + { + return numbers::NumberTraits::abs(X[i]); + } - const Number *X; - 
}; + VectorizedArray + do_vectorized(const size_type i) const + { + VectorizedArray x; + x.load(X+i); + return std::abs(x); + } - template - struct AddAndDot - { - static const bool vectorizes = VectorizedArray::n_array_elements > 1; - - AddAndDot(Number *X, const Number *V, const Number *W, Number a) - : - X(X), - V(V), - W(W), - a(a) - {} - - Number - operator() (const size_type i) const - { - X[i] += a * V[i]; - return X[i] * Number(numbers::NumberTraits::conjugate(W[i])); - } + const Number *X; + }; - VectorizedArray - do_vectorized(const size_type i) const + template + struct NormP { - VectorizedArray x, w, v; - x.load(X+i); - v.load(V+i); - x += a * v; - x.store(X+i); - // may only load from W after storing in X because the pointers might - // point to the same memory - w.load(W+i); - return x * w; - } + static const bool vectorizes = VectorizedArray::n_array_elements > 1; - Number *X; - const Number *V, *W; - Number a; - }; - - - - // this is the main working loop for all vector sums using the templated - // operation above. it accumulates the sums using a block-wise summation - // algorithm with post-update. this blocked algorithm has been proposed in - // a similar form by Castaldo, Whaley and Chronopoulos (SIAM - // J. Sci. Comput. 31, 1156-1174, 2008) and we use the smallest possible - // block size, 2. Sometimes it is referred to as pairwise summation. The - // worst case error made by this algorithm is on the order O(eps * - // log2(vec_size)), whereas a naive summation is O(eps * vec_size). Even - // though the Kahan summation is even more accurate with an error O(eps) - // by carrying along remainders not captured by the main sum, that involves - // additional costs which are not worthwhile. See the Wikipedia article on - // the Kahan summation algorithm. - - // The algorithm implemented here has the additional benefit that it is - // easily parallelized without changing the order of how the elements are - // added (floating point addition is not associative). For the same vector - // size and minimum_parallel_grainsize, the blocks are always the - // same and added pairwise. - - // The depth of recursion is controlled by the 'magic' parameter - // vector_accumulation_recursion_threshold: If the length is below - // vector_accumulation_recursion_threshold * 32 (32 is the part of code we - // unroll), a straight loop instead of recursion will be used. At the - // innermost level, eight values are added consecutively in order to better - // balance multiplications and additions. - - // Loops are unrolled as follows: the range [first,last) is broken into - // @p n_chunks each of size 32 plus the @p remainder. - // accumulate_regular() does the work on 32*n_chunks elements employing SIMD - // if possible and stores the result of the operation for each chunk in @p outer_results. - - // The code returns the result as the last argument in order to make - // spawning tasks simpler and use automatic template deduction. - - - /** - * The minimum number of chunks (each of size 32) to divide the range - * [first,last) into two (second part of the if branch in accumulate_recursive). - */ - const unsigned int vector_accumulation_recursion_threshold = 128; - - template - void accumulate_recursive (const Operation &op, - const size_type first, - const size_type last, - ResultType &result) - { - const size_type vec_size = last - first; - if (vec_size <= vector_accumulation_recursion_threshold * 32) - { - // the vector is short enough so we perform the summation. 
first - // work on the regular part. The innermost 32 values are expanded in - // order to obtain known loop bounds for most of the work. - size_type index = first; - ResultType outer_results [vector_accumulation_recursion_threshold]; - - // set the zeroth element to zero to correctly handle the case where - // vec_size == 0 - outer_results[0] = ResultType(); - - // the variable serves two purposes: (i) number of chunks (each 32 indices) - // for the given size; all results are stored in outer_results[0,n_chunks) - // (ii) in the SIMD case n_chunks is also a next free index in outer_results[] - // to which we can write after accumulate_regular() is executed. - size_type n_chunks = vec_size / 32; - const size_type remainder = vec_size % 32; - Assert (remainder == 0 || n_chunks < vector_accumulation_recursion_threshold, - ExcInternalError()); - - // Select between the regular version and vectorized version based - // on the number types we are given. To choose the vectorized - // version often enough, we need to have all tasks but the last one - // to be divisible by the vectorization length - accumulate_regular(op, n_chunks, index, outer_results, - internal::bool2type()); - - // now work on the remainder, i.e., the last up to 32 values. Use - // switch statement with fall-through to work on these values. - if (remainder > 0) - { - // if we got here, it means that (vec_size <= vector_accumulation_recursion_threshold * 32), - // which is to say that the domain can be split into n_chunks <= vector_accumulation_recursion_threshold: - AssertIndexRange(n_chunks, vector_accumulation_recursion_threshold+1); - // split the remainder into chunks of 8, there could be up to 3 - // such chunks since remainder < 32. - // Work on those chunks without any SIMD, that is we call op(index). - const size_type inner_chunks = remainder / 8; - Assert (inner_chunks <= 3, ExcInternalError()); - const size_type remainder_inner = remainder % 8; - ResultType r0 = ResultType(), r1 = ResultType(), - r2 = ResultType(); - switch (inner_chunks) - { - case 3: - r2 = op(index++); - for (size_type j=1; j<8; ++j) - r2 += op(index++); - // no break - case 2: - r1 = op(index++); - for (size_type j=1; j<8; ++j) - r1 += op(index++); - r1 += r2; - // no break - case 1: - r2 = op(index++); - for (size_type j=1; j<8; ++j) - r2 += op(index++); - // no break - default: - for (size_type j=0; j 1) - { - if (n_chunks % 2 == 1) - outer_results[n_chunks++] = ResultType(); - for (size_type i=0; i::abs(X[i]), p); } - else + + VectorizedArray + do_vectorized(const size_type i) const { - // split vector into four pieces and work on the pieces - // recursively. Make pieces (except last) divisible by one fourth the - // recursion threshold. - const size_type new_size = - (vec_size / (vector_accumulation_recursion_threshold * 32)) * - vector_accumulation_recursion_threshold * 8; - Assert (first+3*new_size < last, - ExcInternalError()); - ResultType r0, r1, r2, r3; - accumulate_recursive (op, first, first+new_size, r0); - accumulate_recursive (op, first+new_size, first+2*new_size, r1); - accumulate_recursive (op, first+2*new_size, first+3*new_size, r2); - accumulate_recursive (op, first+3*new_size, last, r3); - r0 += r1; - r2 += r3; - result = r0 + r2; + VectorizedArray x; + x.load(X+i); + return std::pow(std::abs(x),p); } - } + const Number *X; + RealType p; + }; - // this is the inner working routine for the accumulation loops - // below. This is the standard case where the loop bounds are known. 
We - // pulled this function out of the regular accumulate routine because we - // might do this thing vectorized (see specialized function below) - template - void - accumulate_regular(const Operation &op, - size_type &n_chunks, - size_type &index, - ResultType (&outer_results)[vector_accumulation_recursion_threshold], - internal::bool2type) - { - // note that each chunk is chosen to have a width of 32, thereby the index - // is incremented by 4*8 for each @p i. - for (size_type i=0; i + struct MeanValue + { + static const bool vectorizes = VectorizedArray::n_array_elements > 1; + + MeanValue(const Number *X) + : + X(X) + {} + + Number + operator() (const size_type i) const { - ResultType r0 = op(index); - ResultType r1 = op(index+1); - ResultType r2 = op(index+2); - ResultType r3 = op(index+3); - index += 4; - for (size_type j=1; j<8; ++j, index += 4) - { - r0 += op(index); - r1 += op(index+1); - r2 += op(index+2); - r3 += op(index+3); - } - r0 += r1; - r2 += r3; - outer_results[i] = r0 + r2; + return X[i]; } - } + VectorizedArray + do_vectorized(const size_type i) const + { + VectorizedArray x; + x.load(X+i); + return x; + } + const Number *X; + }; - // this is the inner working routine for the accumulation loops - // below. This is the specialized case where the loop bounds are known and - // where we can vectorize. In that case, we request the 'do_vectorized' - // routine of the operation instead of the regular one which does several - // operations at once. - template - void - accumulate_regular(const Operation &op, - size_type &n_chunks, - size_type &index, - Number (&outer_results)[vector_accumulation_recursion_threshold], - internal::bool2type) - { - // we start from @p index and workout @p n_chunks each of size 32. - // in order employ SIMD and work on @p nvecs at a time, we split this - // loop yet again: - // First we work on (n_chunks/nvecs) chunks, where each chunk processes - // nvecs*(4*8) elements. - - const unsigned int nvecs = VectorizedArray::n_array_elements; - const size_type regular_chunks = n_chunks/nvecs; - for (size_type i=0; i + struct AddAndDot + { + static const bool vectorizes = VectorizedArray::n_array_elements > 1; + + AddAndDot(Number *X, const Number *V, const Number *W, Number a) + : + X(X), + V(V), + W(W), + a(a) + {} + + Number + operator() (const size_type i) const { - VectorizedArray r0 = op.do_vectorized(index); - VectorizedArray r1 = op.do_vectorized(index+nvecs); - VectorizedArray r2 = op.do_vectorized(index+2*nvecs); - VectorizedArray r3 = op.do_vectorized(index+3*nvecs); - index += nvecs*4; - for (size_type j=1; j<8; ++j, index += nvecs*4) - { - r0 += op.do_vectorized(index); - r1 += op.do_vectorized(index+nvecs); - r2 += op.do_vectorized(index+2*nvecs); - r3 += op.do_vectorized(index+3*nvecs); - } - r0 += r1; - r2 += r3; - r0 += r2; - r0.store(&outer_results[i*VectorizedArray::n_array_elements]); + X[i] += a * V[i]; + return X[i] * Number(numbers::NumberTraits::conjugate(W[i])); } - // If we are treating a case where the vector length is not divisible by - // the vectorization length, need a cleanup loop - // The remaining chunks are processed one by one starting from regular_chunks * nvecs; - // We do as much as possible with 2 SIMD operations within each chunk. - // Here we assume that nvecs < 32/2 = 16 as well as 16%nvecs==0. 
- AssertIndexRange(VectorizedArray::n_array_elements, - 17); - Assert (16 % nvecs == 0, - ExcInternalError()); - if (n_chunks % VectorizedArray::n_array_elements != 0) + VectorizedArray + do_vectorized(const size_type i) const { - VectorizedArray r0 = VectorizedArray(), - r1 = VectorizedArray(); - const size_type start_irreg = regular_chunks * nvecs; - for (size_type c=start_irreg; c::n_array_elements; + VectorizedArray x, w, v; + x.load(X+i); + v.load(V+i); + x += a * v; + x.store(X+i); + // may only load from W after storing in X because the pointers might + // point to the same memory + w.load(W+i); + return x * w; } - } + Number *X; + const Number *V, *W; + Number a; + }; -#ifdef DEAL_II_WITH_THREADS - /** - * This struct takes the loop range from the tbb parallel for loop and - * translates it to the actual ranges of the reduction loop inside the - * vector. It encodes the grain size but might choose larger values of - * chunks than the minimum grain size. The minimum grain size given to tbb - * is 1. For affinity reasons, the layout in this loop must be kept in sync - * with the respective class for plain for loops further up. - * - * Due to this construction, TBB usually only sees a loop of length - * 4*num_threads with grain size 1. The actual ranges inside the vector are - * computed outside of TBB because otherwise TBB would split the ranges in - * some unpredictable position which destroys exact bitwise - * reproducibility. An important part of this is that inside - * TBBReduceFunctor::operator() the recursive calls to accumulate are done - * sequentially on one item a time (even though we could directly run it on - * the whole range given through the tbb::blocked_range times the chunk size - * - but that would be unpredictable). Thus, the values we cannot control - * are the positions in the array that gets filled - but up to that point - * the algorithm TBB sees is just a parallel for and nothing unpredictable - * can happen. - * - * To sum up: Once the number of threads and the vector size are fixed, we - * have an exact layout of how the calls into the recursive function will - * happen. Inside the recursive function, we again only depend on the - * length. Finally, the concurrent threads write into different positions in - * a result vector in a thread-safe way and the addition in the short array - * is again serial. - */ - template - struct TBBReduceFunctor - { - static const unsigned int threshold_array_allocate = 512; - TBBReduceFunctor(const Operation &op, - const size_type vec_size) - : - op(op), - vec_size(vec_size) + // this is the main working loop for all vector sums using the templated + // operation above. it accumulates the sums using a block-wise summation + // algorithm with post-update. this blocked algorithm has been proposed in + // a similar form by Castaldo, Whaley and Chronopoulos (SIAM + // J. Sci. Comput. 31, 1156-1174, 2008) and we use the smallest possible + // block size, 2. Sometimes it is referred to as pairwise summation. The + // worst case error made by this algorithm is on the order O(eps * + // log2(vec_size)), whereas a naive summation is O(eps * vec_size). Even + // though the Kahan summation is even more accurate with an error O(eps) + // by carrying along remainders not captured by the main sum, that involves + // additional costs which are not worthwhile. See the Wikipedia article on + // the Kahan summation algorithm. 
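(Editorial note, not part of the patch.) The comment block above describes the blocked pairwise summation that accumulate_recursive() implements. Stripped of the unrolling and SIMD machinery, the error-reduction idea is just the following recursion; the function name and the base-case length of 32 are chosen here only to mirror the text:

// Minimal sketch of pairwise summation (illustration only; needs <cstddef>).
// Splitting the range in half and adding the two partial sums keeps the
// worst-case rounding error at O(eps * log2(n)) instead of the O(eps * n)
// of a single running sum.
template <typename Number>
Number pairwise_sum (const Number *x, const std::size_t n)
{
  if (n <= 32)                      // base case: short enough, sum directly
    {
      Number s = Number();
      for (std::size_t i = 0; i < n; ++i)
        s += x[i];
      return s;
    }
  const std::size_t half = n / 2;   // otherwise recurse on the two halves
  return pairwise_sum (x, half) + pairwise_sum (x + half, n - half);
}

The code in this patch differs in that it unrolls and vectorizes the base case and splits the range into four pieces at a time, but the ordering guarantee is the same.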
+ + // The algorithm implemented here has the additional benefit that it is + // easily parallelized without changing the order of how the elements are + // added (floating point addition is not associative). For the same vector + // size and minimum_parallel_grainsize, the blocks are always the + // same and added pairwise. + + // The depth of recursion is controlled by the 'magic' parameter + // vector_accumulation_recursion_threshold: If the length is below + // vector_accumulation_recursion_threshold * 32 (32 is the part of code we + // unroll), a straight loop instead of recursion will be used. At the + // innermost level, eight values are added consecutively in order to better + // balance multiplications and additions. + + // Loops are unrolled as follows: the range [first,last) is broken into + // @p n_chunks each of size 32 plus the @p remainder. + // accumulate_regular() does the work on 32*n_chunks elements employing SIMD + // if possible and stores the result of the operation for each chunk in @p outer_results. + + // The code returns the result as the last argument in order to make + // spawning tasks simpler and use automatic template deduction. + + + /** + * The minimum number of chunks (each of size 32) to divide the range + * [first,last) into two (second part of the if branch in accumulate_recursive). + */ + const unsigned int vector_accumulation_recursion_threshold = 128; + + template + void accumulate_recursive (const Operation &op, + const size_type first, + const size_type last, + ResultType &result) { - // set chunk size for sub-tasks - const unsigned int gs = internal::Vector::minimum_parallel_grain_size; - n_chunks = std::min(static_cast(4*MultithreadInfo::n_threads()), - vec_size / gs); - chunk_size = vec_size / n_chunks; - - // round to next multiple of 512 (or leave it at the minimum grain size - // if that happens to be smaller). this is advantageous because our - // algorithm favors lengths of a power of 2 due to pairwise summation -> - // at most one 'oddly' sized chunk - if (chunk_size > 512) - chunk_size = ((chunk_size + 511)/512)*512; - n_chunks = (vec_size + chunk_size - 1) / chunk_size; - AssertIndexRange((n_chunks-1)*chunk_size, vec_size); - AssertIndexRange(vec_size, n_chunks*chunk_size+1); - - if (n_chunks > threshold_array_allocate) + const size_type vec_size = last - first; + if (vec_size <= vector_accumulation_recursion_threshold * 32) { - // make sure we allocate an even number of elements, - // access to the new last element is needed in do_sum() - large_array.resize(2*((n_chunks+1)/2)); - array_ptr = &large_array[0]; + // the vector is short enough so we perform the summation. first + // work on the regular part. The innermost 32 values are expanded in + // order to obtain known loop bounds for most of the work. + size_type index = first; + ResultType outer_results [vector_accumulation_recursion_threshold]; + + // set the zeroth element to zero to correctly handle the case where + // vec_size == 0 + outer_results[0] = ResultType(); + + // the variable serves two purposes: (i) number of chunks (each 32 indices) + // for the given size; all results are stored in outer_results[0,n_chunks) + // (ii) in the SIMD case n_chunks is also a next free index in outer_results[] + // to which we can write after accumulate_regular() is executed. 
+ size_type n_chunks = vec_size / 32; + const size_type remainder = vec_size % 32; + Assert (remainder == 0 || n_chunks < vector_accumulation_recursion_threshold, + ExcInternalError()); + + // Select between the regular version and vectorized version based + // on the number types we are given. To choose the vectorized + // version often enough, we need to have all tasks but the last one + // to be divisible by the vectorization length + accumulate_regular(op, n_chunks, index, outer_results, + internal::bool2type()); + + // now work on the remainder, i.e., the last up to 32 values. Use + // switch statement with fall-through to work on these values. + if (remainder > 0) + { + // if we got here, it means that (vec_size <= vector_accumulation_recursion_threshold * 32), + // which is to say that the domain can be split into n_chunks <= vector_accumulation_recursion_threshold: + AssertIndexRange(n_chunks, vector_accumulation_recursion_threshold+1); + // split the remainder into chunks of 8, there could be up to 3 + // such chunks since remainder < 32. + // Work on those chunks without any SIMD, that is we call op(index). + const size_type inner_chunks = remainder / 8; + Assert (inner_chunks <= 3, ExcInternalError()); + const size_type remainder_inner = remainder % 8; + ResultType r0 = ResultType(), r1 = ResultType(), + r2 = ResultType(); + switch (inner_chunks) + { + case 3: + r2 = op(index++); + for (size_type j=1; j<8; ++j) + r2 += op(index++); + // no break + case 2: + r1 = op(index++); + for (size_type j=1; j<8; ++j) + r1 += op(index++); + r1 += r2; + // no break + case 1: + r2 = op(index++); + for (size_type j=1; j<8; ++j) + r2 += op(index++); + // no break + default: + for (size_type j=0; j 1) + { + if (n_chunks % 2 == 1) + outer_results[n_chunks++] = ResultType(); + for (size_type i=0; i &range) const + + // this is the inner working routine for the accumulation loops + // below. This is the standard case where the loop bounds are known. We + // pulled this function out of the regular accumulate routine because we + // might do this thing vectorized (see specialized function below) + template + void + accumulate_regular(const Operation &op, + size_type &n_chunks, + size_type &index, + ResultType (&outer_results)[vector_accumulation_recursion_threshold], + internal::bool2type) { - for (size_type i = range.begin(); i < range.end(); ++i) - accumulate_recursive(op, i*chunk_size, std::min((i+1)*chunk_size, vec_size), - array_ptr[i]); + // note that each chunk is chosen to have a width of 32, thereby the index + // is incremented by 4*8 for each @p i. + for (size_type i=0; i + void + accumulate_regular(const Operation &op, + size_type &n_chunks, + size_type &index, + Number (&outer_results)[vector_accumulation_recursion_threshold], + internal::bool2type) { - while (n_chunks > 1) + // we start from @p index and workout @p n_chunks each of size 32. + // in order employ SIMD and work on @p nvecs at a time, we split this + // loop yet again: + // First we work on (n_chunks/nvecs) chunks, where each chunk processes + // nvecs*(4*8) elements. 
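(Editorial note, not part of the patch.) The chunks of 32 mentioned above are each processed with four independent accumulators of eight summands, so consecutive additions do not stall on one another. Written out for a plain sum, one chunk looks like the sketch below; the function name is invented, and the real code applies op(index) or op.do_vectorized(index) instead of reading x directly:

// Illustration only: the 4x8 accumulation pattern of accumulate_regular()
// for one chunk of 32 consecutive entries starting at 'index'.
// Needs <cstddef>.
template <typename Number>
Number sum_chunk_of_32 (const Number *x, std::size_t index)
{
  Number r0 = x[index], r1 = x[index+1], r2 = x[index+2], r3 = x[index+3];
  index += 4;
  for (unsigned int j = 1; j < 8; ++j, index += 4)
    {
      r0 += x[index];
      r1 += x[index+1];
      r2 += x[index+2];
      r3 += x[index+3];
    }
  r0 += r1;
  r2 += r3;
  return r0 + r2;   // the caller stores this partial result in outer_results[]
}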
+ + const unsigned int nvecs = VectorizedArray::n_array_elements; + const size_type regular_chunks = n_chunks/nvecs; + for (size_type i=0; i r0 = op.do_vectorized(index); + VectorizedArray r1 = op.do_vectorized(index+nvecs); + VectorizedArray r2 = op.do_vectorized(index+2*nvecs); + VectorizedArray r3 = op.do_vectorized(index+3*nvecs); + index += nvecs*4; + for (size_type j=1; j<8; ++j, index += nvecs*4) + { + r0 += op.do_vectorized(index); + r1 += op.do_vectorized(index+nvecs); + r2 += op.do_vectorized(index+2*nvecs); + r3 += op.do_vectorized(index+3*nvecs); + } + r0 += r1; + r2 += r3; + r0 += r2; + r0.store(&outer_results[i*VectorizedArray::n_array_elements]); } - return array_ptr[0]; - } - const Operation &op; - const size_type vec_size; - - mutable unsigned int n_chunks; - unsigned int chunk_size; - ResultType small_array [threshold_array_allocate]; - std::vector large_array; - // this variable either points to small_array or large_array depending on - // the number of threads we want to feed - mutable ResultType *array_ptr; - }; -#endif + // If we are treating a case where the vector length is not divisible by + // the vectorization length, need a cleanup loop + // The remaining chunks are processed one by one starting from regular_chunks * nvecs; + // We do as much as possible with 2 SIMD operations within each chunk. + // Here we assume that nvecs < 32/2 = 16 as well as 16%nvecs==0. + AssertIndexRange(VectorizedArray::n_array_elements, + 17); + Assert (16 % nvecs == 0, + ExcInternalError()); + if (n_chunks % VectorizedArray::n_array_elements != 0) + { + VectorizedArray r0 = VectorizedArray(), + r1 = VectorizedArray(); + const size_type start_irreg = regular_chunks * nvecs; + for (size_type c=start_irreg; c::n_array_elements; + } + } - /** - * This is the general caller for parallel reduction operations that work in - * parallel. - */ - template - void parallel_reduce (const Operation &op, - const size_type vec_size, - ResultType &result, - std_cxx11::shared_ptr &partitioner) - { #ifdef DEAL_II_WITH_THREADS - // only go to the parallel function in case there are at least 4 parallel - // items, otherwise the overhead is too large - if (vec_size >= 4*internal::Vector::minimum_parallel_grain_size && - MultithreadInfo::n_threads() > 1) + /** + * This struct takes the loop range from the tbb parallel for loop and + * translates it to the actual ranges of the reduction loop inside the + * vector. It encodes the grain size but might choose larger values of + * chunks than the minimum grain size. The minimum grain size given to tbb + * is 1. For affinity reasons, the layout in this loop must be kept in sync + * with the respective class for plain for loops further up. + * + * Due to this construction, TBB usually only sees a loop of length + * 4*num_threads with grain size 1. The actual ranges inside the vector are + * computed outside of TBB because otherwise TBB would split the ranges in + * some unpredictable position which destroys exact bitwise + * reproducibility. An important part of this is that inside + * TBBReduceFunctor::operator() the recursive calls to accumulate are done + * sequentially on one item a time (even though we could directly run it on + * the whole range given through the tbb::blocked_range times the chunk size + * - but that would be unpredictable). Thus, the values we cannot control + * are the positions in the array that gets filled - but up to that point + * the algorithm TBB sees is just a parallel for and nothing unpredictable + * can happen. 
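(Editorial note, not part of the patch.) The reproducibility argument above rests on the chunk boundaries being fixed before TBB ever sees the loop. Condensed from the constructor that follows, the layout computation amounts to the sketch below; the grain-size value 4096 is an assumption of this sketch, the library uses internal::Vector::minimum_parallel_grain_size:

// Illustration only: fixing the chunk layout up front so that the summation
// order, and hence the bit pattern of the result, does not depend on how TBB
// splits its range. Needs <algorithm> and <cstddef>.
// Precondition (checked by parallel_reduce() further down):
// vec_size >= 4*grain_size.
void compute_chunk_layout (const std::size_t  vec_size,
                           const unsigned int n_threads,
                           std::size_t       &n_chunks,
                           std::size_t       &chunk_size)
{
  const std::size_t grain_size = 4096;   // assumed value of the minimum grain size
  n_chunks   = std::min<std::size_t> (4*n_threads, vec_size/grain_size);
  chunk_size = vec_size / n_chunks;

  // round the chunk size up to a multiple of 512 so chunk lengths are powers
  // of two as often as possible, which suits the pairwise summation
  if (chunk_size > 512)
    chunk_size = ((chunk_size + 511)/512)*512;
  n_chunks = (vec_size + chunk_size - 1) / chunk_size;
}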
+     *
+     * To sum up: Once the number of threads and the vector size are fixed, we
+     * have an exact layout of how the calls into the recursive function will
+     * happen. Inside the recursive function, we again only depend on the
+     * length. Finally, the concurrent threads write into different positions in
+     * a result vector in a thread-safe way and the addition in the short array
+     * is again serial.
+     */
+    template <typename Operation, typename ResultType>
+    struct TBBReduceFunctor
+    {
+      static const unsigned int threshold_array_allocate = 512;
+
+      TBBReduceFunctor(const Operation   &op,
+                       const size_type    vec_size)
+        :
+        op(op),
+        vec_size(vec_size)
+      {
+        // set chunk size for sub-tasks
+        const unsigned int gs = internal::Vector::minimum_parallel_grain_size;
+        n_chunks = std::min(static_cast<size_type>(4*MultithreadInfo::n_threads()),
+                            vec_size / gs);
+        chunk_size = vec_size / n_chunks;
+
+        // round to next multiple of 512 (or leave it at the minimum grain size
+        // if that happens to be smaller). this is advantageous because our
+        // algorithm favors lengths of a power of 2 due to pairwise summation ->
+        // at most one 'oddly' sized chunk
+        if (chunk_size > 512)
+          chunk_size = ((chunk_size + 511)/512)*512;
+        n_chunks = (vec_size + chunk_size - 1) / chunk_size;
+        AssertIndexRange((n_chunks-1)*chunk_size, vec_size);
+        AssertIndexRange(vec_size, n_chunks*chunk_size+1);
+
+        if (n_chunks > threshold_array_allocate)
+          {
+            // make sure we allocate an even number of elements,
+            // access to the new last element is needed in do_sum()
+            large_array.resize(2*((n_chunks+1)/2));
+            array_ptr = &large_array[0];
+          }
+        else
+          array_ptr = &small_array[0];
+      };
+
+      /**
+       * An operator used by TBB to work on a given @p range of chunks
+       * [range.begin(), range.end()).
+       */
+      void operator() (const tbb::blocked_range<size_type> &range) const
       {
-          Assert(partitioner.get() != NULL,
-                 ExcInternalError("Unexpected initialization of Vector that does "
-                                  "not set the TBB partitioner to a usable state."));
-          std_cxx11::shared_ptr<tbb::affinity_partitioner> tbb_partitioner =
-            partitioner->acquire_one_partitioner();
-
-          TBBReduceFunctor<Operation, ResultType> generic_functor(op, vec_size);
-          tbb::parallel_for (tbb::blocked_range<size_type> (0,
-                                                            generic_functor.n_chunks,
-                                                            1),
-                             generic_functor,
-                             *tbb_partitioner);
-          partitioner->release_one_partitioner(tbb_partitioner);
-          result = generic_functor.do_sum();
+        for (size_type i = range.begin(); i < range.end(); ++i)
+          accumulate_recursive(op, i*chunk_size, std::min((i+1)*chunk_size, vec_size),
+                               array_ptr[i]);
       }
-      else
-        accumulate_recursive(op,0,vec_size,result);
+
+      ResultType do_sum() const
+      {
+        while (n_chunks > 1)
+          {
+            if (n_chunks % 2 == 1)
+              array_ptr[n_chunks++] = ResultType();
+            for (size_type i=0; i<n_chunks/2; ++i)
+              array_ptr[i] = array_ptr[2*i] + array_ptr[2*i+1];
+            n_chunks /= 2;
+          }
+        return array_ptr[0];
+      }
+
+      const Operation &op;
+      const size_type vec_size;
+
+      mutable unsigned int n_chunks;
+      unsigned int chunk_size;
+      ResultType small_array [threshold_array_allocate];
+      std::vector<ResultType> large_array;
+      // this variable either points to small_array or large_array depending on
+      // the number of threads we want to feed
+      mutable ResultType *array_ptr;
+    };
+#endif
+
+
+
+    /**
+     * This is the general caller for parallel reduction operations that work in
+     * parallel.
+     */
+    template <typename Operation, typename ResultType>
+    void parallel_reduce (const Operation &op,
+                          const size_type  vec_size,
+                          ResultType      &result,
+                          std_cxx11::shared_ptr<parallel::internal::TBBPartitioner> &partitioner)
+    {
+#ifdef DEAL_II_WITH_THREADS
+      // only go to the parallel function in case there are at least 4 parallel
+      // items, otherwise the overhead is too large
+      if (vec_size >= 4*internal::Vector::minimum_parallel_grain_size &&
+          MultithreadInfo::n_threads() > 1)
+        {
+          Assert(partitioner.get() != NULL,
+                 ExcInternalError("Unexpected initialization of Vector that does "
+                                  "not set the TBB partitioner to a usable state."));
+          std_cxx11::shared_ptr<tbb::affinity_partitioner> tbb_partitioner =
+            partitioner->acquire_one_partitioner();
+
+          TBBReduceFunctor<Operation, ResultType> generic_functor(op, vec_size);
+          tbb::parallel_for (tbb::blocked_range<size_type> (0,
+                                                            generic_functor.n_chunks,
+                                                            1),
+                             generic_functor,
+                             *tbb_partitioner);
+          partitioner->release_one_partitioner(tbb_partitioner);
+          result = generic_functor.do_sum();
+        }
+      else
+        accumulate_recursive(op,0,vec_size,result);
 #else
-      accumulate_recursive(op,0,vec_size,result);
-      (void)partitioner;
+      accumulate_recursive(op,0,vec_size,result);
+      (void)partitioner;
 #endif
+    }
   }
 }
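(Editorial note, not part of the patch.) Put together, a vector class drives this machinery by constructing one of the operation functors defined earlier and handing it to parallel_reduce() along with its stored thread-loop partitioner, much as the add_and_dot() implementations do. A condensed sketch, with the free-function wrapper and its parameter names invented for illustration:

// Illustration only: compute x += a*v and return the local part of
// (x + a*v) . w, which is exactly the combination AddAndDot implements.
template <typename Number>
Number example_add_and_dot (Number *x, const Number *v, const Number *w,
                            const Number a, const size_type local_size,
                            std_cxx11::shared_ptr<parallel::internal::TBBPartitioner> &partitioner)
{
  Number sum;
  dealii::internal::VectorOperations::AddAndDot<Number> adder (x, v, w, a);
  dealii::internal::VectorOperations::parallel_reduce (adder, local_size, sum, partitioner);
  // an MPI-distributed vector would still accumulate 'sum' over all processors
  return sum;
}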