From: Denis Davydov
Date: Tue, 14 Mar 2017 15:35:40 +0000 (+0100)
Subject: extend internal::VectorOperations::parallel_reduce() to [start,end)
X-Git-Tag: v8.5.0-rc1~40^2~2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=2b9153ca53e4e41aaf8c2d807f60e4adf2d2fce6;p=dealii.git

extend internal::VectorOperations::parallel_reduce() to [start,end)
---

diff --git a/doc/news/changes/minor/20170314DenisDavydov b/doc/news/changes/minor/20170314DenisDavydov
new file mode 100644
index 0000000000..8da4911c99
--- /dev/null
+++ b/doc/news/changes/minor/20170314DenisDavydov
@@ -0,0 +1,4 @@
+New: extend internal::VectorOperations::parallel_for() and
+internal::VectorOperations::parallel_reduce() to [start,end).
+
+(Denis Davydov, 2017/03/14)

diff --git a/include/deal.II/lac/vector_operations_internal.h b/include/deal.II/lac/vector_operations_internal.h
index 82e3e2fce4..8b52d1db41 100644
--- a/include/deal.II/lac/vector_operations_internal.h
+++ b/include/deal.II/lac/vector_operations_internal.h
@@ -1171,11 +1171,14 @@ namespace internal
       static const unsigned int threshold_array_allocate = 512;
 
       TBBReduceFunctor(const Operation &op,
-                       const size_type  vec_size)
+                       const size_type  start,
+                       const size_type  end)
         :
         op(op),
-        vec_size(vec_size)
+        start(start),
+        end(end)
       {
+        const size_type vec_size = end-start;
         // set chunk size for sub-tasks
         const unsigned int gs = internal::Vector::minimum_parallel_grain_size;
         n_chunks = std::min(static_cast<size_type>(4*MultithreadInfo::n_threads()),
@@ -1210,7 +1213,7 @@ namespace internal
       void operator() (const tbb::blocked_range<size_type> &range) const
       {
         for (size_type i = range.begin(); i < range.end(); ++i)
-          accumulate_recursive(op, i*chunk_size, std::min((i+1)*chunk_size, vec_size),
+          accumulate_recursive(op, start+i*chunk_size, std::min(start+(i+1)*chunk_size, end),
                                array_ptr[i]);
       }
 
@@ -1228,7 +1231,8 @@ namespace internal
       }
 
       const Operation &op;
-      const size_type vec_size;
+      const size_type start;
+      const size_type end;
 
       mutable unsigned int n_chunks;
       unsigned int chunk_size;
@@ -1248,11 +1252,13 @@ namespace internal
      */
    template <typename Operation, typename ResultType>
    void parallel_reduce (const Operation &op,
-                         const size_type  vec_size,
+                         const size_type  end,
                          ResultType      &result,
-                         std_cxx11::shared_ptr<parallel::internal::TBBPartitioner> &partitioner)
+                         std_cxx11::shared_ptr<parallel::internal::TBBPartitioner> &partitioner,
+                         const size_type  start = 0)
    {
 #ifdef DEAL_II_WITH_THREADS
+      size_type vec_size = end-start;
      // only go to the parallel function in case there are at least 4 parallel
      // items, otherwise the overhead is too large
      if (vec_size >= 4*internal::Vector::minimum_parallel_grain_size &&
@@ -1264,7 +1270,7 @@ namespace internal
          std_cxx11::shared_ptr<tbb::affinity_partitioner> tbb_partitioner =
            partitioner->acquire_one_partitioner();
 
-          TBBReduceFunctor<Operation, ResultType> generic_functor(op, vec_size);
+          TBBReduceFunctor<Operation, ResultType> generic_functor(op, start, end);
          tbb::parallel_for (tbb::blocked_range<size_type> (0,
                                                            generic_functor.n_chunks,
                                                            1),
@@ -1274,9 +1280,9 @@ namespace internal
          result = generic_functor.do_sum();
        }
      else
-        accumulate_recursive(op,0,vec_size,result);
+        accumulate_recursive(op,start,end,result);
 #else
-      accumulate_recursive(op,0,vec_size,result);
+      accumulate_recursive(op,start,end,result);
      (void)partitioner;
 #endif
    }
diff --git a/tests/lac/vector_operations_parallel_reduce_start_end.cc b/tests/lac/vector_operations_parallel_reduce_start_end.cc
new file mode 100644
index 0000000000..054ba9d46c
--- /dev/null
+++ b/tests/lac/vector_operations_parallel_reduce_start_end.cc
@@ -0,0 +1,90 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2012 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// check that internal::VectorOperations::parallel_reduce works for start-end
+
+#include "../tests.h"
+#include <cmath>
+#include <fstream>
+#include <iomanip>
+#include <deal.II/base/parallel.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/lac/vector_operations_internal.h>
+
+
+
+
+template <typename Number>
+void check ()
+{
+  for (unsigned int test=0; test<5; ++test)
+    {
+      const unsigned int size = 17 + test*1101;
+
+      std_cxx11::shared_ptr< ::dealii::parallel::internal::TBBPartitioner> thread_loop_partitioner;
+      thread_loop_partitioner.reset(new ::dealii::parallel::internal::TBBPartitioner());
+
+      Number *val;
+      Utilities::System::posix_memalign ((void **)&val, 64, sizeof(Number)*size);
+
+      for (unsigned int i = 0; i < size; ++i)
+        val[i] = (double)rand()/RAND_MAX;
+
+
+      internal::VectorOperations::MeanValue<Number> mean(val);
+
+      Number sum_direct = 0.;
+      internal::VectorOperations::parallel_reduce (mean, size, sum_direct,
+                                                   thread_loop_partitioner);
+
+      Number sum = 0.;
+      // now break the size in chunks
+      const unsigned int n_chunks = 3;
+      const unsigned int chunk_size = size / n_chunks;
+      for (unsigned int i = 0; i <= n_chunks; ++i)
+        {
+          const unsigned int begin = i*chunk_size;
+          const unsigned int end   = std::min((i+1)*chunk_size, size);
+
+          Number sum_i = 0.;
+          internal::VectorOperations::parallel_reduce (mean, end, sum_i,
+                                                       thread_loop_partitioner,
+                                                       begin);
+          sum += sum_i;
+        }
+
+      // check values:
+      AssertThrow(std::fabs(sum-sum_direct) < 1e-6*sum_direct,
+                  ExcInternalError());
+
+      free(val);
+    }
+}
+
+
+int main()
+{
+  std::ofstream logfile("output");
+  deallog << std::fixed;
+  deallog << std::setprecision(2);
+  deallog.attach(logfile);
+  deallog.threshold_double(1.e-10);
+
+  check<float>();
+  check<double>();
+  check<long double>();
+  deallog << "OK" << std::endl;
+}
diff --git a/tests/lac/vector_operations_parallel_reduce_start_end.output b/tests/lac/vector_operations_parallel_reduce_start_end.output
new file mode 100644
index 0000000000..0fd8fc12f0
--- /dev/null
+++ b/tests/lac/vector_operations_parallel_reduce_start_end.output
@@ -0,0 +1,2 @@
+
+DEAL::OK
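
The extended interface keeps old call sites working: the second argument is the (exclusive) end index and the new, optional last argument is the start index, which defaults to 0, so calls that pass only the vector size behave as before. As a quick reference, here is a minimal sketch of a reduction over a sub-range [begin,end) of an array, mirroring the test above; `val`, `begin`, `end`, and `partial_sum` are illustrative names, and the snippet assumes the same includes and namespace setup as the test file:

    // assumes: Number *val points to at least `end` entries and 0 <= begin <= end,
    // with the deal.II internal headers included as in the test above
    std_cxx11::shared_ptr< ::dealii::parallel::internal::TBBPartitioner> partitioner;
    partitioner.reset(new ::dealii::parallel::internal::TBBPartitioner());

    internal::VectorOperations::MeanValue<Number> mean(val);

    Number partial_sum = 0.;
    internal::VectorOperations::parallel_reduce (mean, end, partial_sum,
                                                 partitioner,
                                                 begin);  // reduces over val[begin..end)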