https://gitweb.dealii.org/ - dealii.git/commitdiff
extend internal::VectorOperations::parallel_reduce() to [start,end)
author Denis Davydov <davydden@gmail.com>
Tue, 14 Mar 2017 15:35:40 +0000 (16:35 +0100)
committer Denis Davydov <davydden@gmail.com>
Tue, 14 Mar 2017 15:42:11 +0000 (16:42 +0100)
doc/news/changes/minor/20170314DenisDavydov [new file with mode: 0644]
include/deal.II/lac/vector_operations_internal.h
tests/lac/vector_operations_parallel_reduce_start_end.cc [new file with mode: 0644]
tests/lac/vector_operations_parallel_reduce_start_end.output [new file with mode: 0644]

diff --git a/doc/news/changes/minor/20170314DenisDavydov b/doc/news/changes/minor/20170314DenisDavydov
new file mode 100644 (file)
index 0000000..8da4911
--- /dev/null
@@ -0,0 +1,4 @@
+New: extend internal::VectorOperations::parallel_for() and
+internal::VectorOperations::parallel_reduce() to [start,end).
+<br>
+(Denis Davydov, 2017/03/14)
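
As a minimal usage sketch of the extended interface (the argument order follows
the diff below; the functor, data and partitioner setup are illustrative only):

  // Reduce over the subrange [start, end) of val. The trailing start
  // argument defaults to 0, so existing whole-vector callers are unchanged.
  internal::VectorOperations::MeanValue<double> mean(val);
  double partial_sum = 0.;
  internal::VectorOperations::parallel_reduce(mean, end, partial_sum,
                                              thread_loop_partitioner, start);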
diff --git a/include/deal.II/lac/vector_operations_internal.h b/include/deal.II/lac/vector_operations_internal.h
index 82e3e2fce43f41924e7d8d965cfc621447bfc61d..8b52d1db4109fb66e2968b3ff278bcc13dab3ea5 100644 (file)
@@ -1171,11 +1171,14 @@ namespace internal
       static const unsigned int threshold_array_allocate = 512;
 
       TBBReduceFunctor(const Operation   &op,
-                       const size_type    vec_size)
+                       const size_type    start,
+                       const size_type    end)
         :
         op(op),
-        vec_size(vec_size)
+        start(start),
+        end(end)
       {
+        const size_type vec_size = end-start;
         // set chunk size for sub-tasks
         const unsigned int gs = internal::Vector::minimum_parallel_grain_size;
         n_chunks = std::min(static_cast<size_type>(4*MultithreadInfo::n_threads()),
@@ -1210,7 +1213,7 @@ namespace internal
       void operator() (const tbb::blocked_range<size_type> &range) const
       {
         for (size_type i = range.begin(); i < range.end(); ++i)
-          accumulate_recursive(op, i*chunk_size, std::min((i+1)*chunk_size, vec_size),
+          accumulate_recursive(op, start+i*chunk_size, std::min(start+(i+1)*chunk_size, end),
                                array_ptr[i]);
       }
 
@@ -1228,7 +1231,8 @@ namespace internal
       }
 
       const Operation &op;
-      const size_type vec_size;
+      const size_type start;
+      const size_type end;
 
       mutable unsigned int n_chunks;
       unsigned int chunk_size;
@@ -1248,11 +1252,13 @@ namespace internal
      */
     template <typename Operation, typename ResultType>
     void parallel_reduce (const Operation   &op,
-                          const size_type    vec_size,
+                          const size_type    end,
                           ResultType        &result,
-                          std_cxx11::shared_ptr<parallel::internal::TBBPartitioner> &partitioner)
+                          std_cxx11::shared_ptr<parallel::internal::TBBPartitioner> &partitioner,
+                          const size_type start = 0)
     {
 #ifdef DEAL_II_WITH_THREADS
+      size_type vec_size = end-start;
       // only go to the parallel function in case there are at least 4 parallel
       // items, otherwise the overhead is too large
       if (vec_size >= 4*internal::Vector::minimum_parallel_grain_size &&
@@ -1264,7 +1270,7 @@ namespace internal
           std_cxx11::shared_ptr<tbb::affinity_partitioner> tbb_partitioner =
             partitioner->acquire_one_partitioner();
 
-          TBBReduceFunctor<Operation,ResultType> generic_functor(op, vec_size);
+          TBBReduceFunctor<Operation,ResultType> generic_functor(op, start, end);
           tbb::parallel_for (tbb::blocked_range<size_type> (0,
                                                             generic_functor.n_chunks,
                                                             1),
@@ -1274,9 +1280,9 @@ namespace internal
           result = generic_functor.do_sum();
         }
       else
-        accumulate_recursive(op,0,vec_size,result);
+        accumulate_recursive(op,start,end,result);
 #else
-      accumulate_recursive(op,0,vec_size,result);
+      accumulate_recursive(op,start,end,result);
       (void)partitioner;
 #endif
     }
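
The key change above is the chunk-to-subrange mapping: TBB range index i now
covers [start + i*chunk_size, min(start + (i+1)*chunk_size, end)) rather than
starting at zero. A self-contained sketch of just that arithmetic (plain C++,
no TBB; the values of start, end and chunk_size are illustrative, whereas in
the library chunk_size is derived from minimum_parallel_grain_size and the
thread count):

  #include <algorithm>
  #include <cstddef>
  #include <cstdio>

  int main()
  {
    const std::size_t start = 100, end = 250, chunk_size = 50;
    const std::size_t n_chunks = (end - start + chunk_size - 1) / chunk_size;
    // Prints: chunk 0: [100, 150), chunk 1: [150, 200), chunk 2: [200, 250);
    // the last chunk is clipped to end, as in operator() above.
    for (std::size_t i = 0; i < n_chunks; ++i)
      std::printf("chunk %zu: [%zu, %zu)\n", i, start + i * chunk_size,
                  std::min(start + (i + 1) * chunk_size, end));
    return 0;
  }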
diff --git a/tests/lac/vector_operations_parallel_reduce_start_end.cc b/tests/lac/vector_operations_parallel_reduce_start_end.cc
new file mode 100644 (file)
index 0000000..054ba9d
--- /dev/null
@@ -0,0 +1,90 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2012 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// check that internal::VectorOperations::parallel_reduce works for a [start,end) range
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/lac/vector_operations_internal.h>
+#include <cmath>
+#include <fstream>
+#include <iomanip>
+
+
+
+
+template <typename Number>
+void check ()
+{
+  for (unsigned int test=0; test<5; ++test)
+    {
+      const unsigned int size = 17 + test*1101;
+
+      std_cxx11::shared_ptr< ::dealii::parallel::internal::TBBPartitioner> thread_loop_partitioner;
+      thread_loop_partitioner.reset(new ::dealii::parallel::internal::TBBPartitioner());
+
+      Number *val;
+      Utilities::System::posix_memalign ((void **)&val, 64, sizeof(Number)*size);
+
+      for (unsigned int i = 0; i < size; ++i)
+        val[i] = (double)rand()/RAND_MAX;
+
+
+      internal::VectorOperations::MeanValue<Number> mean(val);
+
+      Number sum_direct = 0.;
+      internal::VectorOperations::parallel_reduce (mean, size, sum_direct,
+                                                   thread_loop_partitioner);
+
+      Number sum = 0.;
+      // now break [0,size) into chunks; i runs to n_chunks inclusive so the last iteration picks up any remainder
+      const unsigned int n_chunks = 3;
+      const unsigned int chunk_size = size / n_chunks;
+      for (unsigned int i = 0; i <= n_chunks; ++i)
+        {
+          const unsigned int begin = i*chunk_size;
+          const unsigned int end   = std::min((i+1)*chunk_size, size);
+
+          Number sum_i = 0.;
+          internal::VectorOperations::parallel_reduce (mean, end, sum_i,
+                                                       thread_loop_partitioner,
+                                                       begin);
+          sum+= sum_i;
+        }
+
+      // check values:
+      AssertThrow(std::fabs(sum-sum_direct) < 1e-6*sum_direct,
+                  ExcInternalError());
+
+      free(val);
+    }
+}
+
+
+int main()
+{
+  std::ofstream logfile("output");
+  deallog << std::fixed;
+  deallog << std::setprecision(2);
+  deallog.attach(logfile);
+  deallog.threshold_double(1.e-10);
+
+  check<float>();
+  check<double>();
+  check<long double>();
+  deallog << "OK" << std::endl;
+}
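
Two notes on the test above. The loop deliberately runs to i <= n_chunks: when
size is not divisible by n_chunks, the extra iteration reduces the remainder,
and when it is divisible, the final call sees the empty range [size, size),
which the extended interface handles. Also, the changelog names
internal::VectorOperations::parallel_for() as extended in the same way; that
change is not shown in this diff, so the following one-liner is only an
assumed sketch of the analogous call (the argument order is a guess, and
functor stands for any callable taking a (begin, end) subrange):

  // Hypothetical: parallel_for over the subrange [begin, end).
  internal::VectorOperations::parallel_for(functor, begin, end,
                                           thread_loop_partitioner);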
diff --git a/tests/lac/vector_operations_parallel_reduce_start_end.output b/tests/lac/vector_operations_parallel_reduce_start_end.output
new file mode 100644 (file)
index 0000000..0fd8fc1
--- /dev/null
@@ -0,0 +1,2 @@
+
+DEAL::OK
