From cc42c003d7f5a32981e5b1aca2d2121d11355fa3 Mon Sep 17 00:00:00 2001
From: Peter Munch <peterrmuench@gmail.com>
Date: Thu, 27 May 2021 23:52:28 +0200
Subject: [PATCH] Implement Utilities::MPI::reduce()

---
 doc/news/changes/minor/20210527Munch   |   4 +
 include/deal.II/base/mpi.h             |  19 +++++
 include/deal.II/base/mpi.templates.h   |  49 +++++++++---
 source/base/mpi.inst.in                |  12 +++
 tests/mpi/allreduce_01.cc              |  15 ++++
 tests/mpi/allreduce_01.mpirun=1.output |   4 +
 tests/mpi/allreduce_01.mpirun=5.output | 100 +++++++++++++++++++++++++
 7 files changed, 193 insertions(+), 10 deletions(-)
 create mode 100644 doc/news/changes/minor/20210527Munch

diff --git a/doc/news/changes/minor/20210527Munch b/doc/news/changes/minor/20210527Munch
new file mode 100644
index 0000000000..1d5ed59886
--- /dev/null
+++ b/doc/news/changes/minor/20210527Munch
@@ -0,0 +1,4 @@
+New: The new function Utilities::MPI::reduce() allows reducing
+arbitrary types with a user-specified binary operation.
+<br>
+(Peter Munch, 2021/05/27)
diff --git a/include/deal.II/base/mpi.h b/include/deal.II/base/mpi.h
index d0f416a5a3..978041f762 100644
--- a/include/deal.II/base/mpi.h
+++ b/include/deal.II/base/mpi.h
@@ -1124,6 +1124,25 @@ namespace Utilities
               const T &          object_to_send,
               const unsigned int root_process = 0);
 
+    /**
+     * A function that combines values @p local_value from all processes
+     * via a user-specified binary operation @p combiner and makes the
+     * result available on the @p root_process. As such, this function is
+     * similar to MPI_Reduce (and Utilities::MPI::min/max()): because of
+     * the user-specified binary operation it is slower for built-in
+     * types, but in exchange it can handle general object types,
+     * including ones that store variable amounts of data.
+     *
+     * In contrast to all_reduce(), the result is only available on the
+     * @p root_process; on all other processes, the returned value is
+     * undefined.
+     */
+    template <typename T>
+    T
+    reduce(const T &                                     local_value,
+           const MPI_Comm &                              comm,
+           const std::function<T(const T &, const T &)> &combiner,
+           const unsigned int                            root_process = 0);
+
     /**
      * A function that combines values @p local_value from all processes
      * via a user-specified binary operation @p combiner and distributes the
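
As a usage sketch (not part of the patch; main() and the concatenating
combiner below are illustrative assumptions), the new function can be
called as follows to collect per-rank data on a chosen root:

    #include <deal.II/base/mpi.h>
    #include <deal.II/base/mpi.templates.h> // instantiates reduce() for our type

    #include <iostream>
    #include <vector>

    int main(int argc, char *argv[])
    {
      dealii::Utilities::MPI::MPI_InitFinalize mpi(argc, argv, 1);

      using VT = std::vector<unsigned int>;
      const VT local{
        dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)};

      // combine by concatenation; only the root obtains the result
      const VT result = dealii::Utilities::MPI::reduce<VT>(
        local,
        MPI_COMM_WORLD,
        [](const VT &a, const VT &b) {
          VT c = a;
          c.insert(c.end(), b.begin(), b.end());
          return c;
        },
        /*root_process=*/0);

      // on all other ranks the returned value is undefined
      if (dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
        for (const auto v : result)
          std::cout << v << ' ';
    }
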
diff --git a/include/deal.II/base/mpi.templates.h b/include/deal.II/base/mpi.templates.h
index 449cd90313..8f7413ab1f 100644
--- a/include/deal.II/base/mpi.templates.h
+++ b/include/deal.II/base/mpi.templates.h
@@ -530,12 +530,13 @@ namespace Utilities
 
     template <typename T>
     T
-    all_reduce(const T &                                     vec,
-               const MPI_Comm &                              comm,
-               const std::function<T(const T &, const T &)> &combiner)
+    reduce(const T &                                     vec,
+           const MPI_Comm &                              comm,
+           const std::function<T(const T &, const T &)> &combiner,
+           const unsigned int                            root_process)
     {
 #ifdef DEAL_II_WITH_MPI
-      if (job_supports_mpi())
+      if (job_supports_mpi() && n_mpi_processes(comm) > 1)
         {
           // 1) perform custom reduction
           T result = vec;
@@ -545,13 +546,18 @@ namespace Utilities
 
           for (unsigned int stride = 1; stride < nproc; stride *= 2)
             {
-              const unsigned int rank_recv =
-                (2 * stride) * (rank / (2 * stride));
-              const unsigned int rank_send = rank_recv + stride;
+              // compute the communication partners on "virtual" ranks in
+              // which root_process takes the place of rank 0 of the
+              // binomial tree
+              unsigned int rank_recv =
+                (2 * stride) *
+                  ((rank + nproc - root_process) % nproc / (2 * stride)) +
+                root_process;
+              unsigned int rank_send = rank_recv + stride;
 
-              if (rank_send >= nproc) // nothing to do
+              if (rank_send >= nproc + root_process) // nothing to do
                 continue;
 
+              // translate the virtual ranks back to physical ranks
+              rank_recv = rank_recv % nproc;
+              rank_send = rank_send % nproc;
+
               if (rank_recv == rank) // process receives data
                 {
                   MPI_Status status;
@@ -592,16 +598,39 @@ namespace Utilities
                 }
             }
 
-          // 2) broadcast result
-          return Utilities::MPI::broadcast(comm, result);
+          // 2) the result now lives on the root process: return it there
+          //    and return a default-constructed object everywhere else
+          if (rank == root_process)
+            return result;
+          else
+            return {};
         }
 #endif
       (void)comm;
       (void)combiner;
+      (void)root_process;
       return vec;
     }
 
 
+
+    template <typename T>
+    T
+    all_reduce(const T &                                     vec,
+               const MPI_Comm &                              comm,
+               const std::function<T(const T &, const T &)> &combiner)
+    {
+      if (job_supports_mpi() && n_mpi_processes(comm) > 1)
+        {
+          // 1) perform reduction
+          const auto result = reduce(vec, comm, combiner);
+
+          // 2) broadcast result
+          return Utilities::MPI::broadcast(comm, result);
+        }
+      else
+        return vec;
+    }
+
+
     template <typename T>
     std::vector<T>
     compute_set_union(const std::vector<T> &vec, const MPI_Comm &comm)
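
The pairing logic in the hunks above generalizes the previous
rank-0-rooted binomial tree: each rank is first mapped to a "virtual"
rank in which root_process plays the role of rank 0, the usual tree is
built on the virtual ranks, and the partners are mapped back to physical
ranks with a modulo. A minimal standalone sketch of that mapping (the
helper names are illustrative, not deal.II API):

    // virtual rank: root -> 0, root+1 -> 1, ..., root-1 -> nproc-1
    unsigned int to_virtual(const unsigned int rank,
                            const unsigned int nproc,
                            const unsigned int root)
    {
      return (rank + nproc - root) % nproc;
    }

    // inverse mapping back to a physical rank
    unsigned int to_physical(const unsigned int vrank,
                             const unsigned int nproc,
                             const unsigned int root)
    {
      return (vrank + root) % nproc;
    }

In these terms, the receiver at a given stride is
to_physical(2 * stride * (to_virtual(rank, nproc, root) / (2 * stride)),
nproc, root), and the early-out check rank_send >= nproc + root_process
says that the virtual sender would be >= nproc, i.e., does not exist.
all_reduce() is then simply a reduction to the default root 0 followed by
a broadcast from there.
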
diff --git a/source/base/mpi.inst.in b/source/base/mpi.inst.in
index ff1625a185..0ba1696696 100644
--- a/source/base/mpi.inst.in
+++ b/source/base/mpi.inst.in
@@ -67,6 +67,18 @@ for (S : MPI_SCALARS)
                          const MPI_Comm &,
                          const ArrayView<S> &);
 
+    template S reduce(const S &                                     vec,
+                      const MPI_Comm &                              comm,
+                      const std::function<S(const S &, const S &)> &combiner,
+                      const unsigned int root_process);
+
+    template std::vector<S> reduce(
+      const std::vector<S> &                                       vec,
+      const MPI_Comm &                                             comm,
+      const std::function<std::vector<S>(const std::vector<S> &,
+                                         const std::vector<S> &)> &combiner,
+      const unsigned int root_process);
+
     template S all_reduce(
       const S &                                     vec,
       const MPI_Comm &                              comm,
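
For reference, the instantiation loop above is expanded once per scalar
type S in MPI_SCALARS by deal.II's instantiation machinery; picking
S = double as an illustrative example, the first new declaration becomes:

    template double reduce(
      const double &                                                vec,
      const MPI_Comm &                                              comm,
      const std::function<double(const double &, const double &)> &combiner,
      const unsigned int                                            root_process);
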
diff --git a/tests/mpi/allreduce_01.cc b/tests/mpi/allreduce_01.cc
index 46880fcef3..fcd5001af5 100644
--- a/tests/mpi/allreduce_01.cc
+++ b/tests/mpi/allreduce_01.cc
@@ -35,6 +35,21 @@ check(const std::function<std::vector<T>(const std::vector<T> &,
   for (const auto r : result)
     deallog << r << " ";
   deallog << std::endl;
+
+  // repeat the reduction with every rank acting as the root process; the
+  // result is only defined on that rank
+  for (unsigned int rank = 0;
+       rank < Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+       ++rank)
+    {
+      const auto result = Utilities::MPI::reduce(
+        std::vector<T>{Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)},
+        MPI_COMM_WORLD,
+        fu,
+        rank);
+
+      for (const auto r : result)
+        deallog << r << " ";
+      deallog << std::endl;
+    }
 }
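
The expected-output changes below follow from the order in which the tree
applies the combiner. A standalone simulation of the pairing logic
(illustrative only, not part of the test) shows why, with 5 ranks and a
concatenating combiner, root r ends up with the rotated sequence
r, r+1, ..., r-1:

    #include <iostream>
    #include <vector>

    int main()
    {
      const unsigned int nproc = 5;
      for (unsigned int root = 0; root < nproc; ++root)
        {
          // one single-element "message" per rank, as in the test
          std::vector<std::vector<unsigned int>> data(nproc);
          for (unsigned int r = 0; r < nproc; ++r)
            data[r] = {r};

          // same pairing logic as the reduce() implementation above
          for (unsigned int stride = 1; stride < nproc; stride *= 2)
            for (unsigned int rank = 0; rank < nproc; ++rank)
              {
                unsigned int rank_recv =
                  (2 * stride) *
                    ((rank + nproc - root) % nproc / (2 * stride)) +
                  root;
                unsigned int rank_send = rank_recv + stride;

                if (rank_send >= nproc + root) // nothing to do
                  continue;

                rank_recv = rank_recv % nproc;
                rank_send = rank_send % nproc;

                // emulate the send: append the sender's data to the receiver
                if (rank_send == rank)
                  for (const unsigned int v : data[rank])
                    data[rank_recv].push_back(v);
              }

          std::cout << "root " << root << ":";
          for (const unsigned int v : data[root])
            std::cout << ' ' << v;
          std::cout << '\n';
        }
    }

It prints "root 0: 0 1 2 3 4", "root 1: 1 2 3 4 0", and so on, matching
the new lines in allreduce_01.mpirun=5.output; on all other ranks the
test prints the default-constructed (empty) vector returned by reduce(),
which accounts for the new empty "DEAL:n::" lines.
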
 
 
diff --git a/tests/mpi/allreduce_01.mpirun=1.output b/tests/mpi/allreduce_01.mpirun=1.output
index 8a9e3e0734..1a0972453d 100644
--- a/tests/mpi/allreduce_01.mpirun=1.output
+++ b/tests/mpi/allreduce_01.mpirun=1.output
@@ -3,3 +3,7 @@ DEAL:0::0
 DEAL:0::0 
 DEAL:0::0 
 DEAL:0::0 
+DEAL:0::0 
+DEAL:0::0 
+DEAL:0::0 
+DEAL:0::0 
diff --git a/tests/mpi/allreduce_01.mpirun=5.output b/tests/mpi/allreduce_01.mpirun=5.output
index 0b8dac6bdf..bce26c9605 100644
--- a/tests/mpi/allreduce_01.mpirun=5.output
+++ b/tests/mpi/allreduce_01.mpirun=5.output
@@ -1,29 +1,129 @@
 
 DEAL:0::0 
+DEAL:0::0 
+DEAL:0::
+DEAL:0::
+DEAL:0::
+DEAL:0::
+DEAL:0::4 
 DEAL:0::4 
+DEAL:0::
+DEAL:0::
+DEAL:0::
+DEAL:0::
 DEAL:0::10 
+DEAL:0::10 
+DEAL:0::
+DEAL:0::
+DEAL:0::
+DEAL:0::
+DEAL:0::0 1 2 3 4 
 DEAL:0::0 1 2 3 4 
+DEAL:0::
+DEAL:0::
+DEAL:0::
+DEAL:0::
 
 DEAL:1::0 
+DEAL:1::
+DEAL:1::0 
+DEAL:1::
+DEAL:1::
+DEAL:1::
+DEAL:1::4 
+DEAL:1::
 DEAL:1::4 
+DEAL:1::
+DEAL:1::
+DEAL:1::
 DEAL:1::10 
+DEAL:1::
+DEAL:1::10 
+DEAL:1::
+DEAL:1::
+DEAL:1::
 DEAL:1::0 1 2 3 4 
+DEAL:1::
+DEAL:1::1 2 3 4 0 
+DEAL:1::
+DEAL:1::
+DEAL:1::
 
 
 DEAL:2::0 
+DEAL:2::
+DEAL:2::
+DEAL:2::0 
+DEAL:2::
+DEAL:2::
 DEAL:2::4 
+DEAL:2::
+DEAL:2::
+DEAL:2::4 
+DEAL:2::
+DEAL:2::
+DEAL:2::10 
+DEAL:2::
+DEAL:2::
 DEAL:2::10 
+DEAL:2::
+DEAL:2::
 DEAL:2::0 1 2 3 4 
+DEAL:2::
+DEAL:2::
+DEAL:2::2 3 4 0 1 
+DEAL:2::
+DEAL:2::
 
 
 DEAL:3::0 
+DEAL:3::
+DEAL:3::
+DEAL:3::
+DEAL:3::0 
+DEAL:3::
+DEAL:3::4 
+DEAL:3::
+DEAL:3::
+DEAL:3::
 DEAL:3::4 
+DEAL:3::
 DEAL:3::10 
+DEAL:3::
+DEAL:3::
+DEAL:3::
+DEAL:3::10 
+DEAL:3::
 DEAL:3::0 1 2 3 4 
+DEAL:3::
+DEAL:3::
+DEAL:3::
+DEAL:3::3 4 0 1 2 
+DEAL:3::
 
 
+DEAL:4::0 
+DEAL:4::
+DEAL:4::
+DEAL:4::
+DEAL:4::
 DEAL:4::0 
 DEAL:4::4 
+DEAL:4::
+DEAL:4::
+DEAL:4::
+DEAL:4::
+DEAL:4::4 
+DEAL:4::10 
+DEAL:4::
+DEAL:4::
+DEAL:4::
+DEAL:4::
 DEAL:4::10 
 DEAL:4::0 1 2 3 4 
+DEAL:4::
+DEAL:4::
+DEAL:4::
+DEAL:4::
+DEAL:4::4 0 1 2 3 
 
-- 
2.39.5