Introduce Utilities::MPI::allreduce 11759/head
author    Peter Munch <peterrmuench@gmail.com>
          Tue, 16 Feb 2021 09:34:30 +0000 (10:34 +0100)
committer Peter Munch <peterrmuench@gmail.com>
          Sat, 27 Feb 2021 19:22:24 +0000 (20:22 +0100)
include/deal.II/base/mpi.h
include/deal.II/base/mpi.templates.h
source/base/mpi.inst.in
tests/mpi/allreduce_01.cc [new file with mode: 0644]
tests/mpi/allreduce_01.mpirun=1.output [new file with mode: 0644]
tests/mpi/allreduce_01.mpirun=5.output [new file with mode: 0644]

diff --git a/include/deal.II/base/mpi.h b/include/deal.II/base/mpi.h
index b138d3433fc915c5a4bd0be8cba053268c62c2fd..d673e2cf8f172f42f38bfe375e7564444f847ba3 100644 (file)
@@ -1008,6 +1008,21 @@ namespace Utilities
            const T &          object_to_send,
            const unsigned int root_process = 0);
 
+    /**
+     * A function that combines values @p local_value from all processes
+     * via a user-specified binary operation @p combiner and distributes the
+     * result back to all processes. As such, this function is similar to
+     * MPI_Allreduce (if it were implemented by a global reduction followed
+     * by a broadcast step), but because the binary operation is
+     * user-specified, it can also handle general object types, including
+     * ones that store variable amounts of data.
+     */
+    template <typename T>
+    T
+    all_reduce(const T &                                     local_value,
+               const MPI_Comm &                              comm,
+               const std::function<T(const T &, const T &)> &combiner);
+
     /**
      * Given a partitioned index set space, compute the owning MPI process rank
      * of each element of a second index set according to the partitioned index
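
For orientation, here is a minimal, hypothetical usage sketch of the interface declared above (not part of the commit). The helper name longest_name is made up, and the include of mpi.templates.h reflects the assumption that non-scalar types such as std::string are not covered by the explicit instantiations added in source/base/mpi.inst.in below.

// Sketch: reduce a variable-size object (a std::string) across all ranks.
// The combiner keeps the longer of its two arguments, so every rank ends
// up with the longest string in the communicator.
#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi.templates.h> // definition of all_reduce() for
                                        // types without an explicit instantiation

#include <string>

using namespace dealii;

std::string
longest_name(const std::string &local_name, const MPI_Comm &comm)
{
  return Utilities::MPI::all_reduce<std::string>(
    local_name, comm, [](const std::string &a, const std::string &b) {
      return a.size() >= b.size() ? a : b;
    });
}

Since the reduction works on buffers produced by Utilities::pack(), the only requirement on T is that it is serializable; the combiner should be associative, as the implementation below reduces in a tree-like order rather than strictly left to right.
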
diff --git a/include/deal.II/base/mpi.templates.h b/include/deal.II/base/mpi.templates.h
index e30dee3e3d91287ad8a98570ea38a2c6a7b00a2e..fa3cf3a31d433cc163a30699fd127c6fcd6368d2 100644 (file)
@@ -457,80 +457,98 @@ namespace Utilities
 
 
     template <typename T>
-    std::vector<T>
-    compute_set_union(const std::vector<T> &vec, const MPI_Comm &comm)
+    T
+    all_reduce(const T &                                     vec,
+               const MPI_Comm &                              comm,
+               const std::function<T(const T &, const T &)> &combiner)
     {
 #ifdef DEAL_II_WITH_MPI
-      // 1) collect vector entries and create union
-      std::vector<T> result = vec;
-
-      const unsigned int rank  = this_mpi_process(comm);
-      const unsigned int nproc = n_mpi_processes(comm);
-
-      for (unsigned int stride = 1; stride < nproc; stride *= 2)
+      if (job_supports_mpi())
         {
-          const unsigned int rank_recv = (2 * stride) * (rank / (2 * stride));
-          const unsigned int rank_send = rank_recv + stride;
+          // 1) perform custom reduction
+          T result = vec;
 
-          if (rank_send >= nproc) // nothing to do
-            continue;
+          const unsigned int rank  = this_mpi_process(comm);
+          const unsigned int nproc = n_mpi_processes(comm);
 
-          if (rank_recv == rank) // process receives data
+          for (unsigned int stride = 1; stride < nproc; stride *= 2)
             {
-              MPI_Status status;
-              const auto ierr_1 = MPI_Probe(rank_send,
-                                            internal::Tags::compute_union,
-                                            comm,
-                                            &status);
-              AssertThrowMPI(ierr_1);
-
-              int        amount;
-              const auto ierr_2 =
-                MPI_Get_count(&status,
-                              internal::mpi_type_id(result.data()),
-                              &amount);
-              AssertThrowMPI(ierr_2);
-
-              const unsigned int old_size = result.size();
-              result.resize(old_size + amount);
-
-              const auto ierr_3 = MPI_Recv(result.data() + old_size,
-                                           amount,
-                                           internal::mpi_type_id(result.data()),
-                                           status.MPI_SOURCE,
-                                           status.MPI_TAG,
-                                           comm,
-                                           MPI_STATUS_IGNORE);
-              AssertThrowMPI(ierr_3);
-
-              std::sort(result.begin(), result.end());
-              result.erase(std::unique(result.begin(), result.end()),
-                           result.end());
+              const unsigned int rank_recv =
+                (2 * stride) * (rank / (2 * stride));
+              const unsigned int rank_send = rank_recv + stride;
+
+              if (rank_send >= nproc) // nothing to do
+                continue;
+
+              if (rank_recv == rank) // process receives data
+                {
+                  MPI_Status status;
+                  const auto ierr_1 = MPI_Probe(rank_send,
+                                                internal::Tags::compute_union,
+                                                comm,
+                                                &status);
+                  AssertThrowMPI(ierr_1);
+
+                  int        amount;
+                  const auto ierr_2 = MPI_Get_count(&status, MPI_CHAR, &amount);
+                  AssertThrowMPI(ierr_2);
+
+                  std::vector<char> temp(amount);
+
+                  const auto ierr_3 = MPI_Recv(temp.data(),
+                                               amount,
+                                               MPI_CHAR,
+                                               status.MPI_SOURCE,
+                                               status.MPI_TAG,
+                                               comm,
+                                               MPI_STATUS_IGNORE);
+                  AssertThrowMPI(ierr_3);
+
+                  result = combiner(result, Utilities::unpack<T>(temp, false));
+                }
+              else if (rank_send == rank) // process sends data
+                {
+                  const std::vector<char> temp = Utilities::pack(result, false);
+
+                  const auto ierr = MPI_Send(temp.data(),
+                                             temp.size(),
+                                             MPI_CHAR,
+                                             rank_recv,
+                                             internal::Tags::compute_union,
+                                             comm);
+                  AssertThrowMPI(ierr);
+                }
             }
-          else if (rank_send == rank) // process sends data
-            {
-              const auto ierr = MPI_Send(result.data(),
-                                         result.size(),
-                                         internal::mpi_type_id(result.data()),
-                                         rank_recv,
-                                         internal::Tags::compute_union,
-                                         comm);
-              AssertThrowMPI(ierr);
-            }
-        }
 
-      // 2) broadcast result
-      int size = result.size();
-      MPI_Bcast(&size, 1, MPI_INT, 0, comm);
-      result.resize(size);
-      MPI_Bcast(
-        result.data(), size, internal::mpi_type_id(vec.data()), 0, comm);
+          // 2) broadcast result
+          std::vector<char> temp = Utilities::pack(result, false);
+          unsigned int      size = temp.size();
+          MPI_Bcast(&size, 1, MPI_UNSIGNED, 0, comm);
+          temp.resize(size);
+          MPI_Bcast(temp.data(), size, MPI_CHAR, 0, comm);
 
-      return result;
-#else
+          return Utilities::unpack<T>(temp, false);
+        }
+#endif
       (void)comm;
+      (void)combiner;
       return vec;
-#endif
+    }
+
+
+    template <typename T>
+    std::vector<T>
+    compute_set_union(const std::vector<T> &vec, const MPI_Comm &comm)
+    {
+      return Utilities::MPI::all_reduce<std::vector<T>>(
+        vec, comm, [](const auto &set_1, const auto &set_2) {
+          // merge vectors, sort, and eliminate duplicates -> mimic std::set<T>
+          auto result = set_1;
+          result.insert(result.end(), set_2.begin(), set_2.end());
+          std::sort(result.begin(), result.end());
+          result.erase(std::unique(result.begin(), result.end()), result.end());
+          return result;
+        });
     }
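
The rewritten compute_set_union() above is the first client of the new all_reduce(): the combiner merges two partial results, sorts them, and drops duplicates, while all_reduce() itself handles the communication (ceil(log2(nproc)) pairwise rounds in which packed partial results are sent toward rank 0, followed by a broadcast of the packed result). A short, hypothetical call site, assuming the program runs with several MPI ranks:

// Sketch: rank r contributes {r, r + 1}; after the call every rank holds
// the duplicate-free, sorted union {0, 1, ..., n_ranks}.
#include <deal.II/base/mpi.h>

#include <vector>

using namespace dealii;

std::vector<unsigned int>
example_union(const MPI_Comm &comm)
{
  const unsigned int r = Utilities::MPI::this_mpi_process(comm);
  return Utilities::MPI::compute_set_union(
    std::vector<unsigned int>{r, r + 1}, comm);
}
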
 
 
diff --git a/source/base/mpi.inst.in b/source/base/mpi.inst.in
index 9e1de666fec55599a8ad88251989843e4b5ffbd0..0b59283041db101a69ca347721ae5f9dbd3226f0 100644 (file)
@@ -67,6 +67,17 @@ for (S : MPI_SCALARS)
                          const MPI_Comm &,
                          const ArrayView<S> &);
 
+    template S all_reduce(
+      const S &                                     vec,
+      const MPI_Comm &                              comm,
+      const std::function<S(const S &, const S &)> &process);
+
+    template std::vector<S> all_reduce(
+      const std::vector<S> &                                       vec,
+      const MPI_Comm &                                             comm,
+      const std::function<std::vector<S>(const std::vector<S> &,
+                                         const std::vector<S> &)> &process);
+
     // The fixed-length array (i.e., things declared like T(&values)[N])
     // versions of the functions above live in the header file mpi.h since the
     // length (N) is a compile-time constant. Those functions all call
diff --git a/tests/mpi/allreduce_01.cc b/tests/mpi/allreduce_01.cc
new file mode 100644 (file)
index 0000000..46880fc
--- /dev/null
@@ -0,0 +1,68 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2021 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+// Test Utilities::MPI::all_reduce().
+
+#include <deal.II/base/mpi.h>
+
+#include "../tests.h"
+
+
+template <typename T>
+void
+check(const std::function<std::vector<T>(const std::vector<T> &,
+                                         const std::vector<T> &)> &fu)
+{
+  const auto result = Utilities::MPI::all_reduce(
+    std::vector<T>{Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)},
+    MPI_COMM_WORLD,
+    fu);
+
+  for (const auto r : result)
+    deallog << r << " ";
+  deallog << std::endl;
+}
+
+
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll                    log;
+
+  // compute min
+  check<unsigned int>([](const auto &a, const auto &b) {
+    return std::vector<unsigned int>{std::min(a[0], b[0])};
+  });
+
+  // compute max
+  check<unsigned int>([](const auto &a, const auto &b) {
+    return std::vector<unsigned int>{std::max(a[0], b[0])};
+  });
+
+  // compute sum
+  check<unsigned int>([](const auto &a, const auto &b) {
+    return std::vector<unsigned int>{a[0] + b[0]};
+  });
+
+  // perform gather all
+  check<unsigned int>([](const auto &a, const auto &b) {
+    auto result = a;
+    result.insert(result.end(), b.begin(), b.end());
+    return result;
+  });
+}
diff --git a/tests/mpi/allreduce_01.mpirun=1.output b/tests/mpi/allreduce_01.mpirun=1.output
new file mode 100644 (file)
index 0000000..8a9e3e0
--- /dev/null
@@ -0,0 +1,5 @@
+
+DEAL:0::0 
+DEAL:0::0 
+DEAL:0::0 
+DEAL:0::0 
diff --git a/tests/mpi/allreduce_01.mpirun=5.output b/tests/mpi/allreduce_01.mpirun=5.output
new file mode 100644 (file)
index 0000000..0b8dac6
--- /dev/null
@@ -0,0 +1,29 @@
+
+DEAL:0::0 
+DEAL:0::4 
+DEAL:0::10 
+DEAL:0::0 1 2 3 4 
+
+DEAL:1::0 
+DEAL:1::4 
+DEAL:1::10 
+DEAL:1::0 1 2 3 4 
+
+
+DEAL:2::0 
+DEAL:2::4 
+DEAL:2::10 
+DEAL:2::0 1 2 3 4 
+
+
+DEAL:3::0 
+DEAL:3::4 
+DEAL:3::10 
+DEAL:3::0 1 2 3 4 
+
+
+DEAL:4::0 
+DEAL:4::4 
+DEAL:4::10 
+DEAL:4::0 1 2 3 4 
+
