https://gitweb.dealii.org/ - dealii.git/commitdiff
Generalize ComputeIndexOwner::Dictionary so that it can handle gaps in the local... 9032/head
author Peter Munch <peterrmuench@gmail.com>
Sun, 10 Nov 2019 14:55:30 +0000 (15:55 +0100)
committer Peter Munch <peterrmuench@gmail.com>
Tue, 19 Nov 2019 21:55:38 +0000 (22:55 +0100)
include/deal.II/base/mpi_compute_index_owner_internal.h
source/base/partitioner.cc
tests/mpi/compute_index_owner_01.cc [new file with mode: 0644]
tests/mpi/compute_index_owner_01.mpirun=2.output [new file with mode: 0644]
tests/mpi/parallel_partitioner_09.cc [new file with mode: 0644]
tests/mpi/parallel_partitioner_09.mpirun=3.output [new file with mode: 0644]

index a0aaeca124398fd4d115db838cfa5a427765c893..ba3b542c25625b3fa9eec0179964bfdf5be7fd9d 100644 (file)
@@ -34,6 +34,120 @@ namespace Utilities
        */
       namespace ComputeIndexOwner
       {
+        /**
+         * Specialization of ConsensusAlgorithmProcess for setting up the
+         * Dictionary even if there are ranges in the IndexSet space that are
+         * not owned by any process.
+         *
+         * @note Only for internal usage.
+         */
+        class DictionaryPayLoad
+          : public ConsensusAlgorithmProcess<
+              std::pair<types::global_dof_index, types::global_dof_index>,
+              unsigned int>
+        {
+        public:
+          /**
+           * Constructor.
+           */
+          DictionaryPayLoad(
+            const std::map<unsigned int,
+                           std::vector<std::pair<types::global_dof_index,
+                                                 types::global_dof_index>>>
+              &                        buffers,
+            std::vector<unsigned int> &actually_owning_ranks,
+            const std::pair<types::global_dof_index, types::global_dof_index>
+              &                        local_range,
+            std::vector<unsigned int> &actually_owning_rank_list)
+            : buffers(buffers)
+            , actually_owning_ranks(actually_owning_ranks)
+            , local_range(local_range)
+            , actually_owning_rank_list(actually_owning_rank_list)
+          {}
+
+          /**
+           * Implementation of
+           * Utilities::MPI::ConsensusAlgorithmProcess::compute_targets().
+           */
+          virtual std::vector<unsigned int>
+          compute_targets() override
+          {
+            std::vector<unsigned int> targets;
+            for (const auto &rank_pair : buffers)
+              targets.push_back(rank_pair.first);
+
+            return targets;
+          }
+
+          /**
+           * Implementation of
+           * Utilities::MPI::ConsensusAlgorithmProcess::pack_recv_buffer().
+           */
+          virtual void
+          pack_recv_buffer(const int other_rank,
+                           std::vector<std::pair<types::global_dof_index,
+                                                 types::global_dof_index>>
+                             &send_buffer) override
+          {
+            send_buffer = this->buffers.at(other_rank);
+          }
+
+          /**
+           * Implementation of
+           * Utilities::MPI::ConsensusAlgorithmProcess::process_request().
+           */
+          virtual void
+          process_request(
+            const unsigned int                                     other_rank,
+            const std::vector<std::pair<types::global_dof_index,
+                                        types::global_dof_index>> &buffer_recv,
+            std::vector<unsigned int> &request_buffer) override
+          {
+            (void)request_buffer; // not needed
+
+
+            // process message: loop over all intervals
+            for (auto interval : buffer_recv)
+              {
+#ifdef DEBUG
+                for (types::global_dof_index i = interval.first;
+                     i < interval.second;
+                     i++)
+                  Assert(actually_owning_ranks[i - local_range.first] ==
+                           numbers::invalid_unsigned_int,
+                         ExcInternalError());
+                Assert(interval.first >= local_range.first &&
+                         interval.first < local_range.second,
+                       ExcInternalError());
+                Assert(interval.second > local_range.first &&
+                         interval.second <= local_range.second,
+                       ExcInternalError());
+#endif
+                std::fill(actually_owning_ranks.data() + interval.first -
+                            local_range.first,
+                          actually_owning_ranks.data() + interval.second -
+                            local_range.first,
+                          other_rank);
+              }
+            actually_owning_rank_list.push_back(other_rank);
+          }
+
+        private:
+          const std::map<unsigned int,
+                         std::vector<std::pair<types::global_dof_index,
+                                               types::global_dof_index>>>
+            &buffers;
+
+          std::vector<unsigned int> &actually_owning_ranks;
+
+          const std::pair<types::global_dof_index, types::global_dof_index>
+            &local_range;
+
+          std::vector<unsigned int> &actually_owning_rank_list;
+        };
+
+
+
         /**
          * Dictionary class with basic partitioning in terms of a single
          * interval of fixed size known to all MPI ranks for two-stage index
@@ -121,6 +235,10 @@ namespace Utilities
                                            types::global_dof_index>>>
               buffers;
 
+            std::fill(actually_owning_ranks.begin(),
+                      actually_owning_ranks.end(),
+                      numbers::invalid_subdomain_id);
+
             // 2) collect relevant processes and process local dict entries
             for (auto interval = owned_indices.begin_intervals();
                  interval != owned_indices.end_intervals();
@@ -180,89 +298,120 @@ namespace Utilities
                   }
               }
 
-            // protect the following communication steps using a mutex:
-            static CollectiveMutex      mutex;
-            CollectiveMutex::ScopedLock lock(mutex, comm);
-
-            const int mpi_tag =
-              Utilities::MPI::internal::Tags::dictionary_reinit;
-
             n_dict_procs_in_owned_indices = buffers.size();
             std::vector<MPI_Request> request;
-            request.reserve(n_dict_procs_in_owned_indices);
 
-            // 3) send messages with local dofs to the right dict process
-            for (const auto &rank_pair : buffers)
+            // Check if index set space is partitioned globally without gaps.
+            if (Utilities::MPI::sum(owned_indices.n_elements(), comm) ==
+                owned_indices.size())
               {
-                request.push_back(MPI_Request());
-                const int ierr = MPI_Isend(rank_pair.second.data(),
-                                           rank_pair.second.size() * 2,
-                                           DEAL_II_DOF_INDEX_MPI_TYPE,
-                                           rank_pair.first,
-                                           mpi_tag,
-                                           comm,
-                                           &request.back());
-                AssertThrowMPI(ierr);
-              }
+                // no gaps: the setup is simple! Processes send their locally
+                // owned indices to the dictionary. The dictionary stores the
+                // sending rank for each index. The dictionary knows that it
+                // is fully set up once all indices it is responsible for
+                // have been processed.
 
-            // 4) receive messages until all dofs in dict are processed
-            while (this->local_size != dic_local_received)
-              {
-                // wait for an incoming message
-                MPI_Status status;
-                auto ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
-                AssertThrowMPI(ierr);
+                request.reserve(n_dict_procs_in_owned_indices);
 
-                // retrieve size of incoming message
-                int number_amount;
-                ierr = MPI_Get_count(&status,
-                                     DEAL_II_DOF_INDEX_MPI_TYPE,
-                                     &number_amount);
-                AssertThrowMPI(ierr);
+                // protect the following communication steps using a mutex:
+                static CollectiveMutex      mutex;
+                CollectiveMutex::ScopedLock lock(mutex, comm);
 
-                const auto other_rank = status.MPI_SOURCE;
-                actually_owning_rank_list.push_back(other_rank);
+                const int mpi_tag =
+                  Utilities::MPI::internal::Tags::dictionary_reinit;
 
-                // receive message
-                Assert(number_amount % 2 == 0, ExcInternalError());
-                std::vector<
-                  std::pair<types::global_dof_index, types::global_dof_index>>
-                  buffer(number_amount / 2);
-                ierr = MPI_Recv(buffer.data(),
-                                number_amount,
-                                DEAL_II_DOF_INDEX_MPI_TYPE,
-                                status.MPI_SOURCE,
-                                status.MPI_TAG,
-                                comm,
-                                &status);
-                AssertThrowMPI(ierr);
 
-                // process message: loop over all intervals
-                for (auto interval : buffer)
+                // 3) send messages with local dofs to the right dict process
+                for (const auto &rank_pair : buffers)
+                  {
+                    request.push_back(MPI_Request());
+                    const int ierr = MPI_Isend(rank_pair.second.data(),
+                                               rank_pair.second.size() * 2,
+                                               DEAL_II_DOF_INDEX_MPI_TYPE,
+                                               rank_pair.first,
+                                               mpi_tag,
+                                               comm,
+                                               &request.back());
+                    AssertThrowMPI(ierr);
+                  }
+
+                // 4) receive messages until all dofs in dict are processed
+                while (this->local_size != dic_local_received)
                   {
+                    // wait for an incoming message
+                    MPI_Status status;
+                    auto       ierr =
+                      MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
+                    AssertThrowMPI(ierr);
+
+                    // retrieve size of incoming message
+                    int number_amount;
+                    ierr = MPI_Get_count(&status,
+                                         DEAL_II_DOF_INDEX_MPI_TYPE,
+                                         &number_amount);
+                    AssertThrowMPI(ierr);
+
+                    const auto other_rank = status.MPI_SOURCE;
+                    actually_owning_rank_list.push_back(other_rank);
+
+                    // receive message
+                    Assert(number_amount % 2 == 0, ExcInternalError());
+                    std::vector<std::pair<types::global_dof_index,
+                                          types::global_dof_index>>
+                      buffer(number_amount / 2);
+                    ierr = MPI_Recv(buffer.data(),
+                                    number_amount,
+                                    DEAL_II_DOF_INDEX_MPI_TYPE,
+                                    status.MPI_SOURCE,
+                                    status.MPI_TAG,
+                                    comm,
+                                    MPI_STATUS_IGNORE);
+                    AssertThrowMPI(ierr);
+                    // process message: loop over all intervals
+                    for (auto interval : buffer)
+                      {
 #  ifdef DEBUG
-                    for (types::global_dof_index i = interval.first;
-                         i < interval.second;
-                         i++)
-                      Assert(actually_owning_ranks[i - local_range.first] ==
-                               numbers::invalid_unsigned_int,
-                             ExcInternalError());
-                    Assert(interval.first >= local_range.first &&
-                             interval.first < local_range.second,
-                           ExcInternalError());
-                    Assert(interval.second > local_range.first &&
-                             interval.second <= local_range.second,
-                           ExcInternalError());
+                        for (types::global_dof_index i = interval.first;
+                             i < interval.second;
+                             i++)
+                          Assert(actually_owning_ranks[i - local_range.first] ==
+                                   numbers::invalid_unsigned_int,
+                                 ExcInternalError());
+                        Assert(interval.first >= local_range.first &&
+                                 interval.first < local_range.second,
+                               ExcInternalError());
+                        Assert(interval.second > local_range.first &&
+                                 interval.second <= local_range.second,
+                               ExcInternalError());
 #  endif
 
-                    std::fill(actually_owning_ranks.data() + interval.first -
-                                local_range.first,
-                              actually_owning_ranks.data() + interval.second -
-                                local_range.first,
-                              other_rank);
-                    dic_local_received += interval.second - interval.first;
+                        std::fill(actually_owning_ranks.data() +
+                                    interval.first - local_range.first,
+                                  actually_owning_ranks.data() +
+                                    interval.second - local_range.first,
+                                  other_rank);
+                        dic_local_received += interval.second - interval.first;
+                      }
                   }
               }
+            else
+              {
+                // with gaps: use a ConsensusAlgorithm to determine when all
+                // dictionaries have been set up.
+
+                // 3/4) use ConsensusAlgorithm to send messages with local dofs
+                // to the right dict process
+                DictionaryPayLoad temp(buffers,
+                                       actually_owning_ranks,
+                                       local_range,
+                                       actually_owning_rank_list);
+
+                ConsensusAlgorithmSelector<
+                  std::pair<types::global_dof_index, types::global_dof_index>,
+                  unsigned int>
+                  consensus_algo(temp, comm);
+                consensus_algo.run();
+              }
 
             std::sort(actually_owning_rank_list.begin(),
                       actually_owning_rank_list.end());
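
The branch between the two setup paths above hinges on a single global reduction: assuming disjoint locally owned sets, the index space has no gaps exactly when the locally owned element counts sum to the total size of the index space. A minimal sketch of that check (the helper name is hypothetical; the calls are the deal.II ones used in the hunk above):

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>

// Gap check as in the Dictionary setup: the index space is covered without
// gaps exactly when the locally owned counts sum up to its total size.
bool index_space_has_no_gaps(const dealii::IndexSet &owned_indices,
                             const MPI_Comm          comm)
{
  return dealii::Utilities::MPI::sum(owned_indices.n_elements(), comm) ==
         owned_indices.size();
}

If this check fails, at least one index has no owner, so the dictionary cannot simply count received indices to know when it is complete; this is why the gap case falls back to DictionaryPayLoad driven by a ConsensusAlgorithmSelector.
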
index 9d987a1ef13a0000e010753ac0e7f1a0477e8c4b..6f7a778dbc249b5f68b218fca330514b13371ea2 100644 (file)
@@ -194,8 +194,10 @@ namespace Utilities
         }
 
       types::global_dof_index my_size = local_size();
-      // Allow non-zero start index for the vector. send this data to all
-      // processors
+
+      // Allow non-zero start index for the vector. Part 1:
+      // Assume for now that the index set of rank 0 starts with 0
+      // and therefore has an increased size.
       if (my_pid == 0)
         my_size += local_range_data.first;
 
@@ -209,7 +211,18 @@ namespace Utilities
                                     communicator);
         AssertThrowMPI(ierr);
       }
-      if (my_shift != local_range_data.first)
+
+      // Allow non-zero start index for the vector. Part 2:
+      // We correct the assumption made above: the index set
+      // of rank 0 actually starts from its correct value,
+      // i.e., we set the shift of rank 0 to the start of its
+      // locally owned range.
+      if (my_pid == 0)
+        my_shift = local_range_data.first;
+
+      // Fix the index start in case the index set could not give us that
+      // information.
+      if (local_range_data.first == 0 && my_shift != 0)
         {
           const types::global_dof_index old_local_size = local_size();
           local_range_data.first                       = my_shift;
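
The shift referenced in the partitioner.cc hunk is a collective prefix sum over the (possibly enlarged) local sizes, followed by the rank-0 correction added above. A common way to express such a prefix sum is MPI_Exscan; the following stand-alone sketch is illustrative only (the helper name is made up, and it is not the library code verbatim):

#include <deal.II/base/types.h>

#include <mpi.h>

using dealii::types::global_dof_index;

// Exclusive prefix sum over the local sizes: every rank receives the sum of
// the sizes of all lower ranks, i.e. the global start index of its range.
global_dof_index compute_range_start(const global_dof_index local_size,
                                     const MPI_Comm         comm)
{
  global_dof_index start = 0;
  const int        ierr  = MPI_Exscan(&local_size,
                               &start,
                               1,
                               DEAL_II_DOF_INDEX_MPI_TYPE,
                               MPI_SUM,
                               comm);
  (void)ierr; // the library would check this with AssertThrowMPI(ierr)

  // The receive buffer on rank 0 is undefined for MPI_Exscan; analogously to
  // "Part 2" above, rank 0 would overwrite the result with the actual start
  // of its locally owned range.
  return start;
}
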
diff --git a/tests/mpi/compute_index_owner_01.cc b/tests/mpi/compute_index_owner_01.cc
new file mode 100644 (file)
index 0000000..031f39a
--- /dev/null
@@ -0,0 +1,102 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2010 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+// Test that ComputeIndexOwner can handle IndexSets with gaps (also
+// in the context of Vectors and Partitioners)
+
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/mpi_compute_index_owner_internal.h>
+#include <deal.II/base/partitioner.h>
+
+#include "../tests.h"
+
+
+void
+test()
+{
+  MPI_Comm comm = MPI_COMM_WORLD;
+
+
+  unsigned int myid    = Utilities::MPI::this_mpi_process(comm);
+  unsigned int numproc = Utilities::MPI::n_mpi_processes(comm);
+
+  unsigned int offset = 1;
+  unsigned int size   = 1;
+
+  const unsigned int global_size = (numproc + offset) * size;
+
+  // locals
+  IndexSet local_owned(global_size);
+  local_owned.add_range((myid + offset) * size, (myid + offset + 1) * size);
+
+  // ghosts
+  IndexSet local_relevant(global_size);
+  local_relevant.add_range(((myid + 1) % numproc + offset) * size,
+                           ((myid + 1) % numproc + offset + 1) * size);
+
+  deallog << "local" << std::endl;
+  local_owned.print(deallog);
+  deallog << "ghost" << std::endl;
+  local_relevant.print(deallog);
+
+  {
+    std::vector<unsigned int> owning_ranks_of_ghosts(
+      local_relevant.n_elements());
+
+    Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmPayload
+      process(local_owned, local_relevant, comm, owning_ranks_of_ghosts, true);
+
+    Utilities::MPI::ConsensusAlgorithmSelector<
+      std::pair<types::global_dof_index, types::global_dof_index>,
+      unsigned int>
+      consensus_algorithm(process, comm);
+    consensus_algorithm.run();
+
+    deallog << "owning_ranks_of_ghosts:" << std::endl;
+    for (auto i : owning_ranks_of_ghosts)
+      deallog << i << " ";
+    deallog << std::endl;
+
+    deallog << "requesters:" << std::endl;
+    std::map<unsigned int, IndexSet> import_data = process.get_requesters();
+    for (const auto &m : import_data)
+      {
+        deallog << m.first << ": ";
+        m.second.print(deallog);
+        deallog << std::endl;
+      }
+  }
+
+
+  {
+    Utilities::MPI::Partitioner v(local_owned, local_relevant, comm);
+
+    for (unsigned int i = 0; i < v.ghost_targets().size(); ++i)
+      for (unsigned int j = 0; j < v.ghost_targets()[i].second; j++)
+        deallog << v.ghost_targets()[i].first << std::endl;
+  }
+}
+
+
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll                    all;
+
+  test();
+}
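
For concreteness: with two ranks, the test parameters offset = 1 and size = 1 give a global index space of size 3 in which rank 0 owns {1}, rank 1 owns {2}, and index 0 has no owner, which is exactly the layout visible in the output file below. A small serial snippet illustrating that layout (purely illustrative, not part of the test):

#include <deal.II/base/index_set.h>

#include <iostream>

int main()
{
  // Index layout of compute_index_owner_01 for numproc = 2, offset = 1,
  // size = 1: rank 0 owns {1}, rank 1 owns {2}, index 0 is the gap.
  dealii::IndexSet owned_on_rank_0(3);
  owned_on_rank_0.add_range(1, 2);

  dealii::IndexSet owned_on_rank_1(3);
  owned_on_rank_1.add_range(2, 3);

  std::cout << "covered: "
            << owned_on_rank_0.n_elements() + owned_on_rank_1.n_elements()
            << " of " << owned_on_rank_0.size() << " indices" << std::endl;
  // prints "covered: 2 of 3 indices", i.e. the gap check fails here
}
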
diff --git a/tests/mpi/compute_index_owner_01.mpirun=2.output b/tests/mpi/compute_index_owner_01.mpirun=2.output
new file mode 100644 (file)
index 0000000..c4eb51c
--- /dev/null
@@ -0,0 +1,23 @@
+
+DEAL:0::local
+DEAL:0::{1}
+DEAL:0::ghost
+DEAL:0::{2}
+DEAL:0::owning_ranks_of_ghosts:
+DEAL:0::1 
+DEAL:0::requesters:
+DEAL:0::1: {1}
+DEAL:0::
+DEAL:0::1
+
+DEAL:1::local
+DEAL:1::{2}
+DEAL:1::ghost
+DEAL:1::{1}
+DEAL:1::owning_ranks_of_ghosts:
+DEAL:1::0 
+DEAL:1::requesters:
+DEAL:1::0: {2}
+DEAL:1::
+DEAL:1::0
+
diff --git a/tests/mpi/parallel_partitioner_09.cc b/tests/mpi/parallel_partitioner_09.cc
new file mode 100644 (file)
index 0000000..d1dd815
--- /dev/null
@@ -0,0 +1,54 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// test MPI::Partitioner setup for the case that the owned indices do not
+// cover the complete index space, i.e., some index is skipped (related
+// tests: parallel_vector_16 and compute_index_owner_01)
+
+#include <deal.II/base/partitioner.h>
+
+#include "../tests.h"
+
+int
+main(int argc, char **argv)
+{
+  using namespace dealii;
+
+  Utilities::MPI::MPI_InitFinalize init(argc, argv, 1);
+
+  MPILogInitAll log;
+
+  IndexSet owned(Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD) + 1);
+  owned.add_index(Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) + 1);
+
+  IndexSet ghosted(owned.size());
+  ghosted.add_index(1 + (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) + 1) %
+                          Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD));
+
+  Utilities::MPI::Partitioner part(owned, ghosted, MPI_COMM_WORLD);
+
+  deallog << "ghost targets: ";
+  for (const auto p : part.ghost_targets())
+    deallog << "p" << p.first << " n_indices=" << p.second << "   ";
+  deallog << std::endl;
+  deallog << "import targets: ";
+  for (const auto p : part.import_targets())
+    deallog << "p" << p.first << " n_indices=" << p.second << "   ";
+  deallog << std::endl;
+  deallog << "import indices: ";
+  for (const auto p : part.import_indices())
+    deallog << "[" << p.first << " " << p.second << ") ";
+  deallog << std::endl;
+}
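
Once such a Partitioner is set up it can be used like any other one, for instance to back a ghosted parallel vector (the scenario of the related test parallel_vector_16). A hedged usage sketch; the helper name is made up and the snippet is not part of this commit:

#include <deal.II/base/partitioner.h>

#include <deal.II/lac/la_parallel_vector.h>

#include <memory>

void make_ghosted_vector(const dealii::IndexSet &owned,
                         const dealii::IndexSet &ghosted,
                         const MPI_Comm          comm)
{
  // Build the partitioner exactly as in the test above; gaps in the owned
  // index sets are handled by the generalized dictionary setup.
  const auto partitioner =
    std::make_shared<dealii::Utilities::MPI::Partitioner>(owned,
                                                          ghosted,
                                                          comm);

  dealii::LinearAlgebra::distributed::Vector<double> vec(partitioner);
  vec = 1.0;

  // Exchange ghost entries according to the partitioner's ghost and import
  // targets, i.e. the ones printed by the test.
  vec.update_ghost_values();
}
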
diff --git a/tests/mpi/parallel_partitioner_09.mpirun=3.output b/tests/mpi/parallel_partitioner_09.mpirun=3.output
new file mode 100644 (file)
index 0000000..a51ac04
--- /dev/null
@@ -0,0 +1,14 @@
+
+DEAL:0::ghost targets: p1 n_indices=1   
+DEAL:0::import targets: p2 n_indices=1   
+DEAL:0::import indices: [0 1) 
+
+DEAL:1::ghost targets: p2 n_indices=1   
+DEAL:1::import targets: p0 n_indices=1   
+DEAL:1::import indices: [0 1) 
+
+
+DEAL:2::ghost targets: p0 n_indices=1   
+DEAL:2::import targets: p1 n_indices=1   
+DEAL:2::import indices: [0 1) 
+
