https://gitweb.dealii.org/ - dealii.git/commitdiff
Use MPI in tests
author    Peter Munch <peterrmuench@gmail.com>
          Sat, 21 Mar 2020 17:27:53 +0000 (18:27 +0100)
committer Peter Munch <peterrmuench@gmail.com>
          Sat, 21 Mar 2020 17:27:53 +0000 (18:27 +0100)
include/deal.II/lac/la_sm_partitioner.h
include/deal.II/lac/la_sm_vector.templates.h
include/deal.II/matrix_free/matrix_free.h
tests/sm/vector_sm_01.cc
tests/sm/vector_sm_01.mpirun=1.output [moved from tests/sm/vector_sm_01.output with 100% similarity]

diff --git a/include/deal.II/lac/la_sm_partitioner.h b/include/deal.II/lac/la_sm_partitioner.h
index 5245b3198ff99cdb70f43be3116b60cad135ec86..02cc66481c8f7ee8921b3f271f2fbf305fdc00c5 100644
@@ -18,6 +18,7 @@
 
 
 #include <deal.II/base/config.h>
+#include <deal.II/base/mpi_compute_index_owner_internal.h>
 
 #include <deal.II/lac/communication_pattern_base.h>
 
@@ -32,10 +33,12 @@ namespace LinearAlgebra
     class Partitioner : public LinearAlgebra::CommunicationPatternBase
     {
     public:
-      Partitioner(const MPI_Comm &comm, const MPI_Comm &comm_sm)
+      Partitioner(const MPI_Comm &comm, const MPI_Comm &comm_sm, const IndexSet &is_locally_owned, const IndexSet &is_locally_ghost)
         : comm(comm)
         , comm_sm(comm_sm)
-      {}
+      {
+        reinit(is_locally_owned, is_locally_ghost);
+      }
 
       const MPI_Comm &
       get_mpi_communicator() const override
@@ -44,18 +47,210 @@ namespace LinearAlgebra
       }
 
       void
-      reinit(const IndexSet &indexset_has,
-             const IndexSet &indexset_want,
+      reinit(const IndexSet &is_locally_owned,
+             const IndexSet &is_locally_ghost,
              const MPI_Comm &communicator) override
       {
-        (void)indexset_has;
-        (void)indexset_want;
+        Assert(false, ExcNotImplemented());
+        (void)is_locally_owned;
+        (void)is_locally_ghost;
         (void)communicator;
       }
 
+      void
+      reinit(const IndexSet &is_locally_owned,
+             const IndexSet &is_locally_ghost)
+      {
+        this->n_local_elements = is_locally_owned.n_elements();
+
+        std::vector<unsigned int> sm_ranks(
+          Utilities::MPI::n_mpi_processes(comm_sm));
+
+        const unsigned int rank = Utilities::MPI::this_mpi_process(comm);
+
+        MPI_Allgather(
+          &rank, 1, MPI_UNSIGNED, sm_ranks.data(), 1, MPI_UNSIGNED, comm_sm);
+
+        std::vector<unsigned int> owning_ranks_of_ghosts(
+          is_locally_ghost.n_elements());
+
+        Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload
+          process(is_locally_owned,
+                  is_locally_ghost,
+                  comm,
+                  owning_ranks_of_ghosts,
+                  true);
+
+        Utilities::MPI::ConsensusAlgorithms::Selector<
+          std::pair<types::global_dof_index, types::global_dof_index>,
+          unsigned int>
+          consensus_algorithm(process, comm);
+        consensus_algorithm.run();
+
+        {
+          std::map<unsigned int, std::vector<types::global_dof_index>>
+            rank_to_local_indices;
+
+          for (unsigned int i = 0; i < owning_ranks_of_ghosts.size(); i++)
+            rank_to_local_indices[owning_ranks_of_ghosts[i]].push_back(i);
+
+          unsigned int offset = 0;
+
+
+          for (const auto &rank_and_local_indices : rank_to_local_indices)
+            {
+              const auto ptr = std::find(sm_ranks.begin(),
+                                         sm_ranks.end(),
+                                         rank_and_local_indices.first);
+
+              if (ptr == sm_ranks.end())
+                {
+                  // remote process
+                  recv_remote_ranks.push_back(rank_and_local_indices.first);
+                  recv_remote_ptr.push_back(
+                    recv_remote_ptr.back() +
+                    rank_and_local_indices.second.size());
+                }
+              else
+                {
+                  // shared process
+                  recv_sm_ranks.push_back(std::distance(sm_ranks.begin(), ptr));
+                  recv_sm_ptr.push_back(recv_sm_ptr.back() +
+                                        rank_and_local_indices.second.size());
+                  recv_sm_offset.push_back(is_locally_owned.n_elements() +
+                                           offset);
+                }
+              offset += rank_and_local_indices.second.size();
+            }
+          recv_remote_req.resize(recv_remote_ranks.size());
+          recv_sm_req.resize(recv_sm_ranks.size());
+
+          recv_sm_indices.resize(recv_sm_ptr.back());
+        }
+
+        {
+          const auto rank_to_global_indices = process.get_requesters();
+
+          for (const auto &rank_and_global_indices : rank_to_global_indices)
+            {
+              const auto ptr = std::find(sm_ranks.begin(),
+                                         sm_ranks.end(),
+                                         rank_and_global_indices.first);
+
+              if (ptr == sm_ranks.end())
+                {
+                  // remote process
+                  send_remote_ranks.push_back(rank_and_global_indices.first);
+
+                  for (const auto &i : rank_and_global_indices.second)
+                    send_remote_indices.push_back(
+                      is_locally_owned.index_within_set(i));
+
+                  send_remote_ptr.push_back(send_remote_indices.size());
+                }
+              else
+                {
+                  // shared process
+                  send_sm_ranks.push_back(std::distance(sm_ranks.begin(), ptr));
+
+                  for (const auto &i : rank_and_global_indices.second)
+                    send_sm_indices.push_back(
+                      is_locally_owned.index_within_set(i));
+
+                  send_sm_ptr.push_back(send_sm_indices.size());
+                }
+            }
+          send_remote_req.resize(send_remote_ranks.size());
+          send_sm_req.resize(send_sm_ranks.size());
+        }
+
+        {
+          for (unsigned int i = 0; i < send_sm_ranks.size(); i++)
+            MPI_Isend(send_sm_indices.data() + send_sm_ptr[i],
+                      send_sm_ptr[i + 1] - send_sm_ptr[i],
+                      MPI_UNSIGNED,
+                      send_sm_ranks[i],
+                      2,
+                      comm_sm,
+                      send_sm_req.data() + i);
+
+          for (unsigned int i = 0; i < recv_sm_ranks.size(); i++)
+            MPI_Irecv(recv_sm_indices.data() + recv_sm_ptr[i],
+                      recv_sm_ptr[i + 1] - recv_sm_ptr[i],
+                      MPI_UNSIGNED,
+                      recv_sm_ranks[i],
+                      2,
+                      comm_sm,
+                      recv_sm_req.data() + i);
+
+          MPI_Waitall(recv_sm_req.size(),
+                      recv_sm_req.data(),
+                      MPI_STATUSES_IGNORE);
+          MPI_Waitall(send_sm_req.size(),
+                      send_sm_req.data(),
+                      MPI_STATUSES_IGNORE);
+        }
+
+        {
+          send_sm_offset.resize(send_sm_ranks.size());
+
+          for (unsigned int i = 0; i < send_sm_ranks.size(); i++)
+            MPI_Irecv(send_sm_offset.data() + i,
+                      1,
+                      MPI_UNSIGNED,
+                      send_sm_ranks[i],
+                      3,
+                      comm_sm,
+                      send_sm_req.data() + i);
+
+          for (unsigned int i = 0; i < recv_sm_ranks.size(); i++)
+            MPI_Isend(recv_sm_offset.data() + i,
+                      1,
+                      MPI_UNSIGNED,
+                      recv_sm_ranks[i],
+                      3,
+                      comm_sm,
+                      recv_sm_req.data() + i);
+
+          MPI_Waitall(recv_sm_req.size(),
+                      recv_sm_req.data(),
+                      MPI_STATUSES_IGNORE);
+          MPI_Waitall(send_sm_req.size(),
+                      send_sm_req.data(),
+                      MPI_STATUSES_IGNORE);
+        }
+
+        buffer.resize(send_remote_ptr.back());
+      }
+
     private:
       const MPI_Comm &comm;
       const MPI_Comm &comm_sm;
+      
+      unsigned int n_local_elements;
+
+      AlignedVector<Number> buffer;
+
+      std::vector<unsigned int>            recv_remote_ranks;
+      std::vector<types::global_dof_index> recv_remote_ptr = {0};
+      std::vector<MPI_Request>             recv_remote_req;
+
+      std::vector<unsigned int> recv_sm_ranks;
+      std::vector<unsigned int> recv_sm_ptr = {0};
+      std::vector<MPI_Request>  recv_sm_req;
+      std::vector<unsigned int> recv_sm_indices;
+      std::vector<unsigned int> recv_sm_offset;
+
+      std::vector<unsigned int> send_remote_ranks;
+      std::vector<unsigned int> send_remote_ptr = {0};
+      std::vector<unsigned int> send_remote_indices;
+      std::vector<MPI_Request>  send_remote_req;
+
+      std::vector<unsigned int> send_sm_ranks;
+      std::vector<unsigned int> send_sm_ptr = {0};
+      std::vector<unsigned int> send_sm_indices;
+      std::vector<MPI_Request>  send_sm_req;
+      std::vector<unsigned int> send_sm_offset;
     };
 
   } // end of namespace SharedMPI
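
The new reinit() above classifies every ghost owner as either a shared-memory neighbor or a remote process: the global ranks of all members of comm_sm are gathered with MPI_Allgather, and an owner determined by the consensus algorithm counts as "shared" exactly when its global rank appears in that list. A minimal standalone sketch of that check (the function owner_is_shared and its parameters are illustrative names, not part of the patch):

#include <mpi.h>

#include <algorithm>
#include <vector>

// Returns true if the process owning a ghost index (identified by its rank
// in the global communicator) is a member of the same shared-memory
// communicator as the calling process.
bool
owner_is_shared(const MPI_Comm     comm,
                const MPI_Comm     comm_sm,
                const unsigned int owner_rank)
{
  int rank_in_comm = 0;
  MPI_Comm_rank(comm, &rank_in_comm);

  int sm_size = 0;
  MPI_Comm_size(comm_sm, &sm_size);

  // Each member of comm_sm contributes its rank in the global communicator,
  // exactly as in Partitioner::reinit() above.
  std::vector<unsigned int> sm_ranks(sm_size);
  const unsigned int        rank = static_cast<unsigned int>(rank_in_comm);
  MPI_Allgather(
    &rank, 1, MPI_UNSIGNED, sm_ranks.data(), 1, MPI_UNSIGNED, comm_sm);

  // The owner is a shared-memory neighbor iff its global rank was gathered.
  return std::find(sm_ranks.begin(), sm_ranks.end(), owner_rank) !=
         sm_ranks.end();
}

In the patch, the shared neighbors identified this way then exchange, with nonblocking point-to-point messages over comm_sm, the local indices that will be read (tag 2) and the offsets at which the ghost values are stored (tag 3).
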
diff --git a/include/deal.II/lac/la_sm_vector.templates.h b/include/deal.II/lac/la_sm_vector.templates.h
index 2e46004f13f75b71b38fd51596c1001ce0ffab52..a55fd3d6afe53fde2939fa7c980841b9cf558a18 100644
@@ -97,7 +97,8 @@ namespace LinearAlgebra
     void
     Vector<Number, MemorySpaceType>::clear_mpi_requests()
     {
-      Assert(false, ExcNotImplemented());
+      // TODO: what is the use of this?
+      // it should probably be delegated to the partitioner
     }
 
 
diff --git a/include/deal.II/matrix_free/matrix_free.h b/include/deal.II/matrix_free/matrix_free.h
index a3d9078ea1d6c29f1ea73eb8cefeafeaaae6ba42..d8841bad932922e82647dee0491cd6a2faa68048 100644
@@ -1482,7 +1482,7 @@ public:
   template <typename Number2>
   void
   initialize_dof_vector(LinearAlgebra::SharedMPI::Vector<Number2> &vec,
-                        const MPI_Comm                             comm_sm,
+                        const MPI_Comm &                            comm_sm,
                         const unsigned int dof_handler_index = 0) const;
 
   /**
@@ -2231,17 +2231,19 @@ template <typename Number2>
 inline void
 MatrixFree<dim, Number, VectorizedArrayType>::initialize_dof_vector(
   LinearAlgebra::SharedMPI::Vector<Number2> &vec,
-  const MPI_Comm                             comm_sm,
+  const MPI_Comm  &                           comm_sm,
   const unsigned int                         comp) const
 {
   AssertIndexRange(comp, n_components());
 
+  const auto & part = dof_info[comp].vector_partitioner;
+  
   if (partitioner_sm[comp][comm_sm] == nullptr)
     partitioner_sm[comp][comm_sm] =
       std::make_shared<LinearAlgebra::SharedMPI::Partitioner<Number>>(
-        dof_info[comp].vector_partitioner->get_communicator(), comm_sm);
+        part->get_communicator(), comm_sm, part->locally_owned_range(), part->ghost_indices());
 
-  vec.reinit(dof_info[comp].vector_partitioner, partitioner_sm[comp][comm_sm]);
+  vec.reinit(part, partitioner_sm[comp][comm_sm]);
 }
 
 
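
The hunk above caches one SharedMPI partitioner per component and shared-memory communicator, building it lazily from the locally owned range and ghost indices of the existing MPI partitioner. A self-contained sketch of that lazy-caching pattern, with a stand-in Partitioner type and an illustrative helper get_or_create rather than the deal.II classes:

#include <mpi.h>

#include <map>
#include <memory>

// Stand-in for LinearAlgebra::SharedMPI::Partitioner<Number>; only the
// constructor signature used in the hunk above matters here.
struct Partitioner
{
  Partitioner(const MPI_Comm comm, const MPI_Comm comm_sm)
    : comm(comm)
    , comm_sm(comm_sm)
  {}

  MPI_Comm comm;
  MPI_Comm comm_sm;
};

// One partitioner per shared-memory communicator, created on first use and
// reused afterwards -- the same pattern as partitioner_sm[comp][comm_sm].
std::shared_ptr<Partitioner> &
get_or_create(std::map<MPI_Comm, std::shared_ptr<Partitioner>> &cache,
              const MPI_Comm                                    comm,
              const MPI_Comm                                    comm_sm)
{
  auto &entry = cache[comm_sm]; // inserts an empty shared_ptr on first access
  if (entry == nullptr)
    entry = std::make_shared<Partitioner>(comm, comm_sm);
  return entry;
}

Keying the cache on comm_sm appears intended to let one MatrixFree object hand out vectors for different shared-memory groupings without rebuilding the partitioner each time.
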
diff --git a/tests/sm/vector_sm_01.cc b/tests/sm/vector_sm_01.cc
index 17d609739178c9320469042a233de4afc51018a2..1506d1064ca16a722011ca720dc1468b405eda1d 100644
@@ -36,7 +36,7 @@ using namespace dealii;
 
 template <typename Number, int dim>
 void
-test(const int n_refinements, const int degree)
+test(const int n_refinements, const int degree, const int group_size)
 {
   Triangulation<dim> tria;
   GridGenerator::hyper_cube(tria);
@@ -60,16 +60,22 @@ test(const int n_refinements, const int degree)
   dealii::MatrixFree<dim, Number> matrix_free;
   matrix_free.reinit(mapping, dof_handler, constraint, quad, additional_data);
 
+  MPI_Comm comm = MPI_COMM_WORLD;
+  
+  const unsigned int rank = Utilities::MPI::this_mpi_process(comm);
+  
   MPI_Comm comm_sm;
+  MPI_Comm_split(comm, rank / group_size, rank, &comm_sm);
 
   LinearAlgebra::SharedMPI::Vector<Number> vec;
   matrix_free.initialize_dof_vector(vec, comm_sm);
 }
 
 int
-main()
+main(int argc, char *argv[])
 {
-  initlog();
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll                    all;
 
-  test<double, 2>(1, 1);
+  test<double, 2>(1, 1, 1);
 }
\ No newline at end of file

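The test now initializes MPI and splits MPI_COMM_WORLD into consecutive groups of group_size ranks via MPI_Comm_split(comm, rank / group_size, rank, &comm_sm), so the shared-memory path can be exercised with any rank count; renaming the output file to vector_sm_01.mpirun=1.output tells the deal.II test suite to run the test with one MPI process. On a real machine one would more typically let MPI detect node boundaries itself; a minimal, self-contained sketch using MPI_Comm_split_type (not part of the patch):

#include <mpi.h>

#include <cstdio>

int
main(int argc, char *argv[])
{
  MPI_Init(&argc, &argv);

  // Group ranks by the shared-memory domain (node) they actually run on.
  MPI_Comm comm_sm;
  MPI_Comm_split_type(
    MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, /*key=*/0, MPI_INFO_NULL, &comm_sm);

  int world_rank = 0, sm_rank = 0, sm_size = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
  MPI_Comm_rank(comm_sm, &sm_rank);
  MPI_Comm_size(comm_sm, &sm_size);
  std::printf("global rank %d is rank %d of %d within its node\n",
              world_rank, sm_rank, sm_size);

  MPI_Comm_free(&comm_sm);
  MPI_Finalize();
  return 0;
}

Splitting by an explicit group_size, as the test does, has the advantage that multi-"node" behavior can be simulated on a single machine.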