https://gitweb.dealii.org/ - dealii.git/commitdiff
Add update_ghost_values and compress
author    Peter Munch <peterrmuench@gmail.com>
          Sat, 21 Mar 2020 19:20:55 +0000 (20:20 +0100)
committer Peter Munch <peterrmuench@gmail.com>
          Sat, 21 Mar 2020 19:20:55 +0000 (20:20 +0100)
include/deal.II/lac/la_sm_partitioner.h
include/deal.II/lac/la_sm_vector.h
include/deal.II/lac/la_sm_vector.templates.h
include/deal.II/matrix_free/matrix_free.h
tests/sm/vector_sm_01.cc

include/deal.II/lac/la_sm_partitioner.h
index aac7cc57e77ecf0009613cfffad35ea6b1fa7d43..2ad2dcea5744cc0daf13d7ed2ab9d14197e03aae 100644 (file)
@@ -232,34 +232,227 @@ namespace LinearAlgebra
         buffer.resize(send_remote_ptr.back());
       }
 
+      void
+      update_ghost_values_start(
+        Number *               data_this,
+        std::vector<Number *> &data_others,
+        const unsigned int     communication_channel = 0) const
+      {
+        (void)data_others;
+
+        int dummy;
+        for (unsigned int i = 0; i < recv_sm_ranks.size(); i++)
+          MPI_Irecv(&dummy,
+                    0,
+                    MPI_INT,
+                    recv_sm_ranks[i],
+                    communication_channel + 1,
+                    comm_sm,
+                    recv_sm_req.data() + i);
+
+        for (unsigned int i = 0; i < send_sm_ranks.size(); i++)
+          MPI_Isend(&dummy,
+                    0,
+                    MPI_INT,
+                    send_sm_ranks[i],
+                    communication_channel + 1,
+                    comm_sm,
+                    send_sm_req.data() + i);
+
+        for (unsigned int i = 0; i < recv_remote_ranks.size(); i++)
+          MPI_Irecv(data_this + recv_remote_ptr[i] + n_local_elements,
+                    recv_remote_ptr[i + 1] - recv_remote_ptr[i],
+                    Utilities::MPI::internal::mpi_type_id(data_this),
+                    recv_remote_ranks[i],
+                    communication_channel + 0,
+                    comm,
+                    recv_remote_req.data() + i);
+
+        for (unsigned int i = 0; i < send_remote_ranks.size(); i++)
+          {
+            for (unsigned int j = send_remote_ptr[i];
+                 j < send_remote_ptr[i + 1];
+                 j++)
+              buffer[j] = data_this[send_remote_indices[j]];
+
+            MPI_Isend(buffer.data() + send_remote_ptr[i],
+                      send_remote_ptr[i + 1] - send_remote_ptr[i],
+                      Utilities::MPI::internal::mpi_type_id(data_this),
+                      send_remote_ranks[i],
+                      communication_channel + 0,
+                      comm,
+                      send_remote_req.data() + i);
+          }
+      }
+
+      void
+      update_ghost_values_finish(Number *               data_this,
+                                 std::vector<Number *> &data_others) const
+      {
+        for (unsigned int c = 0; c < recv_sm_ranks.size(); c++)
+          {
+            int i;
+            MPI_Waitany(recv_sm_req.size(),
+                        recv_sm_req.data(),
+                        &i,
+                        MPI_STATUS_IGNORE);
+
+            const Number *__restrict__ data_others_ptr =
+              data_others[recv_sm_ranks[i]];
+            Number *__restrict__ data_this_ptr = data_this;
+
+            for (unsigned int j = recv_sm_ptr[i], k = recv_sm_offset[i];
+                 j < recv_sm_ptr[i + 1];
+                 j++, k++)
+              data_this_ptr[k] = data_others_ptr[recv_sm_indices[j]];
+          }
+
+        MPI_Waitall(send_sm_req.size(),
+                    send_sm_req.data(),
+                    MPI_STATUSES_IGNORE);
+        MPI_Waitall(send_remote_req.size(),
+                    send_remote_req.data(),
+                    MPI_STATUSES_IGNORE);
+        MPI_Waitall(recv_remote_req.size(),
+                    recv_remote_req.data(),
+                    MPI_STATUSES_IGNORE);
+      }
+
+      void
+      update_ghost_values(Number *               data_this,
+                          std::vector<Number *> &data_others) const
+      {
+        update_ghost_values_start(data_this, data_others);
+        update_ghost_values_finish(data_this, data_others);
+      }
+
+      void
+      compress_start(Number *               data_this,
+                     std::vector<Number *> &data_others,
+                     const unsigned int     communication_channel = 0) const
+      {
+        (void)data_others;
+
+        int dummy;
+        for (unsigned int i = 0; i < recv_sm_ranks.size(); i++)
+          MPI_Isend(&dummy,
+                    0,
+                    MPI_INT,
+                    recv_sm_ranks[i],
+                    communication_channel + 1,
+                    comm_sm,
+                    recv_sm_req.data() + i);
+
+        for (unsigned int i = 0; i < send_sm_ranks.size(); i++)
+          MPI_Irecv(&dummy,
+                    0,
+                    MPI_INT,
+                    send_sm_ranks[i],
+                    communication_channel + 1,
+                    comm_sm,
+                    send_sm_req.data() + i);
+
+        for (unsigned int i = 0; i < recv_remote_ranks.size(); i++)
+          MPI_Isend(data_this + recv_remote_ptr[i] + n_local_elements,
+                    recv_remote_ptr[i + 1] - recv_remote_ptr[i],
+                    Utilities::MPI::internal::mpi_type_id(data_this),
+                    recv_remote_ranks[i],
+                    communication_channel + 0,
+                    comm,
+                    recv_remote_req.data() + i);
+
+        for (unsigned int i = 0; i < send_remote_ranks.size(); i++)
+          MPI_Irecv(buffer.data() + send_remote_ptr[i],
+                    send_remote_ptr[i + 1] - send_remote_ptr[i],
+                    Utilities::MPI::internal::mpi_type_id(data_this),
+                    send_remote_ranks[i],
+                    communication_channel + 0,
+                    comm,
+                    send_remote_req.data() + i);
+      }
+
+      void
+      compress_finish(Number *               data_this,
+                      std::vector<Number *> &data_others) const
+      {
+        for (unsigned int c = 0; c < send_sm_ranks.size(); c++)
+          {
+            int i;
+            MPI_Waitany(send_sm_req.size(),
+                        send_sm_req.data(),
+                        &i,
+                        MPI_STATUS_IGNORE);
+
+            const Number *__restrict__ data_others_ptr =
+              data_others[send_sm_ranks[i]];
+            Number *__restrict__ data_this_ptr = data_this;
+
+            for (unsigned int j = send_sm_ptr[i], k = send_sm_offset[i];
+                 j < send_sm_ptr[i + 1];
+                 j++, k++)
+              {
+                data_this_ptr[send_sm_indices[j]] += data_others_ptr[k];
+              }
+          }
+
+        for (unsigned int c = 0; c < send_remote_ranks.size(); c++)
+          {
+            int i;
+            MPI_Waitany(send_remote_req.size(),
+                        send_remote_req.data(),
+                        &i,
+                        MPI_STATUS_IGNORE);
+
+            for (unsigned int j = send_remote_ptr[i];
+                 j < send_remote_ptr[i + 1];
+                 j++)
+              data_this[send_remote_indices[j]] += buffer[j];
+          }
+
+        MPI_Waitall(recv_sm_req.size(),
+                    recv_sm_req.data(),
+                    MPI_STATUSES_IGNORE);
+
+        MPI_Waitall(recv_remote_req.size(),
+                    recv_remote_req.data(),
+                    MPI_STATUSES_IGNORE);
+      }
+
+      void
+      compress(Number *data_this, std::vector<Number *> &data_others) const
+      {
+        compress_start(data_this, data_others);
+        compress_finish(data_this, data_others);
+      }
+
     private:
       const MPI_Comm &comm;
       const MPI_Comm &comm_sm;
 
       unsigned int n_local_elements;
 
-      AlignedVector<Number> buffer;
+      mutable AlignedVector<Number> buffer;
 
       std::vector<unsigned int>            recv_remote_ranks;
       std::vector<types::global_dof_index> recv_remote_ptr = {0};
-      std::vector<MPI_Request>             recv_remote_req;
-
-      std::vector<unsigned int> recv_sm_ranks;
-      std::vector<unsigned int> recv_sm_ptr = {0};
-      std::vector<MPI_Request>  recv_sm_req;
-      std::vector<unsigned int> recv_sm_indices;
-      std::vector<unsigned int> recv_sm_offset;
-
-      std::vector<unsigned int> send_remote_ranks;
-      std::vector<unsigned int> send_remote_ptr = {0};
-      std::vector<unsigned int> send_remote_indices;
-      std::vector<MPI_Request>  send_remote_req;
-
-      std::vector<unsigned int> send_sm_ranks;
-      std::vector<unsigned int> send_sm_ptr = {0};
-      std::vector<unsigned int> send_sm_indices;
-      std::vector<MPI_Request>  send_sm_req;
-      std::vector<unsigned int> send_sm_offset;
+      mutable std::vector<MPI_Request>     recv_remote_req;
+
+      std::vector<unsigned int>        recv_sm_ranks;
+      std::vector<unsigned int>        recv_sm_ptr = {0};
+      mutable std::vector<MPI_Request> recv_sm_req;
+      std::vector<unsigned int>        recv_sm_indices;
+      std::vector<unsigned int>        recv_sm_offset;
+
+      std::vector<unsigned int>        send_remote_ranks;
+      std::vector<unsigned int>        send_remote_ptr = {0};
+      std::vector<unsigned int>        send_remote_indices;
+      mutable std::vector<MPI_Request> send_remote_req;
+
+      std::vector<unsigned int>        send_sm_ranks;
+      std::vector<unsigned int>        send_sm_ptr = {0};
+      std::vector<unsigned int>        send_sm_indices;
+      mutable std::vector<MPI_Request> send_sm_req;
+      std::vector<unsigned int>        send_sm_offset;
     };
 
   } // end of namespace SharedMPI
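The new partitioner methods split each exchange into a start phase that only
posts nonblocking MPI operations and a finish phase that waits for them and
performs the local copies. Ranks on the same shared-memory node receive no
vector data at all: a zero-byte message on comm_sm (tag channel + 1) acts as
a handshake, after which the neighbor's values are read directly out of the
shared window. Off-node neighbors communicate on comm (tag channel + 0):
update_ghost_values packs owned values into buffer and receives into the
ghost range, while compress reverses the direction and accumulates the
received contributions. Below is a minimal sketch of the same start/finish
pattern for a single off-node neighbor; the names (exchange, neighbor,
n_ghosts) are illustrative, not part of the deal.II interface:

    #include <mpi.h>

    void exchange(double *data, const int n_local, const int n_ghosts,
                  const int neighbor, const MPI_Comm comm)
    {
      MPI_Request requests[2];

      // start: post the receive into the ghost range and the send of the
      // owned values; both calls return immediately
      MPI_Irecv(data + n_local, n_ghosts, MPI_DOUBLE, neighbor, 0, comm,
                &requests[0]);
      MPI_Isend(data, n_ghosts, MPI_DOUBLE, neighbor, 0, comm, &requests[1]);

      // ... computation can overlap the transfer here ...

      // finish: block until both requests have completed
      MPI_Waitall(2, requests, MPI_STATUSES_IGNORE);
    }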
include/deal.II/lac/la_sm_vector.h
index 02c8508a2981f63b5f7cf0fd078521a095e71ea0..79105114eeaaeafae3fb6bfeeb7d21be3ae2adbd 100644 (file)
@@ -98,6 +98,8 @@ namespace LinearAlgebra
       std::unique_ptr<Number[], std::function<void(Number *&)>> values;
       MPI_Win *values_win = nullptr;
 
+      std::vector<Number *> others;
+
       std::unique_ptr<Number[]> values_dev;
     };
 
@@ -539,8 +541,7 @@ namespace LinearAlgebra
     inline bool
     Vector<Number, MemorySpace>::has_ghost_elements() const
     {
-      Assert(false, ExcNotImplemented());
-      return false;
+      return vector_is_ghosted;
     }
 
 
include/deal.II/lac/la_sm_vector.templates.h
index 62826b57a523da1d144e873eaf566f72d2aab6ea..91b9e9dcdc8d1eaa2a5966d506eb215b68ccbf31 100644 (file)
@@ -56,12 +56,9 @@ namespace LinearAlgebra
 
           allocated_size = new_alloc_size;
 
-          // TODO
-          std::vector<Number *> data_others;
-
           data.values_win   = new MPI_Win;
           Number *data_this = (Number *)malloc(0);
-          data_others.resize(Utilities::MPI::n_mpi_processes(comm_shared));
+          data.others.resize(Utilities::MPI::n_mpi_processes(comm_shared));
 
 
           MPI_Info info;
@@ -82,11 +79,11 @@ namespace LinearAlgebra
               int      disp_unit;
               MPI_Aint ssize;
               MPI_Win_shared_query(
-                *data.values_win, i, &ssize, &disp_unit, &data_others[i]);
+                *data.values_win, i, &ssize, &disp_unit, &data.others[i]);
             }
 
           data.values = {
-            data_others[Utilities::MPI::this_mpi_process(comm_shared)],
+            data.others[Utilities::MPI::this_mpi_process(comm_shared)],
             [&data](Number *&) { MPI_Win_free(data.values_win); }};
         }
       };
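The resize path now caches one base pointer per node-local rank in
data.others. Each rank contributes its segment to a single node-local window
(presumably via MPI_Win_allocate_shared in the elided context, given
values_win and disp_unit), and MPI_Win_shared_query then returns a directly
dereferenceable pointer into every rank's segment. A self-contained sketch
of that idiom, assuming comm_sm contains only ranks of one node; all names
are illustrative, not the deal.II code:

    #include <mpi.h>
    #include <vector>

    std::vector<double *>
    allocate_shared(const MPI_Comm comm_sm, const MPI_Aint n_local, MPI_Win &win)
    {
      // every rank allocates its own segment inside one node-local window
      double *ptr = nullptr;
      MPI_Win_allocate_shared(n_local * sizeof(double), sizeof(double),
                              MPI_INFO_NULL, comm_sm, &ptr, &win);

      // query each rank's base pointer so that its values can later be
      // read with plain loads instead of MPI messages
      int size = 0;
      MPI_Comm_size(comm_sm, &size);
      std::vector<double *> others(size);
      for (int i = 0; i < size; ++i)
        {
          MPI_Aint ssize;
          int      disp_unit;
          MPI_Win_shared_query(win, i, &ssize, &disp_unit, &others[i]);
        }
      return others;
    }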
@@ -380,9 +377,10 @@ namespace LinearAlgebra
       const unsigned int                communication_channel,
       ::dealii::VectorOperation::values operation)
     {
-      Assert(false, ExcNotImplemented());
-      (void)communication_channel;
       (void)operation;
+      partitioner_sm->compress_start(data.values.get(),
+                                     data.others,
+                                     communication_channel);
     }
 
 
@@ -392,8 +390,8 @@ namespace LinearAlgebra
     Vector<Number, MemorySpaceType>::compress_finish(
       ::dealii::VectorOperation::values operation)
     {
-      Assert(false, ExcNotImplemented());
       (void)operation;
+      partitioner_sm->compress_finish(data.values.get(), data.others);
     }
 
 
@@ -403,8 +401,9 @@ namespace LinearAlgebra
     Vector<Number, MemorySpaceType>::update_ghost_values_start(
       const unsigned int communication_channel) const
     {
-      Assert(false, ExcNotImplemented());
-      (void)communication_channel;
+      partitioner_sm->update_ghost_values_start(data.values.get(),
+                                                data.others,
+                                                communication_channel);
     }
 
 
@@ -413,7 +412,8 @@ namespace LinearAlgebra
     void
     Vector<Number, MemorySpaceType>::update_ghost_values_finish() const
     {
-      Assert(false, ExcNotImplemented());
+      partitioner_sm->update_ghost_values_finish(data.values.get(),
+                                                 data.others);
     }
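With the four stubs now forwarding to the partitioner, the vector exposes the
usual deal.II two-phase interface. A hedged usage sketch, assuming an
initialized LinearAlgebra::SharedMPI::Vector<double> vec; note that the
operation argument is currently discarded by the implementations above, so
only the partitioner's additive compress semantics applies:

    vec.update_ghost_values_start(0); // post messages on channel 0
    // ... work on locally owned entries can overlap here ...
    vec.update_ghost_values_finish(); // wait and fill the ghost range

    vec.compress_start(0, VectorOperation::add); // send ghost contributions
    vec.compress_finish(VectorOperation::add);   // accumulate into owners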
 
 
include/deal.II/matrix_free/matrix_free.h
index 4f1f67e5d4354c9dbb3832e050429460e74910c5..b4509d9c6772dd4eee8cfb06c0237755909efcd9 100644 (file)
@@ -3765,6 +3765,51 @@ namespace internal
         }
     }
 
+    void
+    reset_ghost_values(
+      const LinearAlgebra::SharedMPI::Vector<Number> &vec) const
+    {
+      if (ghosts_were_set == true)
+        return;
+
+      if (vector_face_access ==
+            dealii::MatrixFree<dim, Number, VectorizedArrayType>::
+              DataAccessOnFaces::unspecified ||
+          vec.size() == 0)
+        vec.zero_out_ghosts();
+      else
+        {
+#  ifdef DEAL_II_WITH_MPI
+          AssertDimension(requests.size(), tmp_data.size());
+
+          const unsigned int mf_component = find_vector_in_mf(vec);
+          const Utilities::MPI::Partitioner &part =
+            get_partitioner(mf_component);
+          if (&part ==
+              matrix_free.get_dof_info(mf_component).vector_partitioner.get())
+            vec.zero_out_ghosts();
+          else if (part.n_ghost_indices() > 0)
+            {
+              for (std::vector<std::pair<unsigned int, unsigned int>>::
+                     const_iterator my_ghosts =
+                       part.ghost_indices_within_larger_ghost_set().begin();
+                   my_ghosts !=
+                   part.ghost_indices_within_larger_ghost_set().end();
+                   ++my_ghosts)
+                for (unsigned int j = my_ghosts->first; j < my_ghosts->second;
+                     j++)
+                  {
+                    const_cast<LinearAlgebra::SharedMPI::Vector<Number> &>(vec)
+                      .local_element(j + part.local_size()) = 0.;
+                  }
+            }
+
+          // let vector know that it's not ghosted anymore
+          vec.set_ghost_state(false);
+#  endif
+        }
+    }
+
 
 
     /**
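reset_ghost_values restores a consistent non-ghosted state after a loop: if
the ghosts were not set by the caller, the ghost range is cleared, either
wholesale via zero_out_ghosts() or selectively for just the indices the
MatrixFree partitioner actually touched, and the ghost flag is dropped. The
selective branch, reduced to its core (a sketch, with the const_cast
omitted):

    for (const auto &range : part.ghost_indices_within_larger_ghost_set())
      for (unsigned int j = range.first; j < range.second; ++j)
        vec.local_element(j + part.local_size()) = 0.;
    vec.set_ghost_state(false); // vector is no longer ghosted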
tests/sm/vector_sm_01.cc
index ec3622a90a474bd1e1013a545cd67441fb75b8d4..ed2ebca093228d79c5d83a71b42bbeefe3789ed8 100644 (file)
@@ -67,8 +67,14 @@ test(const int n_refinements, const int degree, const int group_size)
   MPI_Comm comm_sm;
   MPI_Comm_split(comm, rank / group_size, rank, &comm_sm);
 
-  LinearAlgebra::SharedMPI::Vector<Number> vec;
-  matrix_free.initialize_dof_vector(vec, comm_sm);
+  using VectorType = LinearAlgebra::SharedMPI::Vector<Number>;
+
+  LinearAlgebra::SharedMPI::Vector<Number> dst, src;
+  matrix_free.initialize_dof_vector(dst, comm_sm);
+  matrix_free.initialize_dof_vector(src, comm_sm);
+
+  matrix_free.template cell_loop<VectorType, VectorType>(
+    [](const auto &, auto &, const auto &, const auto) {}, dst, src);
 }
 
 int

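The extended test runs an empty cell loop over a destination and a source
vector. Even with a no-op cell operation, cell_loop is expected to exercise
the new exchange paths, roughly equivalent to the following sketch (assuming
the usual deal.II vector semantics):

    src.update_ghost_values();          // import ghost entries of the source
    // ... per-cell evaluation would happen here ...
    dst.compress(VectorOperation::add); // return ghost contributions to owners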