https://gitweb.dealii.org/ - dealii.git/commitdiff
RPE::evaluate_and_process(): pack only if needed 15163/head
author    Peter Munch <peterrmuench@gmail.com>
          Mon, 1 May 2023 13:07:58 +0000 (15:07 +0200)
committer Peter Munch <peterrmuench@gmail.com>
          Fri, 7 Jun 2024 12:33:20 +0000 (14:33 +0200)
include/deal.II/base/mpi_remote_point_evaluation.h
include/deal.II/multigrid/mg_transfer_global_coarsening.h
include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h
source/base/mpi_remote_point_evaluation.cc
tests/remote_point_evaluation/remote_point_evaluation_01.cc

index 9691d4349dc2f8d3fcddf2dfa22e921d2157b3f8..f1b3c7da48ebad2686bad02019aa47c132e78204 100644 (file)
@@ -272,24 +272,32 @@ namespace Utilities
        *
        * @warning This is a collective call that needs to be executed by all
        *   processors in the communicator.
+       *
+       * @note This function can handle arbitrary types, including scalar and
+       *   Tensor objects. If scalar types are used, one can specify the
+       *   number of components @p n_components. This makes it possible to
+       *   provide unrolled tensors, which is useful, e.g., if their dimension
+       *   and rank are not known at compile time.
        */
-      template <typename T>
+      template <typename T, unsigned int n_components = 1>
       void
       evaluate_and_process(
         std::vector<T> &output,
         std::vector<T> &buffer,
         const std::function<void(const ArrayView<T> &, const CellData &)>
-          &evaluation_function) const;
+                  &evaluation_function,
+        const bool sort_data = true) const;
 
       /**
        * Same as above but with the result provided as return value and
        * without external allocation of a user-provided buffer.
        */
-      template <typename T>
+      template <typename T, unsigned int n_components = 1>
       std::vector<T>
       evaluate_and_process(
         const std::function<void(const ArrayView<T> &, const CellData &)>
-          &evaluation_function) const;
+                  &evaluation_function,
+        const bool sort_data = true) const;
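A minimal usage sketch of the new n_components template parameter (assuming an initialized RemotePointEvaluation object rpe; everything except the member function itself is hypothetical): a rank-1 tensor in 3d is evaluated as three unrolled scalars per point, so no Tensor-typed instantiation is needed.

    constexpr unsigned int n_comp = 3; // = dim^rank for a rank-1 tensor in 3d
    const std::vector<double> values =
      rpe.evaluate_and_process<double, n_comp>(
        [&](const ArrayView<double> &out, const auto &cell_data) {
          // fill out[i * n_comp + c] with component c of evaluation point i
          // (schematic; a real callback would loop over cell_data.cells)
        });
    // with the default sort_data = true, component c of point p ends up at
    // values[p * n_comp + c]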
 
       /**
        * This method is the inverse of the method evaluate_and_process(). It
@@ -298,25 +306,33 @@ namespace Utilities
        *
        * @warning This is a collective call that needs to be executed by all
        *   processors in the communicator.
+       *
+       * @note This function can handle arbitrary types, including scalar and
+       *   Tensor objects. If scalar types are used, one can specify the
+       *   number of components @p n_components. This makes it possible to
+       *   provide unrolled tensors, which is useful, e.g., if their dimension
+       *   and rank are not known at compile time.
        */
-      template <typename T>
+      template <typename T, unsigned int n_components = 1>
       void
       process_and_evaluate(
         const std::vector<T> &input,
         std::vector<T>       &buffer,
         const std::function<void(const ArrayView<const T> &, const CellData &)>
-          &evaluation_function) const;
+                  &evaluation_function,
+        const bool sort_data = true) const;
 
       /**
        * Same as above but without external allocation of a user-provided
        * buffer.
        */
-      template <typename T>
+      template <typename T, unsigned int n_components = 1>
       void
       process_and_evaluate(
         const std::vector<T> &input,
         const std::function<void(const ArrayView<const T> &, const CellData &)>
-          &evaluation_function) const;
+                  &evaluation_function,
+        const bool sort_data = true) const;
 
       /**
        * Return a CRS-like data structure to determine the position of the
@@ -366,6 +382,24 @@ namespace Utilities
       bool
       is_ready() const;
 
+      /**
+       * Return the permutation needed for sending. If it is applied,
+       * the evaluation results do not have to be sorted according to the
+       * receiving ranks during evaluate_and_process().
+       */
+      const std::vector<unsigned int> &
+      get_send_permutation() const;
+
+      /**
+       * Directly after MPI communication, the data is contiguous rank by
+       * rank. If no sorting is requested during evaluate_and_process(),
+       * this permutation allows one to access all values corresponding to
+       * an evaluation point (which may have been evaluated in multiple
+       * cells).
+       */
+      const std::vector<unsigned int> &
+      get_inverse_recv_permutation() const;
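A hedged sketch of how these two accessors combine with sort_data = false (rpe and output are assumed to exist; output holds the unsorted result of evaluate_and_process()):

    // with sort_data = false, output is contiguous rank by rank; the inverse
    // receive permutation recovers the slots belonging to each point
    const auto &ptrs     = rpe.get_point_ptrs();
    const auto &perm_inv = rpe.get_inverse_recv_permutation();
    for (unsigned int p = 0; p < ptrs.size() - 1; ++p)
      for (unsigned int j = ptrs[p]; j < ptrs[p + 1]; ++j)
        {
          const double value = output[perm_inv[j]]; // one evaluation of point p
          // ... average or accumulate duplicates of point p as needed ...
        }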
+
+
     private:
       /**
        * Additional data with basic settings.
@@ -416,6 +450,11 @@ namespace Utilities
        */
       std::vector<unsigned int> recv_permutation;
 
+      /**
+       * Inverse of permutation index within a recv buffer.
+       */
+      std::vector<unsigned int> recv_permutation_inv;
+
       /**
        * Pointers of ranges within a receive buffer that are filled by ranks
        * specified by recv_ranks.
@@ -438,6 +477,11 @@ namespace Utilities
        */
       std::vector<unsigned int> send_permutation;
 
+      /**
+       * Inverse of permutation index within a send buffer.
+       */
+      std::vector<unsigned int> send_permutation_inv;
+
       /**
        * Ranks to send to.
        */
@@ -448,8 +492,184 @@ namespace Utilities
        * specified by send_ranks.
        */
       std::vector<unsigned int> send_ptrs;
+
+      /**
+       * Buffer size (if internal sorting is requested), determined as the
+       * maximum of the memory needed:
+       *
+       * during evaluate_and_process():
+       * - point data (sorted according to cells) -> input from the cell loop
+       * - point data (sorted according to ranks) -> data to be sent
+       * - memory for receiving data (from one process at a time)
+       *
+       * during process_and_evaluate():
+       * - point data (sorted according to cells) -> output to the cell loop
+       * - point data (sorted according to ranks) -> data to be sent
+       * - memory for receiving data (from one process at a time)
+       */
+      unsigned int buffer_size_with_sorting;
+
+      /**
+       * Buffer size (if sorting is not requested); corresponds to the
+       * number of evaluation points.
+       */
+      unsigned int buffer_size_without_sorting;
     };
 
+    namespace internal
+    {
+#ifdef DEAL_II_WITH_MPI
+      /**
+       * Pack @p data and send it via MPI_Isend.
+       */
+      template <typename T>
+      std::enable_if_t<Utilities::MPI::is_mpi_type<T> == false, void>
+      pack_and_isend(const ArrayView<const T>       &data,
+                     const unsigned int              rank,
+                     const unsigned int              tag,
+                     const MPI_Comm                  comm,
+                     std::vector<std::vector<char>> &buffers,
+                     std::vector<MPI_Request>       &requests)
+      {
+        requests.emplace_back(MPI_Request());
+
+        buffers.emplace_back(Utilities::pack(
+          std::vector<T>(data.data(), data.data() + data.size()), false));
+
+        const int ierr = MPI_Isend(buffers.back().data(),
+                                   buffers.back().size(),
+                                   MPI_CHAR,
+                                   rank,
+                                   tag,
+                                   comm,
+                                   &requests.back());
+        AssertThrowMPI(ierr);
+      }
+
+
+
+      /**
+       * The above function, specialized for data types natively supported
+       * by MPI, so that packing can be skipped.
+       */
+      template <typename T>
+      std::enable_if_t<Utilities::MPI::is_mpi_type<T> == true, void>
+      pack_and_isend(const ArrayView<const T> &data,
+                     const unsigned int        rank,
+                     const unsigned int        tag,
+                     const MPI_Comm            comm,
+                     std::vector<std::vector<char>> & /*buffers*/,
+                     std::vector<MPI_Request> &requests)
+      {
+        requests.emplace_back(MPI_Request());
+
+        const int ierr = MPI_Isend(data.data(),
+                                   data.size(),
+                                   Utilities::MPI::mpi_type_id_for_type<T>,
+                                   rank,
+                                   tag,
+                                   comm,
+                                   &requests.back());
+        AssertThrowMPI(ierr);
+      }
+
+
+
+      /**
+       * The above function, specialized for Tensor objects. The underlying
+       * data type might be natively supported by MPI, so that packing can
+       * be skipped.
+       */
+      template <int rank_, int dim, typename T>
+      std::enable_if_t<Utilities::MPI::is_mpi_type<T> == true, void>
+      pack_and_isend(const ArrayView<const Tensor<rank_, dim, T>> &data,
+                     const unsigned int                            rank,
+                     const unsigned int                            tag,
+                     const MPI_Comm                                comm,
+                     std::vector<std::vector<char>>               &buffers,
+                     std::vector<MPI_Request>                     &requests)
+      {
+        ArrayView<const T> data_(reinterpret_cast<const T *>(data.data()),
+                                 data.size() * Utilities::pow(dim, rank_));
+
+        pack_and_isend(data_, rank, tag, comm, buffers, requests);
+      }
+
+
+      /**
+       * Receive message, unpack it, and store the result in @p data.
+       */
+      template <typename T>
+      std::enable_if_t<Utilities::MPI::is_mpi_type<T> == false, void>
+      recv_and_unpack(const ArrayView<T> &data,
+                      const MPI_Comm      comm,
+                      const MPI_Status   &status,
+                      std::vector<char>  &buffer)
+      {
+        int message_length;
+        int ierr = MPI_Get_count(&status, MPI_CHAR, &message_length);
+        AssertThrowMPI(ierr);
+
+        buffer.resize(message_length);
+
+        ierr = MPI_Recv(buffer.data(),
+                        buffer.size(),
+                        MPI_CHAR,
+                        status.MPI_SOURCE,
+                        internal::Tags::remote_point_evaluation,
+                        comm,
+                        MPI_STATUS_IGNORE);
+        AssertThrowMPI(ierr);
+
+        // unpack data
+        const auto temp = Utilities::unpack<std::vector<T>>(buffer, false);
+
+        for (unsigned int i = 0; i < data.size(); ++i)
+          data[i] = temp[i];
+      }
+
+
+
+      /**
+       * The above function, specialized for data types natively supported
+       * by MPI, so that unpacking can be skipped.
+       */
+      template <typename T>
+      std::enable_if_t<Utilities::MPI::is_mpi_type<T> == true, void>
+      recv_and_unpack(const ArrayView<T> &data,
+                      const MPI_Comm      comm,
+                      const MPI_Status   &status,
+                      std::vector<char> & /*buffer*/)
+      {
+        const auto ierr = MPI_Recv(data.data(),
+                                   data.size(),
+                                   Utilities::MPI::mpi_type_id_for_type<T>,
+                                   status.MPI_SOURCE,
+                                   internal::Tags::remote_point_evaluation,
+                                   comm,
+                                   MPI_STATUS_IGNORE);
+        AssertThrowMPI(ierr);
+      }
+
+
+
+      /**
+       * The above function, specialized for Tensor objects. The underlying
+       * data type might be natively supported by MPI, so that unpacking can
+       * be skipped.
+       */
+      template <int rank_, int dim, typename T>
+      std::enable_if_t<Utilities::MPI::is_mpi_type<T> == true, void>
+      recv_and_unpack(const ArrayView<Tensor<rank_, dim, T>> &data,
+                      const MPI_Comm                          comm,
+                      const MPI_Status                       &status,
+                      std::vector<char>                      &buffer)
+      {
+        const ArrayView<T> data_(reinterpret_cast<T *>(data.data()),
+                                 data.size() * Utilities::pow(dim, rank_));
+
+        recv_and_unpack(data_, comm, status, buffer);
+      }
+#endif
+    } // namespace internal
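The dispatch between the packed and the pack-free path relies only on the is_mpi_type trait and std::enable_if_t. A self-contained miniature of the same pattern, independent of deal.II and MPI (the trait here is a stand-in, not the real Utilities::MPI::is_mpi_type):

    #include <iostream>
    #include <string>
    #include <type_traits>

    // stand-in for Utilities::MPI::is_mpi_type<T>
    template <typename T>
    constexpr bool is_mpi_type = std::is_arithmetic_v<T>;

    // generic path: serialize ("pack") into a char buffer first
    template <typename T>
    std::enable_if_t<is_mpi_type<T> == false, void>
    send(const T &)
    {
      std::cout << "pack, then send as MPI_CHAR\n";
    }

    // fast path: the type maps to a native MPI datatype, no packing needed
    template <typename T>
    std::enable_if_t<is_mpi_type<T> == true, void>
    send(const T &)
    {
      std::cout << "send directly with the native MPI datatype\n";
    }

    int main()
    {
      send(3.14);               // fast path
      send(std::string("abc")); // generic path
    }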
+
 
 
     template <int dim, int spacedim>
@@ -467,19 +687,21 @@ namespace Utilities
 
 
     template <int dim, int spacedim>
-    template <typename T>
+    template <typename T, unsigned int n_components>
     void
     RemotePointEvaluation<dim, spacedim>::evaluate_and_process(
       std::vector<T> &output,
       std::vector<T> &buffer,
       const std::function<void(const ArrayView<T> &, const CellData &)>
-        &evaluation_function) const
+                &evaluation_function,
+      const bool sort_data) const
     {
 #ifndef DEAL_II_WITH_MPI
       Assert(false, ExcNeedsMPI());
       (void)output;
       (void)buffer;
       (void)evaluation_function;
+      (void)sort_data;
 #else
       static CollectiveMutex      mutex;
       CollectiveMutex::ScopedLock lock(mutex, tria->get_communicator());
@@ -488,57 +710,75 @@ namespace Utilities
         Utilities::MPI::this_mpi_process(tria->get_communicator());
 
       // allocate memory for output and buffer
-      output.resize(point_ptrs.back());
-      buffer.resize(std::max(send_permutation.size() * 2,
-                             point_ptrs.back() + send_permutation.size()));
+      output.resize(point_ptrs.back() * n_components);
+
+      buffer.resize(
+        (sort_data ? buffer_size_with_sorting : buffer_size_without_sorting) *
+        n_components);
 
       // ... for evaluation
-      ArrayView<T> buffer_eval(buffer.data(), send_permutation.size());
+      ArrayView<T> buffer_eval(buffer.data(),
+                               send_permutation.size() * n_components);
 
-      // ... for communication
-      ArrayView<T> buffer_comm(buffer.data() + send_permutation.size(),
-                               send_permutation.size());
+      // ... for communication (send)
+      ArrayView<T> buffer_send(sort_data ?
+                                 (buffer.data() +
+                                  send_permutation.size() * n_components) :
+                                 buffer.data(),
+                               send_permutation.size() * n_components);
+
+      // storage for requests and packed communication buffers
+      std::vector<MPI_Request>       send_requests;
+      std::vector<std::vector<char>> send_buffers_packed;
+      std::vector<char>              recv_buffer_packed;
 
       // evaluate functions at points
       evaluation_function(buffer_eval, *cell_data);
 
-      // sort for communication
-      const auto my_rank_local_recv_ptr =
-        std::find(recv_ranks.begin(), recv_ranks.end(), my_rank);
-
-      if (my_rank_local_recv_ptr != recv_ranks.end())
+      // sort for communication (optional)
+      if (sort_data)
         {
-          const unsigned int my_rank_local_recv =
-            std::distance(recv_ranks.begin(), my_rank_local_recv_ptr);
-          const unsigned int my_rank_local_send = std::distance(
-            send_ranks.begin(),
-            std::find(send_ranks.begin(), send_ranks.end(), my_rank));
-          const unsigned int  start = send_ptrs[my_rank_local_send];
-          const unsigned int  end   = send_ptrs[my_rank_local_send + 1];
-          const unsigned int *recv_ptr =
-            recv_permutation.data() + recv_ptrs[my_rank_local_recv];
-          for (unsigned int i = 0; i < send_permutation.size(); ++i)
-            {
-              const unsigned int send_index = send_permutation[i];
+          const auto my_rank_local_recv_ptr =
+            std::find(recv_ranks.begin(), recv_ranks.end(), my_rank);
 
-              // local data -> can be copied to output directly
-              if (start <= send_index && send_index < end)
-                output[recv_ptr[send_index - start]] = buffer_eval[i];
-              else // data to be sent
-                buffer_comm[send_index] = buffer_eval[i];
+          if (my_rank_local_recv_ptr != recv_ranks.end())
+            {
+              const unsigned int my_rank_local_recv =
+                std::distance(recv_ranks.begin(), my_rank_local_recv_ptr);
+              const unsigned int my_rank_local_send = std::distance(
+                send_ranks.begin(),
+                std::find(send_ranks.begin(), send_ranks.end(), my_rank));
+              const unsigned int  start = send_ptrs[my_rank_local_send];
+              const unsigned int  end   = send_ptrs[my_rank_local_send + 1];
+              const unsigned int *recv_ptr =
+                recv_permutation.data() + recv_ptrs[my_rank_local_recv];
+              for (unsigned int i = 0; i < send_permutation.size(); ++i)
+                {
+                  const unsigned int send_index = send_permutation[i];
+
+                  if (start <= send_index && send_index < end)
+                    // local data -> can be copied to output directly
+                    for (unsigned int c = 0; c < n_components; ++c)
+                      output[recv_ptr[send_index - start] * n_components + c] =
+                        buffer_eval[i * n_components + c];
+                  else
+                    // data to be sent
+                    for (unsigned int c = 0; c < n_components; ++c)
+                      buffer_send[send_index * n_components + c] =
+                        buffer_eval[i * n_components + c];
+                }
+            }
+          else
+            {
+              for (unsigned int i = 0; i < send_permutation.size(); ++i)
+                for (unsigned int c = 0; c < n_components; ++c)
+                  buffer_send[send_permutation[i] * n_components + c] =
+                    buffer_eval[i * n_components + c];
             }
-        }
-      else
-        {
-          for (unsigned int i = 0; i < send_permutation.size(); ++i)
-            buffer_comm[send_permutation[i]] = buffer_eval[i];
         }
 
       // send data
-      std::vector<std::vector<char>> send_buffer;
-      send_buffer.reserve(send_ranks.size());
-
-      std::vector<MPI_Request> send_requests;
+      send_buffers_packed.reserve(send_ranks.size());
       send_requests.reserve(send_ranks.size());
 
       for (unsigned int i = 0; i < send_ranks.size(); ++i)
@@ -546,26 +786,42 @@ namespace Utilities
           if (send_ranks[i] == my_rank)
             continue;
 
-          send_requests.emplace_back(MPI_Request());
+          internal::pack_and_isend(
+            ArrayView<const T>(
+              buffer_send.begin() + send_ptrs[i] * n_components,
+              (send_ptrs[i + 1] - send_ptrs[i]) * n_components),
+            send_ranks[i],
+            internal::Tags::remote_point_evaluation,
+            tria->get_communicator(),
+            send_buffers_packed,
+            send_requests);
+        }
 
-          send_buffer.emplace_back(Utilities::pack(
-            std::vector<T>(buffer_comm.begin() + send_ptrs[i],
-                           buffer_comm.begin() + send_ptrs[i + 1]),
-            false));
+      // copy local data directly to output if no sorting is requested
+      if (!sort_data)
+        {
+          const auto my_rank_local_recv_ptr =
+            std::find(recv_ranks.begin(), recv_ranks.end(), my_rank);
 
-          const int ierr = MPI_Isend(send_buffer.back().data(),
-                                     send_buffer.back().size(),
-                                     MPI_CHAR,
-                                     send_ranks[i],
-                                     internal::Tags::remote_point_evaluation,
-                                     tria->get_communicator(),
-                                     &send_requests.back());
-          AssertThrowMPI(ierr);
+          if (my_rank_local_recv_ptr != recv_ranks.end())
+            {
+              const unsigned int my_rank_local_recv =
+                std::distance(recv_ranks.begin(), my_rank_local_recv_ptr);
+              const unsigned int my_rank_local_send = std::distance(
+                send_ranks.begin(),
+                std::find(send_ranks.begin(), send_ranks.end(), my_rank));
+
+              for (unsigned int j = recv_ptrs[my_rank_local_recv],
+                                k = send_ptrs[my_rank_local_send];
+                   j < recv_ptrs[my_rank_local_recv + 1];
+                   ++j, ++k)
+                for (unsigned int c = 0; c < n_components; ++c)
+                  output[j * n_components + c] =
+                    buffer_eval[k * n_components + c];
+            }
         }
 
       // receive data
-      std::vector<char> buffer_char;
-
       for (unsigned int i = 0; i < recv_ranks.size(); ++i)
         {
           if (recv_ranks[i] == my_rank)
@@ -579,26 +835,6 @@ namespace Utilities
                                &status);
           AssertThrowMPI(ierr);
 
-          int message_length;
-          ierr = MPI_Get_count(&status, MPI_CHAR, &message_length);
-          AssertThrowMPI(ierr);
-
-          buffer_char.resize(message_length);
-
-          ierr = MPI_Recv(buffer_char.data(),
-                          buffer_char.size(),
-                          MPI_CHAR,
-                          status.MPI_SOURCE,
-                          internal::Tags::remote_point_evaluation,
-                          tria->get_communicator(),
-                          MPI_STATUS_IGNORE);
-          AssertThrowMPI(ierr);
-
-          // unpack data
-          const auto buffer =
-            Utilities::unpack<std::vector<T>>(buffer_char, false);
-
-          // write data into output vector
           const auto ptr =
             std::find(recv_ranks.begin(), recv_ranks.end(), status.MPI_SOURCE);
 
@@ -606,11 +842,27 @@ namespace Utilities
 
           const unsigned int j = std::distance(recv_ranks.begin(), ptr);
 
-          AssertDimension(buffer.size(), recv_ptrs[j + 1] - recv_ptrs[j]);
+          // ... for communication (recv)
+          ArrayView<T> recv_buffer(
+            sort_data ?
+              (buffer.data() + send_permutation.size() * 2 * n_components) :
+              (output.data() + recv_ptrs[j] * n_components),
+            (recv_ptrs[j + 1] - recv_ptrs[j]) * n_components);
 
-          for (unsigned int i = recv_ptrs[j], c = 0; i < recv_ptrs[j + 1];
-               ++i, ++c)
-            output[recv_permutation[i]] = buffer[c];
+          internal::recv_and_unpack(recv_buffer,
+                                    tria->get_communicator(),
+                                    status,
+                                    recv_buffer_packed);
+
+          if (sort_data)
+            {
+              // sort data into output vector (optional)
+              for (unsigned int i = recv_ptrs[j], k = 0; i < recv_ptrs[j + 1];
+                   ++i, ++k)
+                for (unsigned int c = 0; c < n_components; ++c)
+                  output[recv_permutation[i] * n_components + c] =
+                    recv_buffer[k * n_components + c];
+            }
         }
 
       // make sure all messages have been sent
@@ -626,16 +878,20 @@ namespace Utilities
 
 
     template <int dim, int spacedim>
-    template <typename T>
+    template <typename T, unsigned int n_components>
     std::vector<T>
     RemotePointEvaluation<dim, spacedim>::evaluate_and_process(
       const std::function<void(const ArrayView<T> &, const CellData &)>
-        &evaluation_function) const
+                &evaluation_function,
+      const bool sort_data) const
     {
       std::vector<T> output;
       std::vector<T> buffer;
 
-      this->evaluate_and_process(output, buffer, evaluation_function);
+      this->evaluate_and_process<T, n_components>(output,
+                                                  buffer,
+                                                  evaluation_function,
+                                                  sort_data);
 
       return output;
     }
@@ -643,19 +899,21 @@ namespace Utilities
 
 
     template <int dim, int spacedim>
-    template <typename T>
+    template <typename T, unsigned int n_components>
     void
     RemotePointEvaluation<dim, spacedim>::process_and_evaluate(
       const std::vector<T> &input,
       std::vector<T>       &buffer,
       const std::function<void(const ArrayView<const T> &, const CellData &)>
-        &evaluation_function) const
+                &evaluation_function,
+      const bool sort_data) const
     {
 #ifndef DEAL_II_WITH_MPI
       Assert(false, ExcNeedsMPI());
       (void)input;
       (void)buffer;
       (void)evaluation_function;
+      (void)sort_data;
 #else
       static CollectiveMutex      mutex;
       CollectiveMutex::ScopedLock lock(mutex, tria->get_communicator());
@@ -663,74 +921,86 @@ namespace Utilities
       const unsigned int my_rank =
         Utilities::MPI::this_mpi_process(tria->get_communicator());
 
-      // invert permutation matrices (TODO: precompute)
-      std::vector<unsigned int> recv_permutation_inv(recv_permutation.size());
-      for (unsigned int c = 0; c < recv_permutation.size(); ++c)
-        recv_permutation_inv[recv_permutation[c]] = c;
-
-      std::vector<unsigned int> send_permutation_inv(send_permutation.size());
-      for (unsigned int c = 0; c < send_permutation.size(); ++c)
-        send_permutation_inv[send_permutation[c]] = c;
-
       // allocate memory for buffer
       const auto &point_ptrs = this->get_point_ptrs();
-      AssertDimension(input.size(), point_ptrs.size() - 1);
-      buffer.resize(std::max(send_permutation.size() * 2,
-                             point_ptrs.back() + send_permutation.size()));
+
+      AssertDimension(input.size(),
+                      sort_data ? ((point_ptrs.size() - 1) * n_components) :
+                                  (point_ptrs.back() * n_components));
+      buffer.resize(
+        (sort_data ? buffer_size_with_sorting : buffer_size_without_sorting) *
+        n_components);
 
       // ... for evaluation
-      ArrayView<T> buffer_eval(buffer.data(), send_permutation.size());
+      ArrayView<T> buffer_eval(buffer.data(),
+                               send_permutation.size() * n_components);
 
-      // ... for communication
-      ArrayView<T> buffer_comm(buffer.data() + send_permutation.size(),
-                               point_ptrs.back());
+      // ... for communication (send)
+      ArrayView<T> buffer_send(sort_data ?
+                                 (buffer.data() +
+                                  send_permutation.size() * n_components) :
+                                 const_cast<T *>(input.data()),
+                               point_ptrs.back() * n_components);
 
-      // sort for communication (and duplicate data if necessary)
+      // storage for requests and packed communication buffers
+      std::vector<MPI_Request>       send_requests;
+      std::vector<std::vector<char>> send_buffers_packed;
+      std::vector<char>              recv_buffer_packed;
 
-      const auto my_rank_local_recv_ptr =
-        std::find(recv_ranks.begin(), recv_ranks.end(), my_rank);
+      // sort for communication (and duplicate data if necessary)
 
-      if (my_rank_local_recv_ptr != recv_ranks.end())
+      if (sort_data)
         {
-          // optimize the case if we have our own rank among the possible list
-          const unsigned int my_rank_local_recv =
-            std::distance(recv_ranks.begin(), my_rank_local_recv_ptr);
-          const unsigned int my_rank_local_send = std::distance(
-            send_ranks.begin(),
-            std::find(send_ranks.begin(), send_ranks.end(), my_rank));
-
-          const unsigned int  start = recv_ptrs[my_rank_local_recv];
-          const unsigned int  end   = recv_ptrs[my_rank_local_recv + 1];
-          const unsigned int *send_ptr =
-            send_permutation_inv.data() + send_ptrs[my_rank_local_send];
-          for (unsigned int i = 0, c = 0; i < point_ptrs.size() - 1; ++i)
+          const auto my_rank_local_recv_ptr =
+            std::find(recv_ranks.begin(), recv_ranks.end(), my_rank);
+
+          if (my_rank_local_recv_ptr != recv_ranks.end())
             {
-              const unsigned int next = point_ptrs[i + 1];
-              for (unsigned int j = point_ptrs[i]; j < next; ++j, ++c)
+              // optimize the case in which our own rank is among the
+              // receiving ranks
+              const unsigned int my_rank_local_recv =
+                std::distance(recv_ranks.begin(), my_rank_local_recv_ptr);
+              const unsigned int my_rank_local_send = std::distance(
+                send_ranks.begin(),
+                std::find(send_ranks.begin(), send_ranks.end(), my_rank));
+
+              const unsigned int  start = recv_ptrs[my_rank_local_recv];
+              const unsigned int  end   = recv_ptrs[my_rank_local_recv + 1];
+              const unsigned int *send_ptr =
+                send_permutation_inv.data() + send_ptrs[my_rank_local_send];
+              for (unsigned int i = 0, k = 0; i < point_ptrs.size() - 1; ++i)
                 {
-                  const unsigned int recv_index = recv_permutation_inv[c];
-
-                  // local data -> can be copied to final buffer directly
-                  if (start <= recv_index && recv_index < end)
-                    buffer_eval[send_ptr[recv_index - start]] = input[i];
-                  else // data to be sent
-                    buffer_comm[recv_index] = input[i];
+                  const unsigned int next = point_ptrs[i + 1];
+                  for (unsigned int j = point_ptrs[i]; j < next; ++j, ++k)
+                    {
+                      const unsigned int recv_index = recv_permutation_inv[k];
+
+                      // local data -> can be copied to final buffer directly
+                      if (start <= recv_index && recv_index < end)
+                        for (unsigned int c = 0; c < n_components; ++c)
+                          buffer_eval[send_ptr[recv_index - start] *
+                                        n_components +
+                                      c] = input[i * n_components + c];
+                      else // data to be sent
+                        for (unsigned int c = 0; c < n_components; ++c)
+                          buffer_send[recv_index * n_components + c] =
+                            input[i * n_components + c];
+                    }
                 }
             }
-        }
-      else
-        {
-          for (unsigned int i = 0, c = 0; i < point_ptrs.size() - 1; ++i)
-            for (unsigned int j = point_ptrs[i]; j < point_ptrs[i + 1];
-                 ++j, ++c)
-              buffer_comm[recv_permutation_inv[c]] = input[i];
+          else
+            {
+              for (unsigned int i = 0, k = 0; i < point_ptrs.size() - 1; ++i)
+                for (unsigned int j = point_ptrs[i]; j < point_ptrs[i + 1];
+                     ++j, ++k)
+                  for (unsigned int c = 0; c < n_components; ++c)
+                    buffer_send[recv_permutation_inv[k] * n_components + c] =
+                      input[i * n_components + c];
+            }
         }
 
       // send data
-      std::vector<std::vector<char>> send_buffer;
-      send_buffer.reserve(recv_ranks.size());
-
-      std::vector<MPI_Request> send_requests;
+      send_buffers_packed.reserve(recv_ranks.size());
       send_requests.reserve(recv_ranks.size());
 
       for (unsigned int i = 0; i < recv_ranks.size(); ++i)
@@ -738,26 +1008,41 @@ namespace Utilities
           if (recv_ranks[i] == my_rank)
             continue;
 
-          send_requests.push_back(MPI_Request());
+          internal::pack_and_isend(
+            ArrayView<const T>(
+              buffer_send.begin() + recv_ptrs[i] * n_components,
+              (recv_ptrs[i + 1] - recv_ptrs[i]) * n_components),
+            recv_ranks[i],
+            internal::Tags::remote_point_evaluation,
+            tria->get_communicator(),
+            send_buffers_packed,
+            send_requests);
+        }
 
-          send_buffer.emplace_back(Utilities::pack(
-            std::vector<T>(buffer_comm.begin() + recv_ptrs[i],
-                           buffer_comm.begin() + recv_ptrs[i + 1]),
-            false));
+      // copy local data directly to output if no sorting is requested
+      if (!sort_data)
+        {
+          const auto my_rank_local_recv_ptr =
+            std::find(recv_ranks.begin(), recv_ranks.end(), my_rank);
 
-          const int ierr = MPI_Isend(send_buffer.back().data(),
-                                     send_buffer.back().size(),
-                                     MPI_CHAR,
-                                     recv_ranks[i],
-                                     internal::Tags::remote_point_evaluation,
-                                     tria->get_communicator(),
-                                     &send_requests.back());
-          AssertThrowMPI(ierr);
+          if (my_rank_local_recv_ptr != recv_ranks.end())
+            {
+              const unsigned int my_rank_local_recv =
+                std::distance(recv_ranks.begin(), my_rank_local_recv_ptr);
+              const unsigned int my_rank_local_send = std::distance(
+                send_ranks.begin(),
+                std::find(send_ranks.begin(), send_ranks.end(), my_rank));
+
+              for (unsigned int j = recv_ptrs[my_rank_local_recv],
+                                k = send_ptrs[my_rank_local_send];
+                   j < recv_ptrs[my_rank_local_recv + 1];
+                   ++j, ++k)
+                for (unsigned int c = 0; c < n_components; ++c)
+                  buffer_eval[k * n_components + c] =
+                    input[j * n_components + c];
+            }
         }
 
-      // receive data
-      std::vector<char> recv_buffer;
-
       for (unsigned int i = 0; i < send_ranks.size(); ++i)
         {
           if (send_ranks[i] == my_rank)
@@ -771,25 +1056,6 @@ namespace Utilities
                                &status);
           AssertThrowMPI(ierr);
 
-          int message_length;
-          ierr = MPI_Get_count(&status, MPI_CHAR, &message_length);
-          AssertThrowMPI(ierr);
-
-          recv_buffer.resize(message_length);
-
-          ierr = MPI_Recv(recv_buffer.data(),
-                          recv_buffer.size(),
-                          MPI_CHAR,
-                          status.MPI_SOURCE,
-                          internal::Tags::remote_point_evaluation,
-                          tria->get_communicator(),
-                          MPI_STATUS_IGNORE);
-          AssertThrowMPI(ierr);
-
-          // unpack data
-          const auto recv_buffer_unpacked =
-            Utilities::unpack<std::vector<T>>(recv_buffer, false);
-
           // write data into buffer vector
           const auto ptr =
             std::find(send_ranks.begin(), send_ranks.end(), status.MPI_SOURCE);
@@ -798,12 +1064,26 @@ namespace Utilities
 
           const unsigned int j = std::distance(send_ranks.begin(), ptr);
 
-          AssertDimension(recv_buffer_unpacked.size(),
-                          send_ptrs[j + 1] - send_ptrs[j]);
+          ArrayView<T> recv_buffer(
+            sort_data ?
+              (buffer.data() +
+               (point_ptrs.back() + send_permutation.size()) * n_components) :
+              (buffer.data() + send_ptrs[j] * n_components),
+            (send_ptrs[j + 1] - send_ptrs[j]) * n_components);
 
-          for (unsigned int i = send_ptrs[j], c = 0; i < send_ptrs[j + 1];
-               ++i, ++c)
-            buffer_eval[send_permutation_inv[i]] = recv_buffer_unpacked[c];
+          internal::recv_and_unpack(recv_buffer,
+                                    tria->get_communicator(),
+                                    status,
+                                    recv_buffer_packed);
+
+          if (sort_data)
+            {
+              for (unsigned int i = send_ptrs[j], k = 0; i < send_ptrs[j + 1];
+                   ++i, ++k)
+                for (unsigned int c = 0; c < n_components; ++c)
+                  buffer_eval[send_permutation_inv[i] * n_components + c] =
+                    recv_buffer[k * n_components + c];
+            }
         }
 
       if (!send_requests.empty())
@@ -822,15 +1102,19 @@ namespace Utilities
 
 
     template <int dim, int spacedim>
-    template <typename T>
+    template <typename T, unsigned int n_components>
     void
     RemotePointEvaluation<dim, spacedim>::process_and_evaluate(
       const std::vector<T> &input,
       const std::function<void(const ArrayView<const T> &, const CellData &)>
-        &evaluation_function) const
+                &evaluation_function,
+      const bool sort_data) const
     {
       std::vector<T> buffer;
-      this->process_and_evaluate(input, buffer, evaluation_function);
+      this->process_and_evaluate<T, n_components>(input,
+                                                  buffer,
+                                                  evaluation_function,
+                                                  sort_data);
     }
 
   } // end of namespace MPI
index 6f6e63727263cc74117563ba1682736d3609d6a6..8d00a58819c5e332596501f9dc949e96623b2982 100644 (file)
@@ -259,9 +259,10 @@ public:
 
   /**
    * Enable inplace vector operations if external and internal vectors
-   * are compatible.
+   * are compatible. The returned pair indicates whether the operation
+   * succeeded on the coarse and the fine level, respectively.
    */
-  virtual void
+  virtual std::pair<bool, bool>
   enable_inplace_operations_if_possible(
     const std::shared_ptr<const Utilities::MPI::Partitioner>
       &partitioner_coarse,
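A hedged sketch of how a caller might use the new return value (the transfer object and partitioners are assumptions, not part of this patch):

    const std::pair<bool, bool> inplace =
      transfer.enable_inplace_operations_if_possible(partitioner_coarse,
                                                     partitioner_fine);
    if (inplace.first == false)
      {
        // the coarse partitioner could not be adopted; internal copies remain
      }
    if (inplace.second == false)
      {
        // the fine partitioner could not be adopted; internal copies remain
      }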
@@ -320,7 +321,7 @@ protected:
    * are compatible.
    */
   template <int dim, std::size_t width, typename IndexType>
-  void
+  std::pair<bool, bool>
   internal_enable_inplace_operations_if_possible(
     const std::shared_ptr<const Utilities::MPI::Partitioner>
       &partitioner_coarse,
@@ -505,7 +506,7 @@ public:
    * Enable inplace vector operations if external and internal vectors
    * are compatible.
    */
-  void
+  std::pair<bool, bool>
   enable_inplace_operations_if_possible(
     const std::shared_ptr<const Utilities::MPI::Partitioner>
       &partitioner_coarse,
@@ -750,7 +751,7 @@ public:
    * Enable inplace vector operations if external and internal vectors
    * are compatible.
    */
-  void
+  std::pair<bool, bool>
   enable_inplace_operations_if_possible(
     const std::shared_ptr<const Utilities::MPI::Partitioner>
       &partitioner_coarse,
@@ -835,6 +836,16 @@ private:
    */
   Utilities::MPI::RemotePointEvaluation<dim> rpe;
 
+  /**
+   * Vectors for input/output for rpe.
+   */
+  mutable std::vector<Number> rpe_input_output;
+
+  /**
+   * Buffers to be reused by rpe.
+   */
+  mutable std::vector<Number> rpe_buffer;
+
   /**
    * MappingInfo object needed as Mapping argument by FEPointEvaluation.
    */
index a407cb324565d0efaada8eaddf21dbebe1750eaf..3d0becb8c3d8817fa2af529ea42afa68b4c5a79f 100644 (file)
@@ -3408,7 +3408,7 @@ namespace internal
 
 template <typename VectorType>
 template <int dim, std::size_t width, typename IndexType>
-void
+std::pair<bool, bool>
 MGTwoLevelTransferBase<VectorType>::
   internal_enable_inplace_operations_if_possible(
     const std::shared_ptr<const Utilities::MPI::Partitioner>
@@ -3421,11 +3421,14 @@ MGTwoLevelTransferBase<VectorType>::
                               &constraint_info_coarse,
     std::vector<unsigned int> &dof_indices_fine)
 {
+  std::pair<bool, bool> success_flags = {false, false};
+
   if (this->partitioner_coarse->is_globally_compatible(
         *external_partitioner_coarse))
     {
       this->vec_coarse.reinit(0);
       this->partitioner_coarse = external_partitioner_coarse;
+      success_flags.first      = true;
     }
   else if (internal::is_partitioner_contained(this->partitioner_coarse,
                                               external_partitioner_coarse))
@@ -3445,6 +3448,7 @@ MGTwoLevelTransferBase<VectorType>::
                                               external_partitioner_coarse);
 
       this->partitioner_coarse = external_partitioner_coarse;
+      success_flags.first      = true;
     }
 
   vec_fine_needs_ghost_update =
@@ -3456,6 +3460,7 @@ MGTwoLevelTransferBase<VectorType>::
     {
       this->vec_fine.reinit(0);
       this->partitioner_fine = external_partitioner_fine;
+      success_flags.second   = true;
     }
   else if (internal::is_partitioner_contained(this->partitioner_fine,
                                               external_partitioner_fine))
@@ -3471,20 +3476,23 @@ MGTwoLevelTransferBase<VectorType>::
                                               external_partitioner_fine);
 
       this->partitioner_fine = external_partitioner_fine;
+      success_flags.second   = true;
     }
+
+  return success_flags;
 }
 
 
 
 template <int dim, typename VectorType>
-void
+std::pair<bool, bool>
 MGTwoLevelTransfer<dim, VectorType>::enable_inplace_operations_if_possible(
   const std::shared_ptr<const Utilities::MPI::Partitioner>
     &external_partitioner_coarse,
   const std::shared_ptr<const Utilities::MPI::Partitioner>
     &external_partitioner_fine)
 {
-  this->internal_enable_inplace_operations_if_possible(
+  return this->internal_enable_inplace_operations_if_possible(
     external_partitioner_coarse,
     external_partitioner_fine,
     this->vec_fine_needs_ghost_update,
@@ -4908,10 +4916,6 @@ MGTwoLevelTransferNonNested<dim, VectorType>::prolongate_and_add_internal_comp(
   VectorType       &dst,
   const VectorType &src) const
 {
-  using Traits = internal::FEPointEvaluation::
-    EvaluatorTypeTraits<dim, dim, n_components, Number>;
-  using value_type = typename Traits::value_type;
-
   const auto evaluation_function = [&](auto &values, const auto &cell_data) {
     this->signals_non_nested.prolongation_cell_loop(true);
     std::vector<Number> solution_values;
@@ -4919,6 +4923,8 @@ MGTwoLevelTransferNonNested<dim, VectorType>::prolongate_and_add_internal_comp(
     FEPointEvaluation<n_components, dim, dim, Number> evaluator(*mapping_info,
                                                                 *fe_coarse);
 
+    const auto &send_permutation = rpe.get_send_permutation();
+
     for (unsigned int cell = 0; cell < cell_data.cells.size(); ++cell)
       {
         solution_values.resize(fe_coarse->n_dofs_per_cell());
@@ -4940,19 +4946,25 @@ MGTwoLevelTransferNonNested<dim, VectorType>::prolongate_and_add_internal_comp(
         evaluator.evaluate(solution_values, dealii::EvaluationFlags::values);
 
         for (const auto q : evaluator.quadrature_point_indices())
-          values[q + cell_data.reference_point_ptrs[cell]] =
-            evaluator.get_value(q);
+          {
+            const unsigned int index =
+              send_permutation[q + cell_data.reference_point_ptrs[cell]];
+
+            for (unsigned int c = 0; c < n_components; ++c)
+              values[index * n_components + c] =
+                internal::access(evaluator.get_value(q), c);
+          }
       }
     this->signals_non_nested.prolongation_cell_loop(false);
   };
 
   this->signals_non_nested.prolongation(true);
 
-  std::vector<value_type> evaluation_point_results;
-  std::vector<value_type> buffer;
-  rpe.template evaluate_and_process<value_type>(evaluation_point_results,
-                                                buffer,
-                                                evaluation_function);
+  std::vector<Number> &evaluation_point_results = this->rpe_input_output;
+  std::vector<Number> &buffer                   = this->rpe_buffer;
+
+  rpe.template evaluate_and_process<Number, n_components>(
+    evaluation_point_results, buffer, evaluation_function, false);
   this->signals_non_nested.prolongation(false);
 
   // Keep a vector of typical inverse touch counts that avoid divisions, all
@@ -4961,11 +4973,12 @@ MGTwoLevelTransferNonNested<dim, VectorType>::prolongate_and_add_internal_comp(
   for (unsigned int i = 0; i < typical_weights.size(); ++i)
     typical_weights[i] = Number(1) / Number(i + 1);
 
-  const bool  must_interpolate = (rpe.is_map_unique() == false);
-  const auto &ptr              = rpe.get_point_ptrs();
+  const bool  must_interpolate     = (rpe.is_map_unique() == false);
+  const auto &ptr                  = rpe.get_point_ptrs();
+  const auto &recv_permutation_inv = rpe.get_inverse_recv_permutation();
   for (unsigned int j = 0; j < ptr.size() - 1; ++j)
     {
-      value_type result;
+      std::array<Number, n_components> result;
       // Weight operator in case some points are owned by multiple cells.
       if (must_interpolate)
         {
@@ -4975,19 +4988,33 @@ MGTwoLevelTransferNonNested<dim, VectorType>::prolongate_and_add_internal_comp(
             {
               result = {};
               for (unsigned int k = 0; k < n_entries; ++k)
-                result += evaluation_point_results[ptr[j] + k];
+                {
+                  const unsigned int index = recv_permutation_inv[ptr[j] + k];
+
+                  for (unsigned int c = 0; c < n_components; ++c)
+                    result[c] +=
+                      evaluation_point_results[index * n_components + c];
+                }
               if (n_entries <= typical_weights.size())
-                result *= typical_weights[n_entries - 1];
+                for (unsigned int c = 0; c < n_components; ++c)
+                  result[c] *= typical_weights[n_entries - 1];
               else
-                result /= Number(n_entries);
+                for (unsigned int c = 0; c < n_components; ++c)
+                  result[c] *= Number(1) / Number(n_entries);
             }
           else if (n_entries == 1)
-            result = evaluation_point_results[ptr[j]];
+            {
+              const unsigned int index = recv_permutation_inv[ptr[j]];
+
+              for (unsigned int c = 0; c < n_components; ++c)
+                result[c] = evaluation_point_results[index * n_components + c];
+            }
           else
             result = {};
         }
       else
-        result = evaluation_point_results[j];
+        for (unsigned int c = 0; c < n_components; ++c)
+          result[c] = evaluation_point_results[j * n_components + c];
 
       if (level_dof_indices_fine_ptrs.empty())
         {
@@ -4997,7 +5024,7 @@ MGTwoLevelTransferNonNested<dim, VectorType>::prolongate_and_add_internal_comp(
                                this->level_dof_indices_fine.size());
               dst.local_element(
                 this->level_dof_indices_fine[n_components * j + c]) +=
-                internal::access(result, c);
+                result[c];
             }
         }
       else
@@ -5011,7 +5038,7 @@ MGTwoLevelTransferNonNested<dim, VectorType>::prolongate_and_add_internal_comp(
                                  this->level_dof_indices_fine.size());
                 dst.local_element(
                   this->level_dof_indices_fine[n_components * i + c]) +=
-                  internal::access(result, c);
+                  result[c];
               }
         }
     }
@@ -5046,20 +5073,21 @@ MGTwoLevelTransferNonNested<dim, VectorType>::restrict_and_add_internal_comp(
     EvaluatorTypeTraits<dim, dim, n_components, Number>;
   using value_type = typename Traits::value_type;
 
-  std::vector<value_type> evaluation_point_results;
-  std::vector<value_type> buffer;
+  std::vector<Number> &evaluation_point_results = this->rpe_input_output;
+  std::vector<Number> &buffer                   = this->rpe_buffer;
 
   std::array<Number, 8> typical_weights;
   for (unsigned int i = 0; i < typical_weights.size(); ++i)
     typical_weights[i] = Number(1) / Number(i + 1);
 
-  const bool  must_interpolate = (rpe.is_map_unique() == false);
-  const auto &ptr              = rpe.get_point_ptrs();
-  evaluation_point_results.resize(ptr.size() - 1);
+  const bool  must_interpolate     = (rpe.is_map_unique() == false);
+  const auto &ptr                  = rpe.get_point_ptrs();
+  const auto &recv_permutation_inv = rpe.get_inverse_recv_permutation();
+  evaluation_point_results.resize(ptr.back() * n_components);
 
-  for (unsigned int j = 0; j < evaluation_point_results.size(); ++j)
+  for (unsigned int j = 0; j < ptr.size() - 1; ++j)
     {
-      value_type result;
+      std::array<Number, n_components> result;
       if (level_dof_indices_fine_ptrs.empty())
         {
           for (unsigned int c = 0; c < n_components; ++c)
@@ -5067,24 +5095,26 @@ MGTwoLevelTransferNonNested<dim, VectorType>::restrict_and_add_internal_comp(
               AssertIndexRange(n_components * j + c,
                                this->level_dof_indices_fine.size());
 
-              internal::access(result, c) = src.local_element(
+              result[c] = src.local_element(
                 this->level_dof_indices_fine[n_components * j + c]);
             }
         }
       else
         {
-          result = value_type();
+          result = {};
 
           for (unsigned int i = this->level_dof_indices_fine_ptrs[j];
                i < this->level_dof_indices_fine_ptrs[j + 1];
                ++i)
-            for (unsigned int c = 0; c < n_components; ++c)
-              {
-                AssertIndexRange(n_components * i + c,
-                                 this->level_dof_indices_fine.size());
-                internal::access(result, c) += src.local_element(
-                  this->level_dof_indices_fine[n_components * i + c]);
-              }
+            {
+              for (unsigned int c = 0; c < n_components; ++c)
+                {
+                  AssertIndexRange(n_components * i + c,
+                                   this->level_dof_indices_fine.size());
+                  result[c] += src.local_element(
+                    this->level_dof_indices_fine[n_components * i + c]);
+                }
+            }
         }
       if (must_interpolate)
         {
@@ -5092,12 +5122,21 @@ MGTwoLevelTransferNonNested<dim, VectorType>::restrict_and_add_internal_comp(
           if (n_entries > 1)
             {
               if (n_entries <= typical_weights.size())
-                result *= typical_weights[n_entries - 1];
+                for (unsigned int c = 0; c < n_components; ++c)
+                  result[c] *= typical_weights[n_entries - 1];
               else
-                result /= Number(n_entries);
+                for (unsigned int c = 0; c < n_components; ++c)
+                  result[c] /= Number(n_entries);
             }
         }
-      evaluation_point_results[j] = result;
+
+      for (unsigned int i = ptr[j]; i < ptr[j + 1]; ++i)
+        {
+          const unsigned int index = recv_permutation_inv[i];
+
+          for (unsigned int c = 0; c < n_components; ++c)
+            evaluation_point_results[index * n_components + c] = result[c];
+        }
     }
 
   const auto evaluation_function = [&](const auto &values,
@@ -5107,6 +5146,8 @@ MGTwoLevelTransferNonNested<dim, VectorType>::restrict_and_add_internal_comp(
     FEPointEvaluation<n_components, dim, dim, Number> evaluator(*mapping_info,
                                                                 *fe_coarse);
 
+    const auto &send_permutation = rpe.get_send_permutation();
+
     for (unsigned int cell = 0; cell < cell_data.cells.size(); ++cell)
       {
         solution_values.resize(fe_coarse->n_dofs_per_cell());
@@ -5115,8 +5156,19 @@ MGTwoLevelTransferNonNested<dim, VectorType>::restrict_and_add_internal_comp(
         evaluator.reinit(cell);
 
         for (const auto q : evaluator.quadrature_point_indices())
-          evaluator.submit_value(
-            values[q + cell_data.reference_point_ptrs[cell]], q);
+          {
+            typename internal::FEPointEvaluation::
+              EvaluatorTypeTraits<dim, dim, n_components, Number>::value_type
+                value;
+
+            const unsigned int index =
+              send_permutation[q + cell_data.reference_point_ptrs[cell]];
+
+            for (unsigned int c = 0; c < n_components; ++c)
+              internal::access(value, c) = values[index * n_components + c];
+
+            evaluator.submit_value(value, q);
+          }
 
         evaluator.test_and_sum(solution_values, EvaluationFlags::values);
 
@@ -5136,9 +5188,8 @@ MGTwoLevelTransferNonNested<dim, VectorType>::restrict_and_add_internal_comp(
   };
 
   this->signals_non_nested.restriction(true);
-  rpe.template process_and_evaluate<value_type>(evaluation_point_results,
-                                                buffer,
-                                                evaluation_function);
+  rpe.template process_and_evaluate<Number, n_components>(
+    evaluation_point_results, buffer, evaluation_function, false);
   this->signals_non_nested.restriction(false);
 }
 
@@ -5174,7 +5225,7 @@ MGTwoLevelTransferNonNested<dim, VectorType>::interpolate(
 
 
 template <int dim, typename VectorType>
-void
+std::pair<bool, bool>
 MGTwoLevelTransferNonNested<dim, VectorType>::
   enable_inplace_operations_if_possible(
     const std::shared_ptr<const Utilities::MPI::Partitioner>
@@ -5182,7 +5233,7 @@ MGTwoLevelTransferNonNested<dim, VectorType>::
     const std::shared_ptr<const Utilities::MPI::Partitioner>
       &external_partitioner_fine)
 {
-  this->internal_enable_inplace_operations_if_possible(
+  return this->internal_enable_inplace_operations_if_possible(
     external_partitioner_coarse,
     external_partitioner_fine,
     this->vec_fine_needs_ghost_update,
index 76f6580d58a3c0896ca75dfbe1cefd035a506d4e..147dbf08c7b2f1c4e430107ce40973164bf24af8 100644 (file)
@@ -233,6 +233,31 @@ namespace Utilities
       cell_data->reference_point_ptrs.emplace_back(
         cell_data->reference_point_values.size());
 
+      unsigned int max_size_recv = 0;
+      for (unsigned int i = 0; i < recv_ranks.size(); ++i)
+        max_size_recv =
+          std::max(max_size_recv, recv_ptrs[i + 1] - recv_ptrs[i]);
+
+      unsigned int max_size_send = 0;
+      for (unsigned int i = 0; i < send_ranks.size(); ++i)
+        max_size_send =
+          std::max(max_size_send, send_ptrs[i + 1] - send_ptrs[i]);
+
+      this->buffer_size_with_sorting =
+        std::max(send_permutation.size() * 2 + max_size_recv,
+                 point_ptrs.back() + send_permutation.size() + max_size_send);
+
+      this->buffer_size_without_sorting = send_permutation.size();
+
+      // invert permutation matrices
+      recv_permutation_inv.resize(recv_permutation.size());
+      for (unsigned int c = 0; c < recv_permutation.size(); ++c)
+        recv_permutation_inv[recv_permutation[c]] = c;
+
+      send_permutation_inv.resize(send_permutation.size());
+      for (unsigned int c = 0; c < send_permutation.size(); ++c)
+        send_permutation_inv[send_permutation[c]] = c;
+
       this->ready_flag = true;
     }
 
@@ -354,6 +379,24 @@ namespace Utilities
       return ready_flag;
     }
 
+
+
+    template <int dim, int spacedim>
+    const std::vector<unsigned int> &
+    RemotePointEvaluation<dim, spacedim>::get_send_permutation() const
+    {
+      return send_permutation;
+    }
+
+
+
+    template <int dim, int spacedim>
+    const std::vector<unsigned int> &
+    RemotePointEvaluation<dim, spacedim>::get_inverse_recv_permutation() const
+    {
+      return recv_permutation_inv;
+    }
+
   } // end of namespace MPI
 } // end of namespace Utilities
 
index 84d1e4f96668ebbf942b1786da155660c3b2d98a..d2bc96003743edf3fb53f2acfa69a563986c5db8 100644 (file)
@@ -264,7 +264,7 @@ compute_force_vector_sharp_interface(
   const VectorType                &curvature_vector,
   VectorType                      &force_vector)
 {
-  using T = Tensor<1, spacedim, double>;
+  using T = double;
 
   const auto integration_points = [&]() {
     std::vector<Point<spacedim>> integration_points;
@@ -330,12 +330,10 @@ compute_force_vector_sharp_interface(
 
         for (const auto q : fe_eval_dim.quadrature_point_indices())
           {
-            T result;
             for (unsigned int i = 0; i < spacedim; ++i)
-              result[i] = -curvature_values[q] * normal_values[q][i] *
-                          fe_eval.JxW(q) * surface_tension;
-
-            integration_values.push_back(result);
+              integration_values.push_back(-curvature_values[q] *
+                                           normal_values[q][i] *
+                                           fe_eval.JxW(q) * surface_tension);
           }
       }
 
@@ -371,8 +369,9 @@ compute_force_vector_sharp_interface(
           cell_data.reference_point_ptrs[i + 1] -
             cell_data.reference_point_ptrs[i]);
 
-        const ArrayView<const T> force_JxW(
-          values.data() + cell_data.reference_point_ptrs[i],
+        const ArrayView<const Tensor<1, spacedim, T>> force_JxW(
+          reinterpret_cast<const Tensor<1, spacedim, T> *>(values.data()) +
+            cell_data.reference_point_ptrs[i],
           cell_data.reference_point_ptrs[i + 1] -
             cell_data.reference_point_ptrs[i]);
 
@@ -391,7 +390,9 @@ compute_force_vector_sharp_interface(
 
   std::vector<T> buffer;
 
-  eval.template process_and_evaluate<T>(integration_values, buffer, fu);
+  eval.template process_and_evaluate<T, spacedim>(integration_values,
+                                                  buffer,
+                                                  fu);
 }
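The test now transports plain doubles and reinterprets them as tensors on the cell side. A minimal sketch of the layout assumption behind that cast (n_points and spacedim are assumed in scope):

    // rank-1 tensors unrolled point by point: [p0_0, ..., p0_{spacedim-1},
    // p1_0, ...]; Tensor<1, spacedim, double> is layout-compatible with
    // spacedim contiguous doubles, so the flat buffer can be viewed as
    // tensors without copying
    std::vector<double> unrolled(n_points * spacedim);
    const auto *as_tensors =
      reinterpret_cast<const Tensor<1, spacedim, double> *>(unrolled.data());
    // as_tensors[p][c] reads unrolled[p * spacedim + c]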
 
 
