https://gitweb.dealii.org/ - dealii.git/commitdiff
Introduce VectorDataExchange::Full and introduce 2nd communicator in MatrixFree 11098/head
author    Peter Munch <peterrmuench@gmail.com>
          Sat, 24 Oct 2020 15:42:39 +0000 (17:42 +0200)
committer Peter Munch <peterrmuench@gmail.com>
          Tue, 3 Nov 2020 12:52:15 +0000 (13:52 +0100)
include/deal.II/matrix_free/dof_info.h
include/deal.II/matrix_free/dof_info.templates.h
include/deal.II/matrix_free/matrix_free.h
include/deal.II/matrix_free/matrix_free.templates.h
include/deal.II/matrix_free/vector_data_exchange.h
source/matrix_free/vector_data_exchange.cc

index c51c6f2072a220ca2da1bb7552918321fc94dbcc..e1b1c5f271340af42b497a00d5bb90820311304d 100644 (file)
@@ -187,7 +187,9 @@ namespace internal
        * access to all vector entries.
        */
       void
-      assign_ghosts(const std::vector<unsigned int> &boundary_cells);
+      assign_ghosts(const std::vector<unsigned int> &boundary_cells,
+                    const MPI_Comm &                 communicator_sm,
+                    const bool use_vector_data_exchanger_full);
 
       /**
        * This method reorders the way cells are gone through based on a given
@@ -240,7 +242,9 @@ namespace internal
         const unsigned int                        n_lanes,
         const std::vector<FaceToCellTopology<1>> &inner_faces,
         const std::vector<FaceToCellTopology<1>> &ghosted_faces,
-        const bool                                fill_cell_centric);
+        const bool                                fill_cell_centric,
+        const MPI_Comm &                          communicator_sm,
+        const bool use_vector_data_exchanger_full);
 
       /**
        * Given @p cell_indices_contiguous_sm containing the local index of
index d4361183946a63f7beb09050d54ae906e6ca2d7f..aa71d3da0fced391650714f1d5fa6100aecff394 100644 (file)
@@ -382,7 +382,9 @@ namespace internal
 
 
     void
-    DoFInfo::assign_ghosts(const std::vector<unsigned int> &boundary_cells)
+    DoFInfo::assign_ghosts(const std::vector<unsigned int> &boundary_cells,
+                           const MPI_Comm &                 communicator_sm,
+                           const bool use_vector_data_exchanger_full)
     {
       Assert(boundary_cells.size() < row_starts.size(), ExcInternalError());
 
@@ -497,9 +499,14 @@ namespace internal
         const_cast<Utilities::MPI::Partitioner *>(vector_partitioner.get());
       vec_part->set_ghost_indices(ghost_indices);
 
-      vector_exchanger = std::make_shared<
-        internal::MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
-        vector_partitioner);
+      if (use_vector_data_exchanger_full == false)
+        vector_exchanger =
+          std::make_shared<internal::MatrixFreeFunctions::VectorDataExchange::
+                             PartitionerWrapper>(vector_partitioner);
+      else
+        vector_exchanger = std::make_shared<
+          internal::MatrixFreeFunctions::VectorDataExchange::Full>(
+          vector_partitioner, communicator_sm);
     }
 
 
@@ -1173,7 +1180,9 @@ namespace internal
       const unsigned int                        n_lanes,
       const std::vector<FaceToCellTopology<1>> &inner_faces,
       const std::vector<FaceToCellTopology<1>> &ghosted_faces,
-      const bool                                fill_cell_centric)
+      const bool                                fill_cell_centric,
+      const MPI_Comm &                          communicator_sm,
+      const bool                                use_vector_data_exchanger_full)
     {
       const Utilities::MPI::Partitioner &part = *vector_partitioner;
 
@@ -1226,9 +1235,14 @@ namespace internal
               ->set_ghost_indices(compressed_set, part.ghost_indices());
           }
 
-        vector_exchanger_face_variants[0] =
-          std::make_shared<internal::MatrixFreeFunctions::VectorDataExchange::
-                             PartitionerWrapper>(temp_0);
+        if (use_vector_data_exchanger_full == false)
+          vector_exchanger_face_variants[0] =
+            std::make_shared<internal::MatrixFreeFunctions::VectorDataExchange::
+                               PartitionerWrapper>(temp_0);
+        else
+          vector_exchanger_face_variants[0] = std::make_shared<
+            internal::MatrixFreeFunctions::VectorDataExchange::Full>(
+            temp_0, communicator_sm);
       }
 
       // construct a numbering of faces
@@ -1467,18 +1481,36 @@ namespace internal
             part.locally_owned_range(), part.get_mpi_communicator());
         }
 
-      vector_exchanger_face_variants[1] = std::make_shared<
-        internal::MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
-        temp_1);
-      vector_exchanger_face_variants[2] = std::make_shared<
-        internal::MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
-        temp_2);
-      vector_exchanger_face_variants[3] = std::make_shared<
-        internal::MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
-        temp_3);
-      vector_exchanger_face_variants[4] = std::make_shared<
-        internal::MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
-        temp_4);
+      if (use_vector_data_exchanger_full == false)
+        {
+          vector_exchanger_face_variants[1] =
+            std::make_shared<internal::MatrixFreeFunctions::VectorDataExchange::
+                               PartitionerWrapper>(temp_1);
+          vector_exchanger_face_variants[2] =
+            std::make_shared<internal::MatrixFreeFunctions::VectorDataExchange::
+                               PartitionerWrapper>(temp_2);
+          vector_exchanger_face_variants[3] =
+            std::make_shared<internal::MatrixFreeFunctions::VectorDataExchange::
+                               PartitionerWrapper>(temp_3);
+          vector_exchanger_face_variants[4] =
+            std::make_shared<internal::MatrixFreeFunctions::VectorDataExchange::
+                               PartitionerWrapper>(temp_4);
+        }
+      else
+        {
+          vector_exchanger_face_variants[1] = std::make_shared<
+            internal::MatrixFreeFunctions::VectorDataExchange::Full>(
+            temp_1, communicator_sm);
+          vector_exchanger_face_variants[2] = std::make_shared<
+            internal::MatrixFreeFunctions::VectorDataExchange::Full>(
+            temp_2, communicator_sm);
+          vector_exchanger_face_variants[3] = std::make_shared<
+            internal::MatrixFreeFunctions::VectorDataExchange::Full>(
+            temp_3, communicator_sm);
+          vector_exchanger_face_variants[4] = std::make_shared<
+            internal::MatrixFreeFunctions::VectorDataExchange::Full>(
+            temp_4, communicator_sm);
+        }
     }
 
 
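
The hunks above repeat the same construction pattern in several places: depending on use_vector_data_exchanger_full, either a PartitionerWrapper or a Full exchanger is built from the same Utilities::MPI::Partitioner, with Full additionally receiving the shared-memory sub-communicator. A minimal sketch of that dispatch follows; the helper name make_vector_exchanger is illustrative and not part of the commit, which keeps the explicit if/else at each construction site:

    // Illustrative helper (not in the commit): both exchanger variants are
    // built from the same partitioner; Full additionally receives the
    // shared-memory sub-communicator.
    std::shared_ptr<internal::MatrixFreeFunctions::VectorDataExchange::Base>
    make_vector_exchanger(
      const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner,
      const MPI_Comm &communicator_sm,
      const bool      use_vector_data_exchanger_full)
    {
      if (use_vector_data_exchanger_full == false)
        // plain wrapper around the point-to-point partitioner
        return std::make_shared<internal::MatrixFreeFunctions::
                                  VectorDataExchange::PartitionerWrapper>(
          partitioner);
      else
        // variant that additionally exploits the shared-memory domain
        return std::make_shared<
          internal::MatrixFreeFunctions::VectorDataExchange::Full>(
          partitioner, communicator_sm);
    }
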
index f7433f60e6aca9af2e5cd8057d339327b9f1a9f2..7dadca5886381be04e40b4f6baf6056b940ad603 100644 (file)
@@ -244,6 +244,7 @@ public:
       , cell_vectorization_categories_strict(
           cell_vectorization_categories_strict)
       , communicator_sm(MPI_COMM_SELF)
+      , use_vector_data_exchanger_full(false)
     {}
 
     /**
@@ -270,6 +271,7 @@ public:
       , cell_vectorization_categories_strict(
           other.cell_vectorization_categories_strict)
       , communicator_sm(other.communicator_sm)
+      , use_vector_data_exchanger_full(other.use_vector_data_exchanger_full)
     {}
 
     // remove with level_mg_handler
@@ -299,7 +301,8 @@ public:
       cell_vectorization_category   = other.cell_vectorization_category;
       cell_vectorization_categories_strict =
         other.cell_vectorization_categories_strict;
-      communicator_sm = other.communicator_sm;
+      communicator_sm                = other.communicator_sm;
+      use_vector_data_exchanger_full = other.use_vector_data_exchanger_full;
 
       return *this;
     }
@@ -537,6 +540,13 @@ public:
      * Shared-memory MPI communicator. Default: MPI_COMM_SELF.
      */
     MPI_Comm communicator_sm;
+
+    /**
+     * Experimental: flag to switch between
+     * internal::MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper and
+     * internal::MatrixFreeFunctions::VectorDataExchange::Full.
+     */
+    bool use_vector_data_exchanger_full;
   };
 
   /**
@@ -3295,7 +3305,8 @@ namespace internal
 
           const auto &part = get_partitioner(mf_component);
 
-          if (part.n_ghost_indices() == 0 && part.n_import_indices() == 0)
+          if (part.n_ghost_indices() == 0 && part.n_import_indices() == 0 &&
+              part.n_import_sm_procs() == 0)
             return;
 
           tmp_data[component_in_block_vector] =
@@ -3305,7 +3316,7 @@ namespace internal
           AssertDimension(requests.size(), tmp_data.size());
 
           part.export_to_ghosted_array_start(
-            component_in_block_vector + channel_shift,
+            component_in_block_vector * 2 + channel_shift,
             ArrayView<const Number>(vec.begin(), part.local_size()),
             vec.shared_vector_data(),
             ArrayView<Number>(const_cast<Number *>(vec.begin()) +
@@ -3504,7 +3515,7 @@ namespace internal
 
           part.import_from_ghosted_array_start(
             dealii::VectorOperation::add,
-            component_in_block_vector + channel_shift,
+            component_in_block_vector * 2 + channel_shift,
             ArrayView<Number>(vec.begin(), part.local_size()),
             vec.shared_vector_data(),
             ArrayView<Number>(vec.begin() + part.local_size(),
@@ -3601,6 +3612,9 @@ namespace internal
                 tmp_data[component_in_block_vector]);
               tmp_data[component_in_block_vector] = nullptr;
             }
+
+          if (Utilities::MPI::job_supports_mpi())
+            MPI_Barrier(matrix_free.get_task_info().communicator_sm);
 #  endif
         }
     }
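
The AdditionalData hunks above define the opt-in mechanism: a user provides a shared-memory sub-communicator and sets the experimental flag. The change from component_in_block_vector + channel_shift to component_in_block_vector * 2 + channel_shift reserves two MPI tags per vector component, matching the Full exchanger below, which uses communication_channel + 0 and communication_channel + 1 for its ready-signal and data messages; the added MPI_Barrier on communicator_sm synchronizes the shared-memory domain before the vectors are reused, since neighbors now access each other's arrays directly. A hypothetical usage sketch, assuming matrix_free, mapping, dof_handler, constraints, and quadrature exist; note also the Assert in the next file that still rejects non-default communicators in one code path:

    // Sub-communicator of ranks on the same shared-memory node; it must
    // outlive the MatrixFree object that uses it.
    MPI_Comm comm_sm;
    MPI_Comm_split_type(
      MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &comm_sm);

    MatrixFree<3, double>::AdditionalData additional_data;
    additional_data.communicator_sm                = comm_sm; // default: MPI_COMM_SELF
    additional_data.use_vector_data_exchanger_full = true;    // default: false

    matrix_free.reinit(
      mapping, dof_handler, constraints, quadrature, additional_data);

    // ... use matrix_free ...
    MPI_Comm_free(&comm_sm);
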
index 6933925da7fecf23f8e2c00fb78f4e6ad785de51..eee535d44969033305ed4e27298b75f5760cf16f 100644 (file)
@@ -338,6 +338,10 @@ MatrixFree<dim, Number, VectorizedArrayType>::internal_reinit(
             Utilities::MPI::this_mpi_process(task_info.communicator);
           task_info.n_procs =
             Utilities::MPI::n_mpi_processes(task_info.communicator);
+
+          Assert(additional_data.communicator_sm == MPI_COMM_SELF,
+                 ExcNotImplemented());
+
           task_info.communicator_sm = additional_data.communicator_sm;
         }
       else
@@ -679,7 +683,8 @@ namespace internal
     std::vector<std::pair<unsigned int, unsigned int>> &cell_level_index,
     std::vector<MatrixFreeFunctions::DoFInfo> &         dof_info,
     MatrixFreeFunctions::FaceSetup<dim> &               face_setup,
-    MatrixFreeFunctions::ConstraintValues<double> &     constraint_values)
+    MatrixFreeFunctions::ConstraintValues<double> &     constraint_values,
+    const bool use_vector_data_exchanger_full)
   {
     if (do_face_integrals)
       face_setup.initialize(dof_handler[0]->get_triangulation(),
@@ -784,10 +789,15 @@ namespace internal
           std::make_shared<Utilities::MPI::Partitioner>(locally_owned_dofs[no],
                                                         task_info.communicator);
 
-        dof_info[no].vector_exchanger =
-          std::make_shared<internal::MatrixFreeFunctions::VectorDataExchange::
-                             PartitionerWrapper>(
-            dof_info[no].vector_partitioner);
+        if (use_vector_data_exchanger_full == false)
+          dof_info[no].vector_exchanger =
+            std::make_shared<internal::MatrixFreeFunctions::VectorDataExchange::
+                               PartitionerWrapper>(
+              dof_info[no].vector_partitioner);
+        else
+          dof_info[no].vector_exchanger = std::make_shared<
+            internal::MatrixFreeFunctions::VectorDataExchange::Full>(
+            dof_info[no].vector_partitioner, task_info.communicator_sm);
 
         // initialize the arrays for indices
         const unsigned int n_components_total =
@@ -927,7 +937,9 @@ namespace internal
                             dof_info[no].ghost_dofs.push_back(dof_index);
                         }
             }
-          dof_info[no].assign_ghosts(cells_with_ghosts);
+          dof_info[no].assign_ghosts(cells_with_ghosts,
+                                     task_info.communicator_sm,
+                                     use_vector_data_exchanger_full);
         }
     }
 
@@ -1313,7 +1325,8 @@ MatrixFree<dim, Number, VectorizedArrayType>::initialize_indices(
     cell_level_index,
     dof_info,
     face_setup,
-    constraint_values);
+    constraint_values,
+    additional_data.use_vector_data_exchanger_full);
 
   // set constraint pool from the std::map and reorder the indices
   std::vector<const std::vector<double> *> constraints(
@@ -1469,7 +1482,9 @@ MatrixFree<dim, Number, VectorizedArrayType>::initialize_indices(
           VectorizedArrayType::size(),
           face_setup.inner_faces,
           face_setup.inner_ghost_faces,
-          is_fe_dg[count++] && additional_data.hold_all_faces_to_owned_cells);
+          is_fe_dg[count++] && additional_data.hold_all_faces_to_owned_cells,
+          task_info.communicator_sm,
+          additional_data.use_vector_data_exchanger_full);
     }
 
   for (auto &di : dof_info)
index 38d11aafa9f04a47685ea97c51ce9eb22c5fec49..d21efb9d21360fa83d9c610b4ac5c8fccd1aa493 100644 (file)
@@ -247,6 +247,269 @@ namespace internal
         const std::shared_ptr<const Utilities::MPI::Partitioner> partitioner;
       };
 
+
+
+      /**
+       * Similar to the above but using the internal data structures in the
+       * partitioner in order to identify indices of degrees of freedom that are
+       * in the same shared memory region.
+       */
+      class Full : public Base
+      {
+      public:
+        Full(
+          const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner,
+          const MPI_Comm &communicator_sm);
+
+        unsigned int
+        local_size() const override;
+
+        unsigned int
+        n_ghost_indices() const override;
+
+        unsigned int
+        n_import_indices() const override;
+
+        virtual unsigned int
+        n_import_sm_procs() const override;
+
+        virtual types::global_dof_index
+        size() const override;
+
+        const MPI_Comm &
+        get_sm_mpi_communicator() const;
+
+        void
+        export_to_ghosted_array_start(
+          const unsigned int                          communication_channel,
+          const ArrayView<const double> &             locally_owned_array,
+          const std::vector<ArrayView<const double>> &shared_arrays,
+          const ArrayView<double> &                   ghost_array,
+          const ArrayView<double> &                   temporary_storage,
+          std::vector<MPI_Request> &                  requests) const override;
+
+        void
+        export_to_ghosted_array_finish(
+          const ArrayView<const double> &             locally_owned_array,
+          const std::vector<ArrayView<const double>> &shared_arrays,
+          const ArrayView<double> &                   ghost_array,
+          std::vector<MPI_Request> &                  requests) const override;
+
+        void
+        import_from_ghosted_array_start(
+          const VectorOperation::values               vector_operation,
+          const unsigned int                          communication_channel,
+          const ArrayView<const double> &             locally_owned_array,
+          const std::vector<ArrayView<const double>> &shared_arrays,
+          const ArrayView<double> &                   ghost_array,
+          const ArrayView<double> &                   temporary_storage,
+          std::vector<MPI_Request> &                  requests) const override;
+
+        void
+        import_from_ghosted_array_finish(
+          const VectorOperation::values               vector_operation,
+          const ArrayView<double> &                   locally_owned_storage,
+          const std::vector<ArrayView<const double>> &shared_arrays,
+          const ArrayView<double> &                   ghost_array,
+          const ArrayView<const double> &             temporary_storage,
+          std::vector<MPI_Request> &                  requests) const override;
+
+        void
+        reset_ghost_values(const ArrayView<double> &ghost_array) const override;
+
+        void
+        export_to_ghosted_array_start(
+          const unsigned int                         communication_channel,
+          const ArrayView<const float> &             locally_owned_array,
+          const std::vector<ArrayView<const float>> &shared_arrays,
+          const ArrayView<float> &                   ghost_array,
+          const ArrayView<float> &                   temporary_storage,
+          std::vector<MPI_Request> &                 requests) const override;
+
+        void
+        export_to_ghosted_array_finish(
+          const ArrayView<const float> &             locally_owned_array,
+          const std::vector<ArrayView<const float>> &shared_arrays,
+          const ArrayView<float> &                   ghost_array,
+          std::vector<MPI_Request> &                 requests) const override;
+
+        void
+        import_from_ghosted_array_start(
+          const VectorOperation::values              vector_operation,
+          const unsigned int                         communication_channel,
+          const ArrayView<const float> &             locally_owned_array,
+          const std::vector<ArrayView<const float>> &shared_arrays,
+          const ArrayView<float> &                   ghost_array,
+          const ArrayView<float> &                   temporary_storage,
+          std::vector<MPI_Request> &                 requests) const override;
+
+        void
+        import_from_ghosted_array_finish(
+          const VectorOperation::values              vector_operation,
+          const ArrayView<float> &                   locally_owned_storage,
+          const std::vector<ArrayView<const float>> &shared_arrays,
+          const ArrayView<float> &                   ghost_array,
+          const ArrayView<const float> &             temporary_storage,
+          std::vector<MPI_Request> &                 requests) const override;
+
+        void
+        reset_ghost_values(const ArrayView<float> &ghost_array) const override;
+
+      private:
+        template <typename Number>
+        void
+        export_to_ghosted_array_start_impl(
+          const unsigned int                          communication_channel,
+          const ArrayView<const Number> &             locally_owned_array,
+          const std::vector<ArrayView<const Number>> &shared_arrays,
+          const ArrayView<Number> &                   ghost_array,
+          const ArrayView<Number> &                   temporary_storage,
+          std::vector<MPI_Request> &                  requests) const;
+
+        template <typename Number>
+        void
+        export_to_ghosted_array_finish_impl(
+          const ArrayView<const Number> &             locally_owned_array,
+          const std::vector<ArrayView<const Number>> &shared_arrays,
+          const ArrayView<Number> &                   ghost_array,
+          std::vector<MPI_Request> &                  requests) const;
+
+        template <typename Number>
+        void
+        import_from_ghosted_array_start_impl(
+          const VectorOperation::values               vector_operation,
+          const unsigned int                          communication_channel,
+          const ArrayView<const Number> &             locally_owned_array,
+          const std::vector<ArrayView<const Number>> &shared_arrays,
+          const ArrayView<Number> &                   ghost_array,
+          const ArrayView<Number> &                   temporary_storage,
+          std::vector<MPI_Request> &                  requests) const;
+
+        template <typename Number>
+        void
+        import_from_ghosted_array_finish_impl(
+          const VectorOperation::values               vector_operation,
+          const ArrayView<Number> &                   locally_owned_storage,
+          const std::vector<ArrayView<const Number>> &shared_arrays,
+          const ArrayView<Number> &                   ghost_array,
+          const ArrayView<const Number> &             temporary_storage,
+          std::vector<MPI_Request> &                  requests) const;
+
+        template <typename Number>
+        void
+        reset_ghost_values_impl(const ArrayView<Number> &ghost_array) const;
+
+      private:
+        /**
+         * Global communicator.
+         */
+        const MPI_Comm comm;
+
+        /**
+         * Shared-memory sub-communicator.
+         */
+        const MPI_Comm comm_sm;
+
+        /**
+         * Number of locally-owned vector entries.
+         */
+        const unsigned int n_local_elements;
+
+        /**
+         * Number of ghost vector entries.
+         */
+        const unsigned int n_ghost_elements;
+
+        /**
+         * Number of global vector entries.
+         */
+        const types::global_dof_index n_global_elements;
+
+        /**
+         * A variable caching the number of ghost indices in a larger set of
+         * indices by rank.
+         */
+        std::vector<unsigned int> n_ghost_indices_in_larger_set_by_remote_rank;
+
+        /**
+         * For each rank, the indices at which that rank's ghost entries
+         * appear within the larger ghost set, stored in a compressed manner.
+         */
+        std::pair<std::vector<unsigned int>,
+                  std::vector<std::pair<unsigned int, unsigned int>>>
+          ghost_indices_subset_data;
+
+        /**
+         * An array that contains information on which process each of my
+         * ghost indices belongs to, at which offset the data starts, and how
+         * many indices there are.
+         */
+        std::vector<std::array<unsigned int, 3>> ghost_targets_data;
+
+        /**
+         * The set of processes that send us their ghost data, together with
+         * the offset and length of each data field.
+         *
+         * @note Structured as ghost_targets_data.
+         */
+        std::vector<std::array<unsigned int, 3>> import_targets_data;
+
+        /**
+         * The set of (local) indices that we import during compress() from
+         * remote processes, i.e., ghost entries of other processes that
+         * belong to the local range. Stored as compressed ranges: the first
+         * array holds per-rank pointers into the second.
+         */
+        std::pair<std::vector<unsigned int>,
+                  std::vector<std::pair<unsigned int, unsigned int>>>
+          import_indices_data;
+
+        /**
+         * Shared-memory ranks from which data is copied during
+         * export_to_ghosted_array_finish().
+         */
+        std::vector<unsigned int> sm_ghost_ranks;
+
+        /**
+         * Indices from which to copy data during
+         * export_to_ghosted_array_finish().
+         */
+        std::pair<std::vector<unsigned int>,
+                  std::vector<std::pair<unsigned int, unsigned int>>>
+          sm_export_data;
+
+        /**
+         * Indices to which to copy data during
+         * export_to_ghosted_array_finish().
+         */
+        std::pair<std::vector<unsigned int>,
+                  std::vector<std::pair<unsigned int, unsigned int>>>
+          sm_export_data_this;
+
+        /**
+         * Shared-memory ranks from which to copy data during
+         * import_from_ghosted_array_finish().
+         */
+        std::vector<unsigned int> sm_import_ranks;
+
+        /**
+         * Indices from which to copy data during
+         * import_from_ghosted_array_finish().
+         */
+        std::pair<std::vector<unsigned int>,
+                  std::vector<std::pair<unsigned int, unsigned int>>>
+          sm_import_data;
+
+        /**
+         * Indices to which to copy data during
+         * import_from_ghosted_array_finish().
+         */
+        std::pair<std::vector<unsigned int>,
+                  std::vector<std::pair<unsigned int, unsigned int>>>
+          sm_import_data_this;
+      };
+
     } // namespace VectorDataExchange
   }   // end of namespace MatrixFreeFunctions
 } // end of namespace internal
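
Most of the index members above share the type std::pair<std::vector<unsigned int>, std::vector<std::pair<unsigned int, unsigned int>>>: the first vector holds per-rank pointers into the second, and each entry of the second describes a contiguous (start, length) range of indices. The helper compress_to_contiguous_ranges() in the source file below produces this form. A small worked example with illustrative values:

    // Input in CRS form: two ranks, with index lists {4,5,6,9} and {10,11}.
    std::vector<unsigned int> ptr     = {0, 4, 6};
    std::vector<unsigned int> indices = {4, 5, 6, 9, 10, 11};

    // compress_to_contiguous_ranges(ptr, indices) yields:
    //   result.first  == {0, 2, 3}                  // per-rank pointers
    //   result.second == {{4, 3}, {9, 1}, {10, 2}}  // (start, length) ranges
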
index 2193d304170354544f8304cd45e05a4fe35ccae3..f8ab1fe9b1102a0b0438d761bc880cdb4409d0bd 100644 (file)
@@ -305,6 +305,8 @@ namespace internal
         reset_ghost_values_impl(ghost_array);
       }
 
+
+
       template <typename Number>
       void
       PartitionerWrapper::reset_ghost_values_impl(
@@ -316,6 +318,1049 @@ namespace internal
             ghost_array[j] = 0.;
       }
 
+
+
+      namespace internal
+      {
+        std::pair<std::vector<unsigned int>,
+                  std::vector<std::pair<unsigned int, unsigned int>>>
+        compress_to_contiguous_ranges(
+          const std::vector<unsigned int> &sm_export_ptr,
+          const std::vector<unsigned int> &sm_export_indices)
+        {
+          std::vector<unsigned int> recv_ptr = {0};
+          std::vector<unsigned int> recv_indices;
+          std::vector<unsigned int> recv_len;
+
+          for (unsigned int i = 0; i + 1 < sm_export_ptr.size(); i++)
+            {
+              if (sm_export_ptr[i] != sm_export_ptr[i + 1])
+                {
+                  recv_indices.push_back(sm_export_indices[sm_export_ptr[i]]);
+                  recv_len.push_back(1);
+
+                  for (unsigned int j = sm_export_ptr[i] + 1;
+                       j < sm_export_ptr[i + 1];
+                       j++)
+                    if (recv_indices.back() + recv_len.back() !=
+                        sm_export_indices[j])
+                      {
+                        recv_indices.push_back(sm_export_indices[j]);
+                        recv_len.push_back(1);
+                      }
+                    else
+                      recv_len.back()++;
+                }
+              recv_ptr.push_back(recv_indices.size());
+            }
+
+          std::pair<std::vector<unsigned int>,
+                    std::vector<std::pair<unsigned int, unsigned int>>>
+            result;
+
+          result.first = recv_ptr;
+
+          for (unsigned int i = 0; i < recv_indices.size(); ++i)
+            result.second.emplace_back(recv_indices[i], recv_len[i]);
+
+          return result;
+        }
+
+      } // namespace internal
+
+
+
+      Full::Full(
+        const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner,
+        const MPI_Comm &communicator_sm)
+        : comm(partitioner->get_mpi_communicator())
+        , comm_sm(communicator_sm)
+        , n_local_elements(partitioner->locally_owned_range().n_elements())
+        , n_ghost_elements(partitioner->ghost_indices().n_elements())
+        , n_global_elements(partitioner->locally_owned_range().size())
+      {
+#ifndef DEAL_II_WITH_MPI
+        Assert(false, ExcNeedsMPI());
+#else
+        if (Utilities::MPI::job_supports_mpi() == false)
+          return; // nothing to do in serial case
+
+        const auto &is_locally_owned = partitioner->locally_owned_range();
+        const auto &is_locally_ghost = partitioner->ghost_indices();
+        const auto &ghost_indices_within_larger_ghost_set =
+          partitioner->ghost_indices_within_larger_ghost_set();
+
+        // temporary data structures
+        std::vector<unsigned int> n_ghost_indices_in_larger_set_by_remote_rank;
+
+        std::vector<std::array<unsigned int, 3>> ghost_targets_data;
+
+        std::vector<std::array<unsigned int, 3>> import_targets_data;
+
+        std::vector<unsigned int> sm_ghost_ranks;
+
+        std::vector<unsigned int> sm_import_ranks;
+
+        // temporary uncompressed data structures for ghost_indices_subset_data
+        std::vector<unsigned int> ghost_indices_subset_data_ptr = {0};
+        std::vector<unsigned int> ghost_indices_subset_data_indices;
+
+        // ... for import_indices_data
+        std::vector<unsigned int> import_indices_data_ptr = {0};
+        std::vector<unsigned int> import_indices_data_indices;
+
+        // ... for sm_export_data
+        std::vector<unsigned int> sm_export_data_ptr = {0};
+        std::vector<unsigned int> sm_export_data_indices;
+
+        // ... for sm_export_data_this
+        std::vector<unsigned int> sm_export_data_this_ptr = {0};
+        std::vector<unsigned int> sm_export_data_this_indices;
+
+        // ... for sm_import_data
+        std::vector<unsigned int> sm_import_data_ptr = {};
+        std::vector<unsigned int> sm_import_data_indices;
+
+        // ... for sm_import_data_this
+        std::vector<unsigned int> sm_import_data_this_ptr = {0};
+        std::vector<unsigned int> sm_import_data_this_indices;
+
+        // collect ranks of processes of shared-memory domain
+        const auto sm_ranks = [&]() {
+          std::vector<unsigned int> sm_ranks(
+            Utilities::MPI::n_mpi_processes(comm_sm));
+
+          const unsigned int rank = Utilities::MPI::this_mpi_process(comm);
+
+          MPI_Allgather(
+            &rank, 1, MPI_UNSIGNED, sm_ranks.data(), 1, MPI_UNSIGNED, comm_sm);
+
+          return sm_ranks;
+        }();
+
+        // determine owners of ghost indices and determine requesters
+        std::vector<unsigned int> owning_ranks_of_ghosts(
+          is_locally_ghost.n_elements());
+
+        Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload
+          process(is_locally_owned,
+                  is_locally_ghost,
+                  comm,
+                  owning_ranks_of_ghosts,
+                  /*track_index_requests = */ true);
+
+        Utilities::MPI::ConsensusAlgorithms::Selector<
+          std::pair<types::global_dof_index, types::global_dof_index>,
+          unsigned int>
+          consensus_algorithm(process, comm);
+        consensus_algorithm.run();
+
+        // decompress ghost_indices_within_larger_ghost_set for simpler
+        // data access during setup
+        std::vector<unsigned int> shifts_indices;
+        for (const auto &pair : ghost_indices_within_larger_ghost_set)
+          for (unsigned int k = pair.first; k < pair.second; ++k)
+            shifts_indices.push_back(k);
+
+        // process ghost indices
+        {
+          // collect ghost indices according to owning rank
+          std::map<unsigned int, std::vector<types::global_dof_index>>
+            rank_to_local_indices;
+
+          for (unsigned int i = 0; i < owning_ranks_of_ghosts.size(); i++)
+            rank_to_local_indices[owning_ranks_of_ghosts[i]].push_back(i);
+
+          unsigned int compressed_offset = 0;
+
+          for (const auto &rank_and_local_indices : rank_to_local_indices)
+            {
+              const auto sm_ranks_ptr = std::find(sm_ranks.begin(),
+                                                  sm_ranks.end(),
+                                                  rank_and_local_indices.first);
+
+              if (sm_ranks_ptr == sm_ranks.end()) // remote process
+                {
+                  ghost_targets_data.emplace_back(std::array<unsigned int, 3>{{
+                    rank_and_local_indices.first,      // rank
+                    shifts_indices[compressed_offset], // offset
+                    static_cast<unsigned int>(
+                      rank_and_local_indices.second.size()) // length
+                  }});
+
+                  for (unsigned int i = 0;
+                       i < rank_and_local_indices.second.size();
+                       ++i)
+                    ghost_indices_subset_data_indices.push_back(
+                      shifts_indices[i + compressed_offset]);
+
+                  ghost_indices_subset_data_ptr.push_back(
+                    ghost_indices_subset_data_indices.size());
+
+                  ghost_indices_subset_data.first.push_back(compressed_offset);
+
+                  unsigned int i =
+                    n_ghost_indices_in_larger_set_by_remote_rank.size();
+
+                  n_ghost_indices_in_larger_set_by_remote_rank.push_back(
+                    (shifts_indices[ghost_indices_subset_data.first[i] +
+                                    (ghost_targets_data[i][2] - 1)] -
+                     shifts_indices[ghost_indices_subset_data.first[i]]) +
+                    1);
+                }
+              else // shared process
+                {
+                  sm_ghost_ranks.push_back(
+                    std::distance(sm_ranks.begin(), sm_ranks_ptr));
+
+                  sm_export_data_ptr.push_back(
+                    sm_export_data_ptr.back() +
+                    rank_and_local_indices.second.size());
+
+                  for (unsigned int i = compressed_offset;
+                       i <
+                       rank_and_local_indices.second.size() + compressed_offset;
+                       ++i)
+                    sm_export_data_this_indices.push_back(
+                      shifts_indices[i] + is_locally_owned.n_elements());
+
+                  sm_export_data_this_ptr.push_back(
+                    sm_export_data_this_indices.size());
+                }
+              compressed_offset += rank_and_local_indices.second.size();
+            }
+
+          sm_export_data_indices.resize(sm_export_data_ptr.back());
+        }
+
+        // process requesters
+        {
+          const auto rank_to_global_indices = process.get_requesters();
+
+          for (const auto &rank_and_global_indices : rank_to_global_indices)
+            {
+              const auto sm_ranks_ptr =
+                std::find(sm_ranks.begin(),
+                          sm_ranks.end(),
+                          rank_and_global_indices.first);
+
+              if (sm_ranks_ptr == sm_ranks.end()) // remote process
+                {
+                  import_targets_data.emplace_back(std::array<unsigned int, 3>{{
+                    rank_and_global_indices.first, // rank
+                    static_cast<unsigned int>(
+                      import_indices_data_indices.size()), // offset
+                    static_cast<unsigned int>(
+                      rank_and_global_indices.second.n_elements()) // length
+                  }});
+
+                  for (const auto i : rank_and_global_indices.second)
+                    import_indices_data_indices.push_back(
+                      is_locally_owned.index_within_set(i));
+
+                  import_indices_data_ptr.push_back(
+                    import_indices_data_indices.size());
+                }
+              else // shared process
+                {
+                  sm_import_ranks.push_back(
+                    std::distance(sm_ranks.begin(), sm_ranks_ptr));
+
+                  for (const auto i : rank_and_global_indices.second)
+                    sm_import_data_this_indices.push_back(
+                      is_locally_owned.index_within_set(i));
+
+                  sm_import_data_this_ptr.push_back(
+                    sm_import_data_this_indices.size());
+                }
+            }
+
+          sm_import_data_ptr = sm_import_data_this_ptr;
+          sm_import_data_indices.resize(sm_import_data_this_ptr.back());
+        }
+
+        // send sm_export_data_this to sm-neighbor -> sm_import_data
+        {
+          std::vector<MPI_Request> requests(sm_ghost_ranks.size() +
+                                            sm_import_ranks.size());
+
+          for (unsigned int i = 0; i < sm_ghost_ranks.size(); i++)
+            MPI_Isend(sm_export_data_this_indices.data() +
+                        sm_export_data_this_ptr[i],
+                      sm_export_data_this_ptr[i + 1] -
+                        sm_export_data_this_ptr[i],
+                      MPI_UNSIGNED,
+                      sm_ghost_ranks[i],
+                      4,
+                      comm_sm,
+                      requests.data() + i);
+
+          for (unsigned int i = 0; i < sm_import_ranks.size(); i++)
+            MPI_Irecv(sm_import_data_indices.data() + sm_import_data_ptr[i],
+                      sm_import_data_ptr[i + 1] - sm_import_data_ptr[i],
+                      MPI_UNSIGNED,
+                      sm_import_ranks[i],
+                      4,
+                      comm_sm,
+                      requests.data() + sm_ghost_ranks.size() + i);
+
+          MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+        }
+
+        // send sm_import_data_this to sm-neighbor -> sm_export_data_indices
+        {
+          std::vector<MPI_Request> requests(sm_import_ranks.size() +
+                                            sm_ghost_ranks.size());
+
+          for (unsigned int i = 0; i < sm_import_ranks.size(); i++)
+            MPI_Isend(sm_import_data_this_indices.data() +
+                        sm_import_data_this_ptr[i],
+                      sm_import_data_this_ptr[i + 1] -
+                        sm_import_data_this_ptr[i],
+                      MPI_UNSIGNED,
+                      sm_import_ranks[i],
+                      2,
+                      comm_sm,
+                      requests.data() + i);
+
+          for (unsigned int i = 0; i < sm_ghost_ranks.size(); i++)
+            MPI_Irecv(sm_export_data_indices.data() + sm_export_data_ptr[i],
+                      sm_export_data_ptr[i + 1] - sm_export_data_ptr[i],
+                      MPI_UNSIGNED,
+                      sm_ghost_ranks[i],
+                      2,
+                      comm_sm,
+                      requests.data() + sm_import_ranks.size() + i);
+
+          MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+        }
+
+        // store data structures and, if needed, compress them
+        this->n_ghost_indices_in_larger_set_by_remote_rank =
+          n_ghost_indices_in_larger_set_by_remote_rank;
+
+        this->ghost_indices_subset_data =
+          internal::compress_to_contiguous_ranges(
+            ghost_indices_subset_data_ptr, ghost_indices_subset_data_indices);
+
+        this->ghost_targets_data = ghost_targets_data;
+
+        this->import_targets_data = import_targets_data;
+
+        this->import_indices_data =
+          internal::compress_to_contiguous_ranges(import_indices_data_ptr,
+                                                  import_indices_data_indices);
+
+        this->sm_ghost_ranks = sm_ghost_ranks;
+
+        this->sm_export_data =
+          internal::compress_to_contiguous_ranges(sm_export_data_ptr,
+                                                  sm_export_data_indices);
+
+        this->sm_export_data_this =
+          internal::compress_to_contiguous_ranges(sm_export_data_this_ptr,
+                                                  sm_export_data_this_indices);
+
+        this->sm_import_ranks = sm_import_ranks;
+
+        this->sm_import_data =
+          internal::compress_to_contiguous_ranges(sm_import_data_ptr,
+                                                  sm_import_data_indices);
+
+        this->sm_import_data_this =
+          internal::compress_to_contiguous_ranges(sm_import_data_this_ptr,
+                                                  sm_import_data_this_indices);
+
+#endif
+      }
+
+
+
+      void
+      Full::export_to_ghosted_array_start(
+        const unsigned int                          communication_channel,
+        const ArrayView<const double> &             locally_owned_array,
+        const std::vector<ArrayView<const double>> &shared_arrays,
+        const ArrayView<double> &                   ghost_array,
+        const ArrayView<double> &                   temporary_storage,
+        std::vector<MPI_Request> &                  requests) const
+      {
+        export_to_ghosted_array_start_impl(communication_channel,
+                                           locally_owned_array,
+                                           shared_arrays,
+                                           ghost_array,
+                                           temporary_storage,
+                                           requests);
+      }
+
+
+
+      void
+      Full::export_to_ghosted_array_finish(
+        const ArrayView<const double> &             locally_owned_array,
+        const std::vector<ArrayView<const double>> &shared_arrays,
+        const ArrayView<double> &                   ghost_array,
+        std::vector<MPI_Request> &                  requests) const
+      {
+        export_to_ghosted_array_finish_impl(locally_owned_array,
+                                            shared_arrays,
+                                            ghost_array,
+                                            requests);
+      }
+
+
+
+      void
+      Full::import_from_ghosted_array_start(
+        const VectorOperation::values               vector_operation,
+        const unsigned int                          communication_channel,
+        const ArrayView<const double> &             locally_owned_array,
+        const std::vector<ArrayView<const double>> &shared_arrays,
+        const ArrayView<double> &                   ghost_array,
+        const ArrayView<double> &                   temporary_storage,
+        std::vector<MPI_Request> &                  requests) const
+      {
+        import_from_ghosted_array_start_impl(vector_operation,
+                                             communication_channel,
+                                             locally_owned_array,
+                                             shared_arrays,
+                                             ghost_array,
+                                             temporary_storage,
+                                             requests);
+      }
+
+
+
+      void
+      Full::import_from_ghosted_array_finish(
+        const VectorOperation::values               vector_operation,
+        const ArrayView<double> &                   locally_owned_storage,
+        const std::vector<ArrayView<const double>> &shared_arrays,
+        const ArrayView<double> &                   ghost_array,
+        const ArrayView<const double> &             temporary_storage,
+        std::vector<MPI_Request> &                  requests) const
+      {
+        import_from_ghosted_array_finish_impl(vector_operation,
+                                              locally_owned_storage,
+                                              shared_arrays,
+                                              ghost_array,
+                                              temporary_storage,
+                                              requests);
+      }
+
+
+
+      void
+      Full::export_to_ghosted_array_start(
+        const unsigned int                         communication_channel,
+        const ArrayView<const float> &             locally_owned_array,
+        const std::vector<ArrayView<const float>> &shared_arrays,
+        const ArrayView<float> &                   ghost_array,
+        const ArrayView<float> &                   temporary_storage,
+        std::vector<MPI_Request> &                 requests) const
+      {
+        export_to_ghosted_array_start_impl(communication_channel,
+                                           locally_owned_array,
+                                           shared_arrays,
+                                           ghost_array,
+                                           temporary_storage,
+                                           requests);
+      }
+
+
+
+      void
+      Full::export_to_ghosted_array_finish(
+        const ArrayView<const float> &             locally_owned_array,
+        const std::vector<ArrayView<const float>> &shared_arrays,
+        const ArrayView<float> &                   ghost_array,
+        std::vector<MPI_Request> &                 requests) const
+      {
+        export_to_ghosted_array_finish_impl(locally_owned_array,
+                                            shared_arrays,
+                                            ghost_array,
+                                            requests);
+      }
+
+
+
+      void
+      Full::import_from_ghosted_array_start(
+        const VectorOperation::values              vector_operation,
+        const unsigned int                         communication_channel,
+        const ArrayView<const float> &             locally_owned_array,
+        const std::vector<ArrayView<const float>> &shared_arrays,
+        const ArrayView<float> &                   ghost_array,
+        const ArrayView<float> &                   temporary_storage,
+        std::vector<MPI_Request> &                 requests) const
+      {
+        import_from_ghosted_array_start_impl(vector_operation,
+                                             communication_channel,
+                                             locally_owned_array,
+                                             shared_arrays,
+                                             ghost_array,
+                                             temporary_storage,
+                                             requests);
+      }
+
+
+
+      void
+      Full::import_from_ghosted_array_finish(
+        const VectorOperation::values              vector_operation,
+        const ArrayView<float> &                   locally_owned_storage,
+        const std::vector<ArrayView<const float>> &shared_arrays,
+        const ArrayView<float> &                   ghost_array,
+        const ArrayView<const float> &             temporary_storage,
+        std::vector<MPI_Request> &                 requests) const
+      {
+        import_from_ghosted_array_finish_impl(vector_operation,
+                                              locally_owned_storage,
+                                              shared_arrays,
+                                              ghost_array,
+                                              temporary_storage,
+                                              requests);
+      }
+
+
+
+      template <typename Number>
+      void
+      Full::export_to_ghosted_array_start_impl(
+        const unsigned int                          communication_channel,
+        const ArrayView<const Number> &             data_this,
+        const std::vector<ArrayView<const Number>> &data_others,
+        const ArrayView<Number> &                   buffer,
+        const ArrayView<Number> &                   temporary_storage,
+        std::vector<MPI_Request> &                  requests) const
+      {
+#ifndef DEAL_II_WITH_MPI
+        Assert(false, ExcNeedsMPI());
+
+        (void)communication_channel;
+        (void)data_this;
+        (void)data_others;
+        (void)buffer;
+        (void)temporary_storage;
+        (void)requests;
+#else
+        (void)data_others;
+
+        requests.resize(sm_import_ranks.size() + sm_ghost_ranks.size() +
+                        ghost_targets_data.size() + import_targets_data.size());
+
+        int dummy;
+        // receive a signal that relevant sm neighbors are ready
+        for (unsigned int i = 0; i < sm_ghost_ranks.size(); i++)
+          MPI_Irecv(&dummy,
+                    0,
+                    MPI_INT,
+                    sm_ghost_ranks[i],
+                    communication_channel + 0,
+                    comm_sm,
+                    requests.data() + sm_import_ranks.size() + i);
+
+        // signal to all relevant sm neighbors that this process is ready
+        for (unsigned int i = 0; i < sm_import_ranks.size(); i++)
+          MPI_Isend(&dummy,
+                    0,
+                    MPI_INT,
+                    sm_import_ranks[i],
+                    communication_channel + 0,
+                    comm_sm,
+                    requests.data() + i);
+
+        // receive data from remote processes
+        for (unsigned int i = 0; i < ghost_targets_data.size(); i++)
+          {
+            const unsigned int offset =
+              n_ghost_indices_in_larger_set_by_remote_rank[i] -
+              ghost_targets_data[i][2];
+
+            MPI_Irecv(buffer.data() + ghost_targets_data[i][1] + offset,
+                      ghost_targets_data[i][2],
+                      Utilities::MPI::internal::mpi_type_id(buffer.data()),
+                      ghost_targets_data[i][0],
+                      communication_channel + 1,
+                      comm,
+                      requests.data() + sm_import_ranks.size() +
+                        sm_ghost_ranks.size() + i);
+          }
+
+        // send data to remote processes
+        for (unsigned int i = 0, k = 0; i < import_targets_data.size(); i++)
+          {
+            for (unsigned int j = import_indices_data.first[i];
+                 j < import_indices_data.first[i + 1];
+                 j++)
+              for (unsigned int l = 0; l < import_indices_data.second[j].second;
+                   l++, k++)
+                temporary_storage[k] =
+                  data_this[import_indices_data.second[j].first + l];
+
+            // send data away
+            MPI_Isend(temporary_storage.data() + import_targets_data[i][1],
+                      import_targets_data[i][2],
+                      Utilities::MPI::internal::mpi_type_id(data_this.data()),
+                      import_targets_data[i][0],
+                      communication_channel + 1,
+                      comm,
+                      requests.data() + sm_import_ranks.size() +
+                        sm_ghost_ranks.size() + ghost_targets_data.size() + i);
+          }
+#endif
+      }
+
+
+
+      template <typename Number>
+      void
+      Full::export_to_ghosted_array_finish_impl(
+        const ArrayView<const Number> &             data_this,
+        const std::vector<ArrayView<const Number>> &data_others,
+        const ArrayView<Number> &                   ghost_array,
+        std::vector<MPI_Request> &                  requests) const
+      {
+        (void)data_this;
+
+#ifndef DEAL_II_WITH_MPI
+        Assert(false, ExcNeedsMPI());
+
+        (void)data_others;
+        (void)ghost_array;
+        (void)requests;
+#else
+
+        AssertDimension(requests.size(),
+                        sm_import_ranks.size() + sm_ghost_ranks.size() +
+                          ghost_targets_data.size() +
+                          import_targets_data.size());
+
+        const auto split =
+          [&](const unsigned int i) -> std::pair<unsigned int, unsigned int> {
+          AssertIndexRange(i,
+                           (sm_ghost_ranks.size() + ghost_targets_data.size()));
+
+          if (i < sm_ghost_ranks.size())
+            return {0, i};
+          else
+            return {1, i - sm_ghost_ranks.size()};
+        };
+
+        for (unsigned int c = 0;
+             c < sm_ghost_ranks.size() + ghost_targets_data.size();
+             c++)
+          {
+            int i;
+            MPI_Waitany(sm_ghost_ranks.size() + ghost_targets_data.size(),
+                        requests.data() + sm_import_ranks.size(),
+                        &i,
+                        MPI_STATUS_IGNORE);
+
+            const auto s = split(i);
+            i            = s.second;
+
+            if (s.first == 0)
+              {
+                const Number *DEAL_II_RESTRICT data_others_ptr =
+                  data_others[sm_ghost_ranks[i]].data();
+                Number *DEAL_II_RESTRICT data_this_ptr = ghost_array.data();
+
+                for (unsigned int lo = sm_export_data.first[i],
+                                  ko = sm_export_data_this.first[i],
+                                  li = 0,
+                                  ki = 0;
+                     (lo < sm_export_data.first[i + 1]) &&
+                     (ko < sm_export_data_this.first[i + 1]);)
+                  {
+                    for (; (li < sm_export_data.second[lo].second) &&
+                           (ki < sm_export_data_this.second[ko].second);
+                         ++li, ++ki)
+                      data_this_ptr[sm_export_data_this.second[ko].first + ki -
+                                    n_local_elements] =
+                        data_others_ptr[sm_export_data.second[lo].first + li];
+
+                    if (li == sm_export_data.second[lo].second)
+                      {
+                        lo++;   // increment outer counter
+                        li = 0; // reset inner counter
+                      }
+
+                    if (ki == sm_export_data_this.second[ko].second)
+                      {
+                        ko++;   // increment outer counter
+                        ki = 0; // reset inner counter
+                      }
+                  }
+              }
+            else /*if(s.first == 1)*/
+              {
+                const unsigned int offset =
+                  n_ghost_indices_in_larger_set_by_remote_rank[i] -
+                  ghost_targets_data[i][2];
+
+                for (unsigned int c  = 0,
+                                  ko = ghost_indices_subset_data.first[i],
+                                  ki = 0;
+                     c < ghost_targets_data[i][2];
+                     ++c)
+                  {
+                    AssertIndexRange(ko,
+                                     ghost_indices_subset_data.second.size());
+
+                    const unsigned int idx_1 =
+                      ghost_indices_subset_data.second[ko].first + ki;
+                    const unsigned int idx_2 =
+                      ghost_targets_data[i][1] + c + offset;
+
+                    AssertIndexRange(idx_1, ghost_array.size());
+                    AssertIndexRange(idx_2, ghost_array.size());
+
+                    if (idx_1 == idx_2)
+                      {
+                        // nothing to do
+                      }
+                    else if (idx_1 < idx_2)
+                      {
+                        ghost_array[idx_1] = ghost_array[idx_2];
+                        ghost_array[idx_2] = 0.0;
+                      }
+                    else
+                      {
+                        Assert(false, ExcNotImplemented());
+                      }
+
+                    ++ki;
+
+                    if (ki == ghost_indices_subset_data.second[ko].second)
+                      {
+                        ko++;   // increment outer counter
+                        ki = 0; // reset inner counter
+                      }
+                  }
+              }
+          }
+
+        MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+#endif
+      }
+
+
+
+      template <typename Number>
+      void
+      Full::import_from_ghosted_array_start_impl(
+        const VectorOperation::values               operation,
+        const unsigned int                          communication_channel,
+        const ArrayView<const Number> &             data_this,
+        const std::vector<ArrayView<const Number>> &data_others,
+        const ArrayView<Number> &                   buffer,
+        const ArrayView<Number> &                   temporary_storage,
+        std::vector<MPI_Request> &                  requests) const
+      {
+        (void)data_this;
+
+#ifndef DEAL_II_WITH_MPI
+        Assert(false, ExcNeedsMPI());
+
+        (void)operation;
+        (void)communication_channel;
+        (void)data_others;
+        (void)buffer;
+        (void)temporary_storage;
+        (void)requests;
+#else
+
+        (void)data_others;
+        (void)operation;
+
+        Assert(operation == dealii::VectorOperation::add, ExcNotImplemented());
+
+        requests.resize(sm_ghost_ranks.size() + sm_import_ranks.size() +
+                        ghost_targets_data.size() + import_targets_data.size());
+
+        int dummy;
+        for (unsigned int i = 0; i < sm_ghost_ranks.size(); i++)
+          MPI_Isend(&dummy,
+                    0,
+                    MPI_INT,
+                    sm_ghost_ranks[i],
+                    communication_channel + 1,
+                    comm_sm,
+                    requests.data() + i);
+
+        for (unsigned int i = 0; i < sm_import_ranks.size(); i++)
+          MPI_Irecv(&dummy,
+                    0,
+                    MPI_INT,
+                    sm_import_ranks[i],
+                    communication_channel + 1,
+                    comm_sm,
+                    requests.data() + sm_ghost_ranks.size() + i);
+
+        for (unsigned int i = 0; i < ghost_targets_data.size(); i++)
+          {
+            for (unsigned int c  = 0,
+                              ko = ghost_indices_subset_data.first[i],
+                              ki = 0;
+                 c < ghost_targets_data[i][2];
+                 ++c)
+              {
+                AssertIndexRange(ko, ghost_indices_subset_data.second.size());
+
+                const unsigned int idx_1 =
+                  ghost_indices_subset_data.second[ko].first + ki;
+                const unsigned int idx_2 = ghost_targets_data[i][1] + c;
+
+                AssertIndexRange(idx_1, buffer.size());
+                AssertIndexRange(idx_2, buffer.size());
+
+                if (idx_1 == idx_2)
+                  {
+                    // nothing to do
+                  }
+                else if (idx_2 < idx_1)
+                  {
+                    buffer[idx_2] = buffer[idx_1];
+                    buffer[idx_1] = 0.0;
+                  }
+                else
+                  {
+                    Assert(false, ExcNotImplemented());
+                  }
+
+                if (++ki == ghost_indices_subset_data.second[ko].second)
+                  {
+                    ko++;   // increment outer counter
+                    ki = 0; // reset inner counter
+                  }
+              }
+
+            MPI_Isend(buffer.data() + ghost_targets_data[i][1],
+                      ghost_targets_data[i][2],
+                      Utilities::MPI::internal::mpi_type_id(buffer.data()),
+                      ghost_targets_data[i][0],
+                      communication_channel + 0,
+                      comm,
+                      requests.data() + sm_ghost_ranks.size() +
+                        sm_import_ranks.size() + i);
+          }
+
+        for (unsigned int i = 0; i < import_targets_data.size(); i++)
+          MPI_Irecv(temporary_storage.data() + import_targets_data[i][1],
+                    import_targets_data[i][2],
+                    Utilities::MPI::internal::mpi_type_id(
+                      temporary_storage.data()),
+                    import_targets_data[i][0],
+                    communication_channel + 0,
+                    comm,
+                    requests.data() + sm_ghost_ranks.size() +
+                      sm_import_ranks.size() + ghost_targets_data.size() + i);
+#endif
+      }
+
+
+
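+      // Finish the import step: process the requests in the order in which
+      // they complete, accumulating shared-memory and remote contributions
+      // into the owned data and clearing the ghost ranges that have been
+      // sent.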
+      template <typename Number>
+      void
+      Full::import_from_ghosted_array_finish_impl(
+        const VectorOperation::values               operation,
+        const ArrayView<Number> &                   data_this,
+        const std::vector<ArrayView<const Number>> &data_others,
+        const ArrayView<Number> &                   buffer,
+        const ArrayView<const Number> &             temporary_storage,
+        std::vector<MPI_Request> &                  requests) const
+      {
+#ifndef DEAL_II_WITH_MPI
+        Assert(false, ExcNeedsMPI());
+
+        (void)operation;
+        (void)data_this;
+        (void)data_others;
+        (void)buffer;
+        (void)temporary_storage;
+        (void)requests;
+#else
+
+        (void)operation;
+
+        Assert(operation == dealii::VectorOperation::add, ExcNotImplemented());
+
+        AssertDimension(requests.size(),
+                        sm_ghost_ranks.size() + sm_import_ranks.size() +
+                          ghost_targets_data.size() +
+                          import_targets_data.size());
+
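+        // Translate a request index returned by MPI_Waitany (relative to
+        // the requests following the shared-memory sends) into a category
+        // and a local index: 0 = shared-memory handshake received, 1 =
+        // remote data received, 2 = ghost send completed.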
+        const auto split =
+          [&](const unsigned int i) -> std::pair<unsigned int, unsigned int> {
+          AssertIndexRange(i,
+                           (sm_import_ranks.size() + ghost_targets_data.size() +
+                            import_targets_data.size()));
+
+          if (i < sm_import_ranks.size())
+            return {0, i};
+          else if (i < (sm_import_ranks.size() + ghost_targets_data.size()))
+            return {2, i - sm_import_ranks.size()};
+          else
+            return {1, i - sm_import_ranks.size() - ghost_targets_data.size()};
+        };
+
+        for (unsigned int c = 0;
+             c < sm_import_ranks.size() + import_targets_data.size() +
+                   ghost_targets_data.size();
+             c++)
+          {
+            int i;
+            MPI_Waitany(sm_import_ranks.size() + import_targets_data.size() +
+                          ghost_targets_data.size(),
+                        requests.data() + sm_ghost_ranks.size(),
+                        &i,
+                        MPI_STATUS_IGNORE);
+
+            const auto &s = split(i);
+            i             = s.second;
+
+            if (s.first == 0)
+              {
+                Number *DEAL_II_RESTRICT data_others_ptr =
+                  const_cast<Number *>(data_others[sm_import_ranks[i]].data());
+                Number *DEAL_II_RESTRICT data_this_ptr = data_this.data();
+
+                for (unsigned int lo = sm_import_data_this.first[i],
+                                  ko = sm_import_data.first[i],
+                                  li = 0,
+                                  ki = 0;
+                     (lo < sm_import_data_this.first[i + 1]) &&
+                     (ko < sm_import_data.first[i + 1]);)
+                  {
+                    for (; (li < sm_import_data_this.second[lo].second) &&
+                           (ki < sm_import_data.second[ko].second);
+                         ++li, ++ki)
+                      {
+                        data_this_ptr[sm_import_data_this.second[lo].first +
+                                      li] +=
+                          data_others_ptr[sm_import_data.second[ko].first + ki];
+                        data_others_ptr[sm_import_data.second[ko].first + ki] =
+                          0.0;
+                      }
+
+                    if (li == sm_import_data_this.second[lo].second)
+                      {
+                        lo++;   // increment outer counter
+                        li = 0; // reset inner counter
+                      }
+                    if (ki == sm_import_data.second[ko].second)
+                      {
+                        ko++;   // increment outer counter
+                        ki = 0; // reset inner counter
+                      }
+                  }
+              }
+            else if (s.first == 1)
+              {
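+                // Data from a remote process has arrived: accumulate the
+                // entries from the temporary storage into the owned data.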
+                for (unsigned int j = import_indices_data.first[i],
+                                  k = import_targets_data[i][1];
+                     j < import_indices_data.first[i + 1];
+                     j++)
+                  for (unsigned int l = 0;
+                       l < import_indices_data.second[j].second;
+                       l++)
+                    data_this[import_indices_data.second[j].first + l] +=
+                      temporary_storage[k++];
+              }
+            else /*if (s.first == 2)*/
+              {
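+                // The send of this ghost range has completed: reset the
+                // range to zero for the next round of communication.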
+                std::memset(buffer.data() + ghost_targets_data[i][1],
+                            0,
+                            ghost_targets_data[i][2] * sizeof(Number));
+              }
+          }
+
+        MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+#endif
+      }
+
+
+
+      unsigned int
+      Full::local_size() const
+      {
+        return n_local_elements;
+      }
+
+
+
+      unsigned int
+      Full::n_ghost_indices() const
+      {
+        return n_ghost_elements;
+      }
+
+
+
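+      // The import-buffer size equals the offset of the last import range
+      // plus its length.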
+      unsigned int
+      Full::n_import_indices() const
+      {
+        if (import_targets_data.size() == 0)
+          return 0;
+        return import_targets_data.back()[1] + import_targets_data.back()[2];
+      }
+
+
+
+      unsigned int
+      Full::n_import_sm_procs() const
+      {
+        return sm_import_ranks.size() + sm_ghost_ranks.size(); // TODO
+      }
+
+
+
+      types::global_dof_index
+      Full::size() const
+      {
+        return n_global_elements;
+      }
+
+
+
+      const MPI_Comm &
+      Full::get_sm_mpi_communicator() const
+      {
+        return this->comm_sm;
+      }
+
+
+
+      void
+      Full::reset_ghost_values(const ArrayView<double> &ghost_array) const
+      {
+        reset_ghost_values_impl(ghost_array);
+      }
+
+
+
+      void
+      Full::reset_ghost_values(const ArrayView<float> &ghost_array) const
+      {
+        reset_ghost_values_impl(ghost_array);
+      }
+
+
+
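+      // Zero out the complete ghost array; the TODO presumably refers to
+      // clearing only the ranges actually in use.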
+      template <typename Number>
+      void
+      Full::reset_ghost_values_impl(const ArrayView<Number> &ghost_array) const
+      {
+        // TODO
+        std::memset(ghost_array.data(),
+                    0,
+                    ghost_array.size() * sizeof(Number));
+      }
+
+
+
     } // namespace VectorDataExchange
   }   // namespace MatrixFreeFunctions
 } // namespace internal
