https://gitweb.dealii.org/ - dealii.git/commitdiff
MatrixFree: add DoFInfo::dof_indices_contiguous_sm 11092/head
author Peter Munch <peterrmuench@gmail.com>
Fri, 23 Oct 2020 11:58:33 +0000 (13:58 +0200)
committer Peter Munch <peterrmuench@gmail.com>
Tue, 3 Nov 2020 08:27:45 +0000 (09:27 +0100)
include/deal.II/matrix_free/dof_info.h
include/deal.II/matrix_free/dof_info.templates.h
include/deal.II/matrix_free/matrix_free.h
include/deal.II/matrix_free/matrix_free.templates.h
include/deal.II/matrix_free/task_info.h
source/base/mpi_consensus_algorithms.cc

index 551d6fe4e6427b56b8c97deb4af1bed4b94b44ae..c51c6f2072a220ca2da1bb7552918321fc94dbcc 100644 (file)
@@ -242,6 +242,16 @@ namespace internal
         const std::vector<FaceToCellTopology<1>> &ghosted_faces,
         const bool                                fill_cell_centric);
 
+      /**
+       * Given @p cell_indices_contiguous_sm, which contains for each cell of
+       * the interior/exterior macro faces and for each macro cell the owning
+       * shared-memory rank and the local cell index within that rank,
+       * compute dof_indices_contiguous_sm.
+       */
+      void
+      compute_shared_memory_contiguous_indices(
+        std::array<std::vector<std::pair<unsigned int, unsigned int>>, 3>
+          &cell_indices_contiguous_sm);
+
       /**
        * Compute a renumbering of the degrees of freedom to improve the data
        * access patterns for this class that can be utilized by the categories
@@ -488,6 +498,17 @@ namespace internal
        */
       std::array<std::vector<unsigned int>, 3> dof_indices_contiguous;
 
+      /**
+       * The same as above, but for shared-memory usage. The first member of
+       * each pair identifies the owning process and the second member the
+       * index into the locally-owned data of that process.
+       *
+       * @note This data structure is only set up if all entries in
+       *   index_storage_variants[2] are IndexStorageVariants::contiguous.
+       */
+      std::array<std::vector<std::pair<unsigned int, unsigned int>>, 3>
+        dof_indices_contiguous_sm;
+
       /**
        * Compressed index storage for faster access than through @p
        * dof_indices used according to the description in IndexStorageVariants.
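The pairs stored in dof_indices_contiguous_sm above are meant to be dereferenced against the locally-owned data arrays of the processes in the shared-memory domain. A minimal sketch (not deal.II API; sm_data and the accessor are hypothetical) of how such a pair could be consumed:

  #include <utility>
  #include <vector>

  // sm_data[p] points to the locally-owned dof values of shared-memory
  // rank p, e.g., obtained from a shared-memory-aware vector
  double read_first_dof_of_cell(
    const std::vector<const double *> &          sm_data,
    const std::pair<unsigned int, unsigned int> &id)
  {
    // id.first : owning shared-memory rank
    // id.second: first index of the cell's contiguous dof range
    //            within that rank's locally-owned data
    return sm_data[id.first][id.second];
  }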
index 7d7c189904535ad84b3b45eda89b3f607b1e1d44..d4361183946a63f7beb09050d54ae906e6ca2d7f 100644 (file)
@@ -1483,6 +1483,33 @@ namespace internal
 
 
 
+    void
+    DoFInfo::compute_shared_memory_contiguous_indices(
+      std::array<std::vector<std::pair<unsigned int, unsigned int>>, 3>
+        &cell_indices_contiguous_sm)
+    {
+      AssertDimension(dofs_per_cell.size(), 1);
+
+      for (unsigned int i = 0; i < 3; ++i)
+        {
+          dof_indices_contiguous_sm[i].resize(
+            cell_indices_contiguous_sm[i].size());
+
+          for (unsigned int j = 0; j < cell_indices_contiguous_sm[i].size();
+               ++j)
+            if (cell_indices_contiguous_sm[i][j].first !=
+                numbers::invalid_unsigned_int)
+              dof_indices_contiguous_sm[i][j] = {
+                cell_indices_contiguous_sm[i][j].first,
+                cell_indices_contiguous_sm[i][j].second * dofs_per_cell[0]};
+            else
+              dof_indices_contiguous_sm[i][j] = {numbers::invalid_unsigned_int,
+                                                 numbers::invalid_unsigned_int};
+        }
+    }
+
+
+
     template <int length>
     void
     DoFInfo::compute_vector_zero_access_pattern(
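The conversion implemented above is a plain rescaling: the second member of each pair changes from a cell offset to the offset of the cell's first dof, which is valid because the AssertDimension enforces a single element type with a constant number of dofs per cell. A stand-alone sketch of the same mapping, with a hypothetical dof count of 8:

  #include <limits>
  #include <utility>

  using Pair = std::pair<unsigned int, unsigned int>;

  Pair cell_to_dof_pair(const Pair &cell, const unsigned int dofs_per_cell = 8)
  {
    const unsigned int invalid = std::numeric_limits<unsigned int>::max();
    if (cell.first == invalid)
      return {invalid, invalid};                      // keep invalid entries
    return {cell.first, cell.second * dofs_per_cell}; // {1, 5} -> {1, 40}
  }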
index 9240fa77b6287cd85a9f464f3ed2db88bb9bf5e4..f7433f60e6aca9af2e5cd8057d339327b9f1a9f2 100644 (file)
@@ -243,6 +243,7 @@ public:
       , hold_all_faces_to_owned_cells(hold_all_faces_to_owned_cells)
       , cell_vectorization_categories_strict(
           cell_vectorization_categories_strict)
+      , communicator_sm(MPI_COMM_SELF)
     {}
 
     /**
@@ -268,6 +269,7 @@ public:
       , cell_vectorization_category(other.cell_vectorization_category)
       , cell_vectorization_categories_strict(
           other.cell_vectorization_categories_strict)
+      , communicator_sm(other.communicator_sm)
     {}
 
     // remove with level_mg_handler
@@ -297,6 +299,7 @@ public:
       cell_vectorization_category   = other.cell_vectorization_category;
       cell_vectorization_categories_strict =
         other.cell_vectorization_categories_strict;
+      communicator_sm = other.communicator_sm;
 
       return *this;
     }
@@ -529,6 +532,11 @@ public:
      * them in a single vectorized array.
      */
     bool cell_vectorization_categories_strict;
+
+    /**
+     * Shared-memory MPI communicator. Default: MPI_COMM_SELF.
+     */
+    MPI_Comm communicator_sm;
   };
 
   /**
@@ -2170,7 +2178,7 @@ MatrixFree<dim, Number, VectorizedArrayType>::initialize_dof_vector(
   const unsigned int                           comp) const
 {
   AssertIndexRange(comp, n_components());
-  vec.reinit(dof_info[comp].vector_partitioner);
+  vec.reinit(dof_info[comp].vector_partitioner, task_info.communicator_sm);
 }
 
 
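On the user side, the new AdditionalData::communicator_sm field together with the reinit call above means that vectors created through initialize_dof_vector are partitioned with knowledge of the node-local ranks. A minimal sketch of the intended setup, assuming the usual mapping, dof_handler, constraints, and quadrature objects already exist (the MPI_Comm_split_type call is standard MPI):

  // group all processes of the same compute node into one
  // shared-memory communicator
  MPI_Comm communicator_sm;
  MPI_Comm_split_type(MPI_COMM_WORLD,
                      MPI_COMM_TYPE_SHARED,
                      Utilities::MPI::this_mpi_process(MPI_COMM_WORLD),
                      MPI_INFO_NULL,
                      &communicator_sm);

  constexpr unsigned int dim = 2;

  MatrixFree<dim, double>::AdditionalData additional_data;
  additional_data.communicator_sm = communicator_sm;

  MatrixFree<dim, double> matrix_free;
  matrix_free.reinit(
    mapping, dof_handler, constraints, quadrature, additional_data);

  // the vector now knows about the shared-memory domain
  LinearAlgebra::distributed::Vector<double> vec;
  matrix_free.initialize_dof_vector(vec);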
index 95a08c5d8e018dfa62e4ab0d7e3c97c00cbb4320..6933925da7fecf23f8e2c00fb78f4e6ad785de51 100644 (file)
@@ -21,6 +21,7 @@
 
 #include <deal.II/base/memory_consumption.h>
 #include <deal.II/base/mpi.h>
+#include <deal.II/base/mpi_consensus_algorithms.h>
 #include <deal.II/base/polynomials_piecewise.h>
 #include <deal.II/base/tensor_product_polynomials.h>
 #include <deal.II/base/utilities.h>
@@ -337,12 +338,14 @@ MatrixFree<dim, Number, VectorizedArrayType>::internal_reinit(
             Utilities::MPI::this_mpi_process(task_info.communicator);
           task_info.n_procs =
             Utilities::MPI::n_mpi_processes(task_info.communicator);
+          task_info.communicator_sm = additional_data.communicator_sm;
         }
       else
         {
-          task_info.communicator = MPI_COMM_SELF;
-          task_info.my_pid       = 0;
-          task_info.n_procs      = 1;
+          task_info.communicator    = MPI_COMM_SELF;
+          task_info.communicator_sm = MPI_COMM_SELF;
+          task_info.my_pid          = 0;
+          task_info.n_procs         = 1;
         }
 
       initialize_dof_handlers(dof_handler, additional_data);
@@ -949,8 +952,6 @@ namespace internal
       {
         const bool strict_categories =
           cell_vectorization_categories_strict || hp_functionality_enabled;
-        // Use max dofs per cell in create_blocks_serial. This is necessary
-        // to use FE_Nothing.
         unsigned int max_dofs_per_cell = 0;
         for (const auto &info : dof_info)
           for (const auto &dofs : info.dofs_per_cell)
@@ -1474,6 +1475,231 @@ MatrixFree<dim, Number, VectorizedArrayType>::initialize_indices(
   for (auto &di : dof_info)
     di.compute_vector_zero_access_pattern(task_info, face_info.faces);
 
+#ifdef DEAL_II_WITH_MPI
+  {
+    // non-buffering mode is only supported if the indices of all cells are
+    // contiguous for all dof_info objects.
+    bool is_non_buffering_sm_supported = true;
+    for (const auto &di : dof_info)
+      for (const auto &v : di.index_storage_variants[2])
+        is_non_buffering_sm_supported &=
+          (v == internal::MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
+                  contiguous);
+
+    is_non_buffering_sm_supported =
+      Utilities::MPI::min(static_cast<unsigned int>(
+                            is_non_buffering_sm_supported),
+                          task_info.communicator);
+
+    const MPI_Comm communicator_sm = this->task_info.communicator_sm;
+
+    if (is_non_buffering_sm_supported)
+      {
+        // TODO: move into Utilities::MPI
+        const auto procs_of_sm = [](const MPI_Comm &comm,
+                                    const MPI_Comm &comm_shared) {
+          if (Utilities::MPI::job_supports_mpi() == false)
+            return std::vector<unsigned int>{0};
+
+          const unsigned int rank = Utilities::MPI::this_mpi_process(comm);
+          const unsigned int size =
+            Utilities::MPI::n_mpi_processes(comm_shared);
+
+          std::vector<unsigned int> ranks(size);
+          MPI_Allgather(
+            &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_shared);
+
+          return ranks;
+        };
+
+        // gather the ranks of the shared-memory domain
+        const std::vector<unsigned int> sm_procs =
+          procs_of_sm(task_info.communicator, communicator_sm);
+
+        // get my rank within the shared-memory domain
+        const unsigned int my_sm_pid = std::distance(
+          sm_procs.begin(),
+          std::find(sm_procs.begin(), sm_procs.end(), task_info.my_pid));
+
+        // process any dof_info
+        const auto  di_index = 0;
+        const auto &di       = dof_info[di_index];
+
+        std::array<std::vector<std::pair<unsigned int, unsigned int>>, 3>
+          cell_indices_contiguous_sm;
+
+        // collect for each cell: sm rank + offset
+        cell_indices_contiguous_sm[2] = [&]() {
+          // result
+          std::vector<std::pair<unsigned int, unsigned int>> cells(
+            di.dof_indices_contiguous[2].size(),
+            {numbers::invalid_unsigned_int, numbers::invalid_unsigned_int});
+
+          // locally-owned cells
+          std::vector<std::pair<unsigned int, types::global_cell_index>>
+            cells_locally_owned;
+          cells_locally_owned.reserve(n_cell_batches() * n_lanes);
+
+          // shared-ghost cells (categorized according to sm-rank)
+          std::vector<
+            std::vector<std::pair<unsigned int, types::global_cell_index>>>
+            cells_shared_ghosts(sm_procs.size());
+
+          for (unsigned int cell = 0;
+               cell < n_cell_batches() + n_ghost_cell_batches();
+               ++cell)
+            for (unsigned int v = 0; v < this->n_components_filled(cell); ++v)
+              {
+                const unsigned int index = cell * n_lanes + v;
+
+                // determine global cell index
+                const unsigned int local_dof_index =
+                  di.dof_indices_contiguous[2][index];
+                const types::global_cell_index global_cell_index =
+                  di.vector_partitioner->local_to_global(local_dof_index) /
+                  this->get_dofs_per_cell(di_index);
+
+                // determine sm rank
+                unsigned int sm_rank = my_sm_pid;
+
+                if (cell < n_cell_batches())
+                  {
+                    // locally-owned cell
+                    cells_locally_owned.emplace_back(index, global_cell_index);
+                  }
+                else
+                  {
+                    // ghost cell
+                    const auto ptr = std::find(
+                      sm_procs.begin(),
+                      sm_procs.end(),
+                      additional_data.mg_level ==
+                          numbers::invalid_unsigned_int ?
+                        get_cell_iterator(cell, v, di_index)->subdomain_id() :
+                        get_cell_iterator(cell, v, di_index)
+                          ->level_subdomain_id());
+
+                    if (ptr != sm_procs.end())
+                      {
+                        // shared ghost cell:
+                        sm_rank = std::distance(sm_procs.begin(), ptr);
+                        cells_shared_ghosts[sm_rank].emplace_back(
+                          index, global_cell_index);
+                      }
+                    else
+                      {
+                        // remote ghost cell: nothing to do since the values are
+                        // communicated (i.e. locally available)
+                      }
+                  }
+
+                // write back result
+                cells[cell * n_lanes + v] = {
+                  sm_rank, local_dof_index / this->get_dofs_per_cell(di_index)};
+              }
+
+          std::sort(cells_locally_owned.begin(),
+                    cells_locally_owned.end(),
+                    [](const auto &a, const auto &b) {
+                      return a.second < b.second;
+                    });
+
+          // get offsets of shared cells
+          Utilities::MPI::ConsensusAlgorithms::
+            AnonymousProcess<dealii::types::global_dof_index, unsigned int>
+              process(
+                [&]() {
+                  std::vector<unsigned int> targets;
+                  for (unsigned int i = 0; i < cells_shared_ghosts.size(); ++i)
+                    if (cells_shared_ghosts[i].size() > 0)
+                      targets.push_back(i);
+                  return targets;
+                },
+                [&](const auto other_rank, auto &send_buffer) {
+                  auto &source = cells_shared_ghosts[other_rank];
+                  std::sort(source.begin(),
+                            source.end(),
+                            [](const auto &a, const auto &b) {
+                              return a.second < b.second;
+                            });
+
+                  send_buffer.reserve(source.size());
+                  for (const auto &i : source)
+                    send_buffer.push_back(i.second);
+                },
+                [&](const auto &other_rank,
+                    const auto &buffer_recv,
+                    auto &      request_buffer) {
+                  (void)other_rank;
+
+                  request_buffer.reserve(buffer_recv.size());
+
+                  unsigned int j = 0;
+
+                  for (unsigned int i = 0; i < buffer_recv.size(); ++i)
+                    {
+                      for (; j < cells_locally_owned.size(); ++j)
+                        if (cells_locally_owned[j].second == buffer_recv[i])
+                          {
+                            request_buffer.push_back(
+                              cells_locally_owned[j].first);
+                            break;
+                          }
+                    }
+
+                  AssertDimension(request_buffer.size(), buffer_recv.size());
+                },
+                [&](const auto other_rank, auto &recv_buffer) {
+                  recv_buffer.resize(cells_shared_ghosts[other_rank].size());
+                },
+                [&](const auto other_rank, const auto &recv_buffer) {
+                  for (unsigned int i = 0; i < recv_buffer.size(); ++i)
+                    {
+                      cells[cells_shared_ghosts[other_rank][i].first] = {
+                        other_rank, recv_buffer[i]};
+                    }
+                });
+
+          Utilities::MPI::ConsensusAlgorithms::Selector<
+            dealii::types::global_dof_index,
+            unsigned int>(process, communicator_sm)
+            .run();
+
+          return cells;
+        }();
+
+        // process faces (0: interior; 1: exterior)
+        for (unsigned int i = 0; i < 2; ++i)
+          {
+            cell_indices_contiguous_sm[i].assign(
+              di.dof_indices_contiguous[i].size(),
+              {numbers::invalid_unsigned_int, numbers::invalid_unsigned_int});
+
+            for (unsigned int face_index = 0;
+                 face_index < cell_indices_contiguous_sm[i].size();
+                 ++face_index)
+              {
+                const unsigned int cell_index =
+                  (i == 0) ? get_face_info(face_index / n_lanes)
+                               .cells_interior[face_index % n_lanes] :
+                             get_face_info(face_index / n_lanes)
+                               .cells_exterior[face_index % n_lanes];
+
+                if (cell_index == numbers::invalid_unsigned_int)
+                  continue;
+
+                cell_indices_contiguous_sm[i][face_index] =
+                  cell_indices_contiguous_sm[2][cell_index];
+              }
+          }
+
+        for (auto &di : dof_info)
+          di.compute_shared_memory_contiguous_indices(
+            cell_indices_contiguous_sm);
+      }
+  }
+#endif
+
   indices_are_initialized = true;
 }
 
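The exchange above is an instance of the generic question-and-answer pattern of Utilities::MPI::ConsensusAlgorithms: each rank poses questions (global cell indices) to a dynamically determined set of targets and receives answers (local offsets) back. A stripped-down sketch of the same five callbacks with a toy payload, assuming an MPI communicator comm (every rank asks its right neighbor one dummy question):

  using T1 = dealii::types::global_dof_index; // type of a question
  using T2 = unsigned int;                    // type of an answer

  const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(comm);

  Utilities::MPI::ConsensusAlgorithms::AnonymousProcess<T1, T2> process(
    // 1) whom do I want to ask?
    [&]() {
      return std::vector<unsigned int>{(my_rank + 1) % n_procs};
    },
    // 2) what is my question to a given target?
    [&](const unsigned int /*other_rank*/, std::vector<T1> &send_buffer) {
      send_buffer.push_back(42);
    },
    // 3) how do I answer an incoming question?
    [&](const unsigned int other_rank,
        const std::vector<T1> & /*buffer_recv*/,
        std::vector<T2> &request_buffer) {
      request_buffer.push_back(other_rank);
    },
    // 4) how large is the answer I expect back?
    [&](const unsigned int /*other_rank*/, std::vector<T2> &recv_buffer) {
      recv_buffer.resize(1);
    },
    // 5) what do I do with the received answer?
    [&](const unsigned int /*other_rank*/, const std::vector<T2> &recv_buffer) {
      (void)recv_buffer; // consume the answer here
    });

  Utilities::MPI::ConsensusAlgorithms::Selector<T1, T2>(process, comm).run();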
index e7fe06ab1eb84c20164e6d8c64b01fbfd467c95f..322b863ddbe26e5fad94cc419a88c60361781fe3 100644 (file)
@@ -565,6 +565,11 @@ namespace internal
        */
       MPI_Comm communicator;
 
+      /**
+       * Shared-memory MPI communicator.
+       */
+      MPI_Comm communicator_sm;
+
       /**
        * Rank of MPI process
        */
index c135143a0a34573d75318774eb6b298365284ed8..1eebaa29eaa0d8b3148c69a3bba73defa9a6765c 100644 (file)
@@ -48,6 +48,17 @@ namespace Utilities
         std::pair<types::global_dof_index, types::global_dof_index>,
         unsigned int>;
 
+#ifdef DEAL_II_WITH_64BIT_INDICES
+      template class Process<types::global_dof_index, unsigned int>;
+
+      template class NBX<types::global_dof_index, unsigned int>;
+
+      template class PEX<types::global_dof_index, unsigned int>;
+
+      template class Selector<types::global_dof_index, unsigned int>;
+#endif
+
+
     } // namespace ConsensusAlgorithms
   }   // end of namespace MPI
 } // end of namespace Utilities
