https://gitweb.dealii.org/ - dealii.git/commitdiff
Merge implementations of exchange_cell_data_to_(level)_ghosts 10516/head
author Peter Munch <peterrmuench@gmail.com>
Thu, 11 Jun 2020 17:33:31 +0000 (19:33 +0200)
committer Peter Munch <peterrmuench@gmail.com>
Tue, 16 Jun 2020 14:45:26 +0000 (16:45 +0200)
include/deal.II/base/mpi_tags.h
include/deal.II/grid/grid_tools.h
source/dofs/dof_handler_policy.cc
tests/distributed_grids/grid_tools_exchange_cell_data_01.cc
tests/distributed_grids/grid_tools_exchange_cell_data_02.cc
tests/distributed_grids/grid_tools_exchange_cell_data_03.cc
tests/distributed_grids/grid_tools_exchange_cell_data_04.cc
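
This commit merges the implementations of GridTools::exchange_cell_data_to_ghosts() and GridTools::exchange_cell_data_to_level_ghosts() into a common internal helper and, as part of the merge, adds an optional cell_filter argument to exchange_cell_data_to_ghosts(). A minimal usage sketch of the extended interface (not part of the commit; the helper name, tria, and data_map are placeholders for illustration):

#include <deal.II/base/std_cxx17/optional.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/cell_id.h>
#include <deal.II/grid/grid_tools.h>

#include <map>

template <int dim>
void
exchange_flagged_cell_data(
  const dealii::parallel::distributed::Triangulation<dim> &tria,
  std::map<dealii::CellId, double> &                        data_map)
{
  using namespace dealii;
  using cell_iterator =
    typename parallel::distributed::Triangulation<dim>::active_cell_iterator;

  GridTools::exchange_cell_data_to_ghosts<
    double,
    parallel::distributed::Triangulation<dim>>(
    tria,
    // pack: called on the owning process for every requested cell
    [&](const cell_iterator &cell) -> std_cxx17::optional<double> {
      return data_map[cell->id()];
    },
    // unpack: called on the process where the cell is a ghost cell
    [&](const cell_iterator &cell, const double &value) {
      data_map[cell->id()] = value;
    },
    // cell_filter (new in this commit): restrict the exchange, here to
    // cells whose user flag is set; omitting the argument keeps the old
    // behavior of communicating all relevant cells
    [](const cell_iterator &cell) { return cell->user_flag_set(); });
}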

diff --git a/include/deal.II/base/mpi_tags.h b/include/deal.II/base/mpi_tags.h
index d4d401cacf68c8825d76b15850ea04dd54b02953..37b0ff45f06e6583bfeab59793d0c4f160ecf0d4 100644
@@ -62,11 +62,11 @@ namespace Utilities
           /// Triangulation<dim, spacedim>::communicate_locally_moved_vertices()
           triangulation_communicate_locally_moved_vertices,
 
-          /// grid_tools.h: exchange_cell_data_to_level_ghosts()
-          exchange_cell_data_to_level_ghosts_request,
+          /// grid_tools.h: exchange_cell_data()
+          exchange_cell_data_request,
 
-          /// grid_tools.h: exchange_cell_data_to_level_ghosts()
-          exchange_cell_data_to_level_ghosts_reply,
+          /// grid_tools.h: exchange_cell_data()
+          exchange_cell_data_reply,
 
           /// mg_transfer_internal.cc: fill_copy_indices()
           mg_transfer_fill_copy_indices,
diff --git a/include/deal.II/grid/grid_tools.h b/include/deal.II/grid/grid_tools.h
index d3ea4ecffd27ee51362db0103261e70321372c7c..f650b814f3b664ee85d46429e0cb258085123d78 100644
@@ -2801,7 +2801,9 @@ namespace GridTools
    *   on the sending side returned a non-empty std_cxx17::optional object.
    *   The @p unpack function is then called with the data sent by the
    *   processor that owns that cell.
-   *
+   * @param cell_filter Only cells for which this filter function returns
+   *   `true` are communicated. The default filter returns `true` for all
+   *   cells and thus all relevant cells are communicated.
    *
    * <h4> An example </h4>
    *
@@ -2852,7 +2854,10 @@ namespace GridTools
     const std::function<std_cxx17::optional<DataType>(
       const typename MeshType::active_cell_iterator &)> &pack,
     const std::function<void(const typename MeshType::active_cell_iterator &,
-                             const DataType &)> &        unpack);
+                             const DataType &)> &        unpack,
+    const std::function<bool(const typename MeshType::active_cell_iterator &)>
+      &cell_filter =
+        [](const typename MeshType::active_cell_iterator &) { return true; });
 
   /**
    * Exchange arbitrary data of type @p DataType provided by the function
@@ -2860,7 +2865,7 @@ namespace GridTools
    * processes.
    *
    * In addition to the parameters of exchange_cell_data_to_ghosts(), this
-   * function allows to provide a @p filter function, which can be used to only
+   * function allows one to provide a @p cell_filter function, which can be used to only
    * communicate marked cells. In the default case, all relevant cells are
    * communicated.
    */
@@ -2873,7 +2878,7 @@ namespace GridTools
     const std::function<void(const typename MeshType::level_cell_iterator &,
                              const DataType &)> &       unpack,
     const std::function<bool(const typename MeshType::level_cell_iterator &)>
-      &filter =
+      &cell_filter =
         [](const typename MeshType::level_cell_iterator &) { return true; });
 
   /* Exchange with all processors of the MPI communicator @p mpi_communicator the vector of bounding
@@ -3883,182 +3888,282 @@ namespace GridTools
   }
 
 
-
-  template <typename DataType, typename MeshType>
-  void
-  exchange_cell_data_to_ghosts(
-    const MeshType &                                     mesh,
-    const std::function<std_cxx17::optional<DataType>(
-      const typename MeshType::active_cell_iterator &)> &pack,
-    const std::function<void(const typename MeshType::active_cell_iterator &,
-                             const DataType &)> &        unpack)
+  namespace internal
   {
+    template <typename DataType,
+              typename MeshType,
+              typename MeshCellIteratorType>
+    void
+    exchange_cell_data(
+      const MeshType &mesh,
+      const std::function<
+        std_cxx17::optional<DataType>(const MeshCellIteratorType &)> &pack,
+      const std::function<void(const MeshCellIteratorType &, const DataType &)>
+        &                                                         unpack,
+      const std::function<bool(const MeshCellIteratorType &)> &   cell_filter,
+      const std::function<void(
+        const std::function<void(const MeshCellIteratorType &,
+                                 const types::subdomain_id)> &)> &process_cells,
+      const std::function<std::set<types::subdomain_id>(
+        const parallel::TriangulationBase<MeshType::dimension,
+                                          MeshType::space_dimension> &)>
+        &compute_ghost_owners)
+    {
 #    ifndef DEAL_II_WITH_MPI
-    (void)mesh;
-    (void)pack;
-    (void)unpack;
-    Assert(false,
-           ExcMessage(
-             "GridTools::exchange_cell_data_to_ghosts() requires MPI."));
+      (void)mesh;
+      (void)pack;
+      (void)unpack;
+      (void)cell_filter;
+      (void)process_cells;
+      (void)compute_ghost_owners;
+      Assert(false,
+             ExcMessage("GridTools::exchange_cell_data() requires MPI."));
 #    else
-    constexpr int dim      = MeshType::dimension;
-    constexpr int spacedim = MeshType::space_dimension;
-    auto          tria =
-      dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
-        &mesh.get_triangulation());
-    Assert(
-      tria != nullptr,
-      ExcMessage(
-        "The function exchange_cell_data_to_ghosts() only works with parallel triangulations."));
-
-    // map neighbor_id -> data_buffer where we accumulate the data to send
-    using DestinationToBufferMap =
+      constexpr int dim      = MeshType::dimension;
+      constexpr int spacedim = MeshType::space_dimension;
+      auto          tria =
+        dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+          &mesh.get_triangulation());
+      Assert(
+        tria != nullptr,
+        ExcMessage(
+          "The function exchange_cell_data_to_ghosts() only works with parallel triangulations."));
+
+      // build list of cells to request for each neighbor
+      std::set<dealii::types::subdomain_id> ghost_owners =
+        compute_ghost_owners(*tria);
       std::map<dealii::types::subdomain_id,
-               CellDataTransferBuffer<dim, DataType>>;
-    DestinationToBufferMap destination_to_data_buffer_map;
+               std::vector<typename CellId::binary_type>>
+        neighbor_cell_list;
 
-    std::map<unsigned int, std::set<dealii::types::subdomain_id>>
-      vertices_with_ghost_neighbors =
-        GridTools::compute_vertices_with_ghost_neighbors(*tria);
+      for (const auto ghost_owner : ghost_owners)
+        neighbor_cell_list[ghost_owner] = {};
 
-    for (const auto &cell : tria->active_cell_iterators())
-      if (cell->is_locally_owned())
-        {
-          std::set<dealii::types::subdomain_id> send_to;
-          for (const unsigned int v : GeometryInfo<dim>::vertex_indices())
-            {
-              const std::map<unsigned int,
-                             std::set<dealii::types::subdomain_id>>::
-                const_iterator neighbor_subdomains_of_vertex =
-                  vertices_with_ghost_neighbors.find(cell->vertex_index(v));
+      process_cells([&](const auto &cell, const auto key) {
+        if (cell_filter(cell))
+          neighbor_cell_list[key].emplace_back(
+            cell->id().template to_binary<spacedim>());
+      });
 
-              if (neighbor_subdomains_of_vertex ==
-                  vertices_with_ghost_neighbors.end())
-                continue;
+      Assert(ghost_owners.size() == neighbor_cell_list.size(),
+             ExcInternalError());
 
-              Assert(neighbor_subdomains_of_vertex->second.size() != 0,
-                     ExcInternalError());
 
-              send_to.insert(neighbor_subdomains_of_vertex->second.begin(),
-                             neighbor_subdomains_of_vertex->second.end());
-            }
+      // Before sending & receiving, make sure we protect this section with
+      // a mutex:
+      static Utilities::MPI::CollectiveMutex      mutex;
+      Utilities::MPI::CollectiveMutex::ScopedLock lock(
+        mutex, tria->get_communicator());
+
+      const int mpi_tag =
+        Utilities::MPI::internal::Tags::exchange_cell_data_request;
+      const int mpi_tag_reply =
+        Utilities::MPI::internal::Tags::exchange_cell_data_reply;
+
+      // send our requests:
+      std::vector<MPI_Request> requests(ghost_owners.size());
+      {
+        unsigned int idx = 0;
+        for (const auto &it : neighbor_cell_list)
+          {
+            // send the data about the relevant cells
+            const int ierr = MPI_Isend(it.second.data(),
+                                       it.second.size() * sizeof(it.second[0]),
+                                       MPI_BYTE,
+                                       it.first,
+                                       mpi_tag,
+                                       tria->get_communicator(),
+                                       &requests[idx]);
+            AssertThrowMPI(ierr);
+            ++idx;
+          }
+      }
 
-          if (send_to.size() > 0)
+      using DestinationToBufferMap =
+        std::map<dealii::types::subdomain_id,
+                 GridTools::CellDataTransferBuffer<dim, DataType>>;
+      DestinationToBufferMap destination_to_data_buffer_map;
+
+      // receive requests and reply with the ghost indices
+      std::vector<std::vector<typename CellId::binary_type>> cell_data_to_send(
+        ghost_owners.size());
+      std::vector<std::vector<types::global_dof_index>>
+                                     send_dof_numbers_and_indices(ghost_owners.size());
+      std::vector<MPI_Request>       reply_requests(ghost_owners.size());
+      std::vector<std::vector<char>> sendbuffers(ghost_owners.size());
+
+      for (unsigned int idx = 0; idx < ghost_owners.size(); ++idx)
+        {
+          MPI_Status status;
+          int        ierr = MPI_Probe(MPI_ANY_SOURCE,
+                               mpi_tag,
+                               tria->get_communicator(),
+                               &status);
+          AssertThrowMPI(ierr);
+
+          int len;
+          ierr = MPI_Get_count(&status, MPI_BYTE, &len);
+          AssertThrowMPI(ierr);
+          Assert(len % sizeof(cell_data_to_send[idx][0]) == 0,
+                 ExcInternalError());
+
+          const unsigned int n_cells =
+            len / sizeof(typename CellId::binary_type);
+          cell_data_to_send[idx].resize(n_cells);
+
+          ierr = MPI_Recv(cell_data_to_send[idx].data(),
+                          len,
+                          MPI_BYTE,
+                          status.MPI_SOURCE,
+                          status.MPI_TAG,
+                          tria->get_communicator(),
+                          &status);
+          AssertThrowMPI(ierr);
+
+          // store data for each cell
+          for (unsigned int c = 0; c < static_cast<unsigned int>(n_cells); ++c)
             {
-              // this cell's data needs to be sent to someone
-              typename MeshType::active_cell_iterator mesh_it(tria,
-                                                              cell->level(),
-                                                              cell->index(),
-                                                              &mesh);
+              const auto cell =
+                CellId(cell_data_to_send[idx][c]).to_cell(*tria);
 
+              MeshCellIteratorType                mesh_it(tria,
+                                           cell->level(),
+                                           cell->index(),
+                                           &mesh);
               const std_cxx17::optional<DataType> data = pack(mesh_it);
 
               if (data)
                 {
-                  const CellId cellid = cell->id();
-
-                  for (const auto subdomain : send_to)
-                    {
-                      // find the data buffer for proc "subdomain" if it exists
-                      // or create an empty one otherwise
-                      typename DestinationToBufferMap::iterator p =
-                        destination_to_data_buffer_map
-                          .insert(std::make_pair(
-                            subdomain, CellDataTransferBuffer<dim, DataType>()))
-                          .first;
-
-                      p->second.cell_ids.emplace_back(cellid);
-                      p->second.data.emplace_back(*data);
-                    }
+                  typename DestinationToBufferMap::iterator p =
+                    destination_to_data_buffer_map
+                      .insert(std::make_pair(
+                        idx,
+                        GridTools::CellDataTransferBuffer<dim, DataType>()))
+                      .first;
+
+                  p->second.cell_ids.emplace_back(cell->id());
+                  p->second.data.emplace_back(*data);
                 }
             }
+
+          // send reply
+          GridTools::CellDataTransferBuffer<dim, DataType> &data =
+            destination_to_data_buffer_map[idx];
+
+          sendbuffers[idx] =
+            Utilities::pack(data, /*enable_compression*/ false);
+          ierr = MPI_Isend(sendbuffers[idx].data(),
+                           sendbuffers[idx].size(),
+                           MPI_BYTE,
+                           status.MPI_SOURCE,
+                           mpi_tag_reply,
+                           tria->get_communicator(),
+                           &reply_requests[idx]);
+          AssertThrowMPI(ierr);
         }
 
-    // Protect the following communication:
-    static Utilities::MPI::CollectiveMutex      mutex;
-    Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex,
-                                                     tria->get_communicator());
+      // finally receive the replies
+      std::vector<char> receive;
+      for (unsigned int idx = 0; idx < ghost_owners.size(); ++idx)
+        {
+          MPI_Status status;
+          int        ierr = MPI_Probe(MPI_ANY_SOURCE,
+                               mpi_tag_reply,
+                               tria->get_communicator(),
+                               &status);
+          AssertThrowMPI(ierr);
 
-    const int mpi_tag =
-      Utilities::MPI::internal::Tags::exchange_cell_data_to_ghosts;
+          int len;
+          ierr = MPI_Get_count(&status, MPI_BYTE, &len);
+          AssertThrowMPI(ierr);
 
-    // 2. send our messages
-    std::set<dealii::types::subdomain_id> ghost_owners   = tria->ghost_owners();
-    const unsigned int                    n_ghost_owners = ghost_owners.size();
-    std::vector<std::vector<char>>        sendbuffers(n_ghost_owners);
-    std::vector<MPI_Request>              requests(n_ghost_owners);
+          receive.resize(len);
 
-    unsigned int idx = 0;
-    for (auto it = ghost_owners.begin(); it != ghost_owners.end(); ++it, ++idx)
-      {
-        CellDataTransferBuffer<dim, DataType> &data =
-          destination_to_data_buffer_map[*it];
-
-        // pack all the data into the buffer for this recipient and send it.
-        // keep data around till we can make sure that the packet has been
-        // received
-        sendbuffers[idx] = Utilities::pack(data, /*enable_compression*/ false);
-        const int ierr   = MPI_Isend(sendbuffers[idx].data(),
-                                   sendbuffers[idx].size(),
-                                   MPI_BYTE,
-                                   *it,
-                                   mpi_tag,
-                                   tria->get_communicator(),
-                                   &requests[idx]);
-        AssertThrowMPI(ierr);
-      }
+          char *ptr = receive.data();
+          ierr      = MPI_Recv(ptr,
+                          len,
+                          MPI_BYTE,
+                          status.MPI_SOURCE,
+                          status.MPI_TAG,
+                          tria->get_communicator(),
+                          &status);
+          AssertThrowMPI(ierr);
 
-    // 3. receive messages
-    std::vector<char> receive;
-    for (unsigned int idx = 0; idx < n_ghost_owners; ++idx)
-      {
-        MPI_Status status;
-        int        ierr =
-          MPI_Probe(MPI_ANY_SOURCE, mpi_tag, tria->get_communicator(), &status);
-        AssertThrowMPI(ierr);
-
-        int len;
-        ierr = MPI_Get_count(&status, MPI_BYTE, &len);
-        AssertThrowMPI(ierr);
-
-        receive.resize(len);
-
-        char *ptr = receive.data();
-        ierr      = MPI_Recv(ptr,
-                        len,
-                        MPI_BYTE,
-                        status.MPI_SOURCE,
-                        status.MPI_TAG,
-                        tria->get_communicator(),
-                        &status);
-        AssertThrowMPI(ierr);
-
-        auto cellinfo =
-          Utilities::unpack<CellDataTransferBuffer<dim, DataType>>(
-            receive, /*enable_compression*/ false);
-
-        DataType *data = cellinfo.data.data();
-        for (unsigned int c = 0; c < cellinfo.cell_ids.size(); ++c, ++data)
-          {
-            const typename Triangulation<dim, spacedim>::cell_iterator
-              tria_cell = cellinfo.cell_ids[c].to_cell(*tria);
+          auto cellinfo =
+            Utilities::unpack<CellDataTransferBuffer<dim, DataType>>(
+              receive, /*enable_compression*/ false);
 
-            const typename MeshType::active_cell_iterator cell(
-              tria, tria_cell->level(), tria_cell->index(), &mesh);
+          DataType *data = cellinfo.data.data();
+          for (unsigned int c = 0; c < cellinfo.cell_ids.size(); ++c, ++data)
+            {
+              const typename Triangulation<dim, spacedim>::cell_iterator
+                tria_cell = cellinfo.cell_ids[c].to_cell(*tria);
+
+              MeshCellIteratorType cell(tria,
+                                        tria_cell->level(),
+                                        tria_cell->index(),
+                                        &mesh);
+
+              unpack(cell, *data);
+            }
+        }
+
+      // make sure that all communication is finished
+      // when we leave this function.
+      if (requests.size() > 0)
+        {
+          const int ierr =
+            MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+          AssertThrowMPI(ierr);
+        }
+      if (reply_requests.size() > 0)
+        {
+          const int ierr = MPI_Waitall(reply_requests.size(),
+                                       reply_requests.data(),
+                                       MPI_STATUSES_IGNORE);
+          AssertThrowMPI(ierr);
+        }
 
-            unpack(cell, *data);
-          }
-      }
 
-    // make sure that all communication is finished
-    // when we leave this function.
-    if (requests.size())
-      {
-        const int ierr =
-          MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
-        AssertThrowMPI(ierr);
-      }
 #    endif // DEAL_II_WITH_MPI
+    }
+
+  } // namespace internal
+
+  template <typename DataType, typename MeshType>
+  void
+  exchange_cell_data_to_ghosts(
+    const MeshType &                                     mesh,
+    const std::function<std_cxx17::optional<DataType>(
+      const typename MeshType::active_cell_iterator &)> &pack,
+    const std::function<void(const typename MeshType::active_cell_iterator &,
+                             const DataType &)> &        unpack,
+    const std::function<bool(const typename MeshType::active_cell_iterator &)>
+      &cell_filter)
+  {
+#    ifndef DEAL_II_WITH_MPI
+    (void)mesh;
+    (void)pack;
+    (void)unpack;
+    (void)cell_filter;
+    Assert(false,
+           ExcMessage(
+             "GridTools::exchange_cell_data_to_ghosts() requires MPI."));
+#    else
+    internal::exchange_cell_data<DataType,
+                                 MeshType,
+                                 typename MeshType::active_cell_iterator>(
+      mesh,
+      pack,
+      unpack,
+      cell_filter,
+      [&](const auto &process) {
+        for (const auto &cell : mesh.active_cell_iterators())
+          if (cell->is_ghost())
+            process(cell, cell->subdomain_id());
+      },
+      [](const auto &tria) { return tria.ghost_owners(); });
+#    endif
   }
 
 
@@ -4072,219 +4177,33 @@ namespace GridTools
     const std::function<void(const typename MeshType::level_cell_iterator &,
                              const DataType &)> &       unpack,
     const std::function<bool(const typename MeshType::level_cell_iterator &)>
-      &filter)
+      &cell_filter)
   {
 #    ifndef DEAL_II_WITH_MPI
     (void)mesh;
     (void)pack;
     (void)unpack;
-    (void)filter;
+    (void)cell_filter;
     Assert(false,
            ExcMessage(
-             "GridTools::exchange_cell_data_to_ghosts() requires MPI."));
+             "GridTools::exchange_cell_data_to_level_ghosts() requires MPI."));
 #    else
-    constexpr int dim      = MeshType::dimension;
-    constexpr int spacedim = MeshType::space_dimension;
-    auto          tria =
-      dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
-        &mesh.get_triangulation());
-    Assert(
-      tria != nullptr,
-      ExcMessage(
-        "The function exchange_cell_data_to_ghosts() only works with parallel triangulations."));
-
-    // build list of cells to request for each neighbor
-    std::set<dealii::types::subdomain_id> level_ghost_owners =
-      tria->level_ghost_owners();
-    std::map<dealii::types::subdomain_id,
-             std::vector<typename CellId::binary_type>>
-      neighbor_cell_list;
-
-    for (const auto level_ghost_owner : level_ghost_owners)
-      neighbor_cell_list[level_ghost_owner] = {};
-
-    {
-      for (const auto &cell : mesh.cell_iterators())
-        if (cell->level_subdomain_id() !=
-              dealii::numbers::artificial_subdomain_id &&
-            !cell->is_locally_owned_on_level())
-          if (filter(cell))
-            neighbor_cell_list[cell->level_subdomain_id()].emplace_back(
-              cell->id().template to_binary<spacedim>());
-    }
-
-    Assert(level_ghost_owners.size() == neighbor_cell_list.size(),
-           ExcInternalError());
-
-
-    // Before sending & receiving, make sure we protect this section with
-    // a mutex:
-    static Utilities::MPI::CollectiveMutex      mutex;
-    Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex,
-                                                     tria->get_communicator());
-
-    const int mpi_tag = Utilities::MPI::internal::Tags::
-      exchange_cell_data_to_level_ghosts_request;
-    const int mpi_tag_reply =
-      Utilities::MPI::internal::Tags::exchange_cell_data_to_level_ghosts_reply;
-
-    // send our requests:
-    std::vector<MPI_Request> requests(level_ghost_owners.size());
-    {
-      unsigned int idx = 0;
-      for (const auto &it : neighbor_cell_list)
-        {
-          // send the data about the relevant cells
-          const int ierr = MPI_Isend(it.second.data(),
-                                     it.second.size() * sizeof(it.second[0]),
-                                     MPI_BYTE,
-                                     it.first,
-                                     mpi_tag,
-                                     tria->get_communicator(),
-                                     &requests[idx]);
-          AssertThrowMPI(ierr);
-          ++idx;
-        }
-    }
-
-    using DestinationToBufferMap =
-      std::map<dealii::types::subdomain_id,
-               GridTools::CellDataTransferBuffer<dim, DataType>>;
-    DestinationToBufferMap destination_to_data_buffer_map;
-
-    // receive requests and reply with the ghost indices
-    std::vector<std::vector<typename CellId::binary_type>> cell_data_to_send(
-      level_ghost_owners.size());
-    std::vector<std::vector<types::global_dof_index>>
-                                   send_dof_numbers_and_indices(level_ghost_owners.size());
-    std::vector<MPI_Request>       reply_requests(level_ghost_owners.size());
-    std::vector<std::vector<char>> sendbuffers(level_ghost_owners.size());
-
-    for (unsigned int idx = 0; idx < level_ghost_owners.size(); ++idx)
-      {
-        MPI_Status status;
-        int        ierr =
-          MPI_Probe(MPI_ANY_SOURCE, mpi_tag, tria->get_communicator(), &status);
-        AssertThrowMPI(ierr);
-
-        int len;
-        ierr = MPI_Get_count(&status, MPI_BYTE, &len);
-        AssertThrowMPI(ierr);
-        Assert(len % sizeof(cell_data_to_send[idx][0]) == 0,
-               ExcInternalError());
-
-        const unsigned int n_cells = len / sizeof(typename CellId::binary_type);
-        cell_data_to_send[idx].resize(n_cells);
-
-        ierr = MPI_Recv(cell_data_to_send[idx].data(),
-                        len,
-                        MPI_BYTE,
-                        status.MPI_SOURCE,
-                        status.MPI_TAG,
-                        tria->get_communicator(),
-                        &status);
-        AssertThrowMPI(ierr);
-
-        // store data for each cell
-        for (unsigned int c = 0; c < static_cast<unsigned int>(n_cells); ++c)
-          {
-            const auto cell = CellId(cell_data_to_send[idx][c]).to_cell(*tria);
-
-            typename MeshType::level_cell_iterator mesh_it(tria,
-                                                           cell->level(),
-                                                           cell->index(),
-                                                           &mesh);
-            const std_cxx17::optional<DataType>    data = pack(mesh_it);
-
-            {
-              typename DestinationToBufferMap::iterator p =
-                destination_to_data_buffer_map
-                  .insert(std::make_pair(
-                    idx, GridTools::CellDataTransferBuffer<dim, DataType>()))
-                  .first;
-
-              p->second.cell_ids.emplace_back(cell->id());
-              p->second.data.emplace_back(*data);
-            }
-          }
-
-        // send reply
-        GridTools::CellDataTransferBuffer<dim, DataType> &data =
-          destination_to_data_buffer_map[idx];
-
-        sendbuffers[idx] = Utilities::pack(data, /*enable_compression*/ false);
-        ierr             = MPI_Isend(sendbuffers[idx].data(),
-                         sendbuffers[idx].size(),
-                         MPI_BYTE,
-                         status.MPI_SOURCE,
-                         mpi_tag_reply,
-                         tria->get_communicator(),
-                         &reply_requests[idx]);
-        AssertThrowMPI(ierr);
-      }
-
-    // finally receive the replies
-    std::vector<char> receive;
-    for (unsigned int idx = 0; idx < level_ghost_owners.size(); ++idx)
-      {
-        MPI_Status status;
-        int        ierr = MPI_Probe(MPI_ANY_SOURCE,
-                             mpi_tag_reply,
-                             tria->get_communicator(),
-                             &status);
-        AssertThrowMPI(ierr);
-
-        int len;
-        ierr = MPI_Get_count(&status, MPI_BYTE, &len);
-        AssertThrowMPI(ierr);
-
-        receive.resize(len);
-
-        char *ptr = receive.data();
-        ierr      = MPI_Recv(ptr,
-                        len,
-                        MPI_BYTE,
-                        status.MPI_SOURCE,
-                        status.MPI_TAG,
-                        tria->get_communicator(),
-                        &status);
-        AssertThrowMPI(ierr);
-
-        auto cellinfo =
-          Utilities::unpack<CellDataTransferBuffer<dim, DataType>>(
-            receive, /*enable_compression*/ false);
-
-        DataType *data = cellinfo.data.data();
-        for (unsigned int c = 0; c < cellinfo.cell_ids.size(); ++c, ++data)
-          {
-            const typename Triangulation<dim, spacedim>::cell_iterator
-              tria_cell = cellinfo.cell_ids[c].to_cell(*tria);
-
-            const typename MeshType::level_cell_iterator cell(
-              tria, tria_cell->level(), tria_cell->index(), &mesh);
-
-            unpack(cell, *data);
-          }
-      }
-
-    // make sure that all communication is finished
-    // when we leave this function.
-    if (requests.size() > 0)
-      {
-        const int ierr =
-          MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
-        AssertThrowMPI(ierr);
-      }
-    if (reply_requests.size() > 0)
-      {
-        const int ierr = MPI_Waitall(reply_requests.size(),
-                                     reply_requests.data(),
-                                     MPI_STATUSES_IGNORE);
-        AssertThrowMPI(ierr);
-      }
-
-
-#    endif // DEAL_II_WITH_MPI
+    internal::exchange_cell_data<DataType,
+                                 MeshType,
+                                 typename MeshType::level_cell_iterator>(
+      mesh,
+      pack,
+      unpack,
+      cell_filter,
+      [&](const auto &process) {
+        for (const auto &cell : mesh.cell_iterators())
+          if (cell->level_subdomain_id() !=
+                dealii::numbers::artificial_subdomain_id &&
+              !cell->is_locally_owned_on_level())
+            process(cell, cell->level_subdomain_id());
+      },
+      [](const auto &tria) { return tria.level_ghost_owners(); });
+#    endif
   }
 } // namespace GridTools
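
Both public functions now forward to the new internal::exchange_cell_data() above, which ships the cells requested by each neighbor as CellId::binary_type values and reconstructs the corresponding cell iterators on the owning process. A stand-alone sketch of that CellId round trip (illustration only; the function name is a placeholder and a fully set-up triangulation is assumed):

#include <deal.II/base/exceptions.h>
#include <deal.II/grid/cell_id.h>
#include <deal.II/grid/tria.h>

template <int dim, int spacedim = dim>
void
check_cell_id_round_trip(const dealii::Triangulation<dim, spacedim> &tria)
{
  for (const auto &cell : tria.active_cell_iterators())
    {
      // what the requesting process puts into its request buffer
      const dealii::CellId::binary_type binary =
        cell->id().template to_binary<spacedim>();

      // what the owning process does with the received bytes
      const auto reconstructed_cell = dealii::CellId(binary).to_cell(tria);

      Assert(reconstructed_cell->id() == cell->id(),
             dealii::ExcInternalError());
    }
}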
 
diff --git a/source/dofs/dof_handler_policy.cc b/source/dofs/dof_handler_policy.cc
index fb7fb4ff66aac10269b971f5e0064c7d1aa3cbd3..290e0fed1f37cb55ac7706cf40b39a923f449a1c 100644
@@ -3679,11 +3679,10 @@ namespace internal
         template <class DoFHandlerType>
         void
         communicate_dof_indices_on_marked_cells(
-          const DoFHandlerType &dof_handler,
-          const std::map<unsigned int, std::set<dealii::types::subdomain_id>> &)
+          const DoFHandlerType &dof_handler)
         {
 #  ifndef DEAL_II_WITH_MPI
-          (void)vertices_with_ghost_neighbors;
+          (void)dof_handler;
           Assert(false, ExcNotImplemented());
 #  else
           const unsigned int dim      = DoFHandlerType::dimension;
@@ -3692,124 +3691,57 @@ namespace internal
           // define functions that pack data on cells that are ghost cells
           // somewhere else, and unpack data on cells where we get information
           // from elsewhere
-          auto pack =
-            [](const typename DoFHandlerType::active_cell_iterator &cell)
-            -> std_cxx17::optional<std::vector<types::global_dof_index>> {
+          const auto pack = [](const auto &cell) {
             Assert(cell->is_locally_owned(), ExcInternalError());
 
-            // first see whether we need to do anything at all on this cell.
-            // this is determined by whether the user_flag is set on the
-            // cell that indicates that the *complete* set of DoF indices
-            // has not been sent
-            if (cell->user_flag_set())
-              {
-                // get dof indices for the current cell
-                std::vector<types::global_dof_index> local_dof_indices(
-                  cell->get_fe().dofs_per_cell);
-                cell->get_dof_indices(local_dof_indices);
-
-                // now see if there are dof indices that were previously
-                // unknown. this can only happen in phase 1, and in
-                // that case we know that the user flag must have been set
-                //
-                // in any case, if the cell *is* complete, we do not
-                // need to send the data any more in the next phase. indicate
-                // this by removing the user flag
-                if (std::find(local_dof_indices.begin(),
-                              local_dof_indices.end(),
-                              numbers::invalid_dof_index) !=
-                    local_dof_indices.end())
-                  {
-                    Assert(cell->user_flag_set(), ExcInternalError());
-                  }
-                else
-                  cell->clear_user_flag();
-
-                return local_dof_indices;
-              }
-            else
-              {
-                // the fact that the user flag wasn't set means that there is
-                // nothing we need to send that hasn't been sent so far.
-                // so return an empty array, but also verify that indeed
-                // the cell is complete
-#    ifdef DEBUG
-                std::vector<types::global_dof_index> local_dof_indices(
-                  cell->get_fe().dofs_per_cell);
-                cell->get_dof_indices(local_dof_indices);
+            std::vector<dealii::types::global_dof_index> data(
+              cell->get_fe().dofs_per_cell);
+            cell->get_dof_indices(data);
 
-                const bool is_complete =
-                  (std::find(local_dof_indices.begin(),
-                             local_dof_indices.end(),
-                             numbers::invalid_dof_index) ==
-                   local_dof_indices.end());
-                Assert(is_complete, ExcInternalError());
-#    endif
-                return std_cxx17::optional<
-                  std::vector<types::global_dof_index>>();
-              }
+            return data;
           };
 
-          auto unpack =
-            [](const typename DoFHandlerType::active_cell_iterator &cell,
-               const std::vector<types::global_dof_index> &received_dof_indices)
-            -> void {
-            // this function should only be called on ghost cells, and
-            // on top of that, only on cells that have not been
-            // completed -- which we indicate via the user flag.
-            // check both
+          const auto unpack = [](const auto &cell, const auto &dofs) {
+            Assert(cell->get_fe().dofs_per_cell == dofs.size(),
+                   ExcInternalError());
+
             Assert(cell->is_ghost(), ExcInternalError());
-            Assert(cell->user_flag_set(), ExcInternalError());
 
-            // if we just got an incomplete array of DoF indices, then we must
-            // be in the first ghost exchange and the user flag must have been
-            // set. we tested that already above.
-            //
-            // if we did get a complete array, then we may be in the first
-            // or second ghost exchange, but in any case we need not exchange
-            // another time. so delete the user flag
-            const bool is_complete = (std::find(received_dof_indices.begin(),
-                                                received_dof_indices.end(),
-                                                numbers::invalid_dof_index) ==
-                                      received_dof_indices.end());
-            if (is_complete)
-              cell->clear_user_flag();
-
-            // in any case, set the DoF indices on this cell. some
-            // of the ones we received may still be invalid because
-            // the sending processor did not know them yet, so we
-            // need to merge the ones we get with those that are
-            // already set here and may have already been known. for
-            // those that we already know *and* get, they must obviously
-            // agree
-            //
-            // before getting the local dof indices, we need to update the
-            // cell dof indices cache because we may have set dof indices
-            // on a neighboring ghost cell before this one, which may have
-            // affected the dof indices we know about the current cell
-            std::vector<types::global_dof_index> local_dof_indices(
+            std::vector<dealii::types::global_dof_index> dof_indices(
               cell->get_fe().dofs_per_cell);
             cell->update_cell_dof_indices_cache();
-            cell->get_dof_indices(local_dof_indices);
+            cell->get_dof_indices(dof_indices);
 
-            for (unsigned int i = 0; i < local_dof_indices.size(); ++i)
-              if (local_dof_indices[i] == numbers::invalid_dof_index)
-                local_dof_indices[i] = received_dof_indices[i];
+            bool complete = true;
+            for (unsigned int i = 0; i < dof_indices.size(); ++i)
+              if (dofs[i] != numbers::invalid_dof_index)
+                {
+                  Assert((dof_indices[i] == (numbers::invalid_dof_index)) ||
+                           (dof_indices[i] == dofs[i]),
+                         ExcInternalError());
+                  dof_indices[i] = dofs[i];
+                }
               else
-                // we already know the dof index. check that there
-                // is no conflict
-                Assert((received_dof_indices[i] ==
-                        numbers::invalid_dof_index) ||
-                         (received_dof_indices[i] == local_dof_indices[i]),
-                       ExcInternalError());
+                complete = false;
+
+            if (!complete)
+              const_cast<typename DoFHandlerType::active_cell_iterator &>(cell)
+                ->set_user_flag();
+            else
+              const_cast<typename DoFHandlerType::active_cell_iterator &>(cell)
+                ->clear_user_flag();
 
             const_cast<typename DoFHandlerType::active_cell_iterator &>(cell)
-              ->set_dof_indices(local_dof_indices);
+              ->set_dof_indices(dof_indices);
+          };
+
+          const auto filter = [](const auto &cell) {
+            return cell->user_flag_set();
           };
 
           GridTools::exchange_cell_data_to_ghosts<
             std::vector<types::global_dof_index>,
-            DoFHandlerType>(dof_handler, pack, unpack);
+            DoFHandlerType>(dof_handler, pack, unpack, filter);
 
           // finally update the cell DoF indices caches to make sure
           // our internal data structures are consistent
@@ -3999,39 +3931,20 @@ namespace internal
           triangulation->save_user_flags(user_flags);
           triangulation->clear_user_flags();
 
-          // figure out which cells are ghost cells on which we have
-          // to exchange DoF indices
-          const std::map<unsigned int, std::set<dealii::types::subdomain_id>>
-            vertices_with_ghost_neighbors =
-              GridTools::compute_vertices_with_ghost_neighbors(*triangulation);
-
           // mark all cells that either have to send data (locally
           // owned cells that are adjacent to ghost neighbors in some
           // way) or receive data (all ghost cells) via the user flags
           for (const auto &cell : dof_handler->active_cell_iterators())
-            if (cell->is_locally_owned())
-              {
-                for (const unsigned int v : GeometryInfo<dim>::vertex_indices())
-                  if (vertices_with_ghost_neighbors.find(cell->vertex_index(
-                        v)) != vertices_with_ghost_neighbors.end())
-                    {
-                      cell->set_user_flag();
-                      break;
-                    }
-              }
-            else if (cell->is_ghost())
+            if (cell->is_ghost())
               cell->set_user_flag();
 
-
-
           // Send and receive cells. After this, only the local cells
           // are marked, that received new data. This has to be
           // communicated in a second communication step.
           //
           // as explained in the 'distributed' paper, this has to be
           // done twice
-          communicate_dof_indices_on_marked_cells(
-            *dof_handler, vertices_with_ghost_neighbors);
+          communicate_dof_indices_on_marked_cells(*dof_handler);
 
           // in case of hp::DoFHandlers, we may have received valid
           // indices of degrees of freedom that are dominated by a fe
@@ -4045,8 +3958,7 @@ namespace internal
           //                    DoF indices set. however, some ghost cells
           //                    may still have invalid ones. thus, exchange
           //                    one more time.
-          communicate_dof_indices_on_marked_cells(
-            *dof_handler, vertices_with_ghost_neighbors);
+          communicate_dof_indices_on_marked_cells(*dof_handler);
 
           // at this point, we must have taken care of the data transfer
           // on all cells we had previously marked. verify this
@@ -4547,17 +4459,9 @@ namespace internal
 
               // mark all own cells for transfer
               for (const auto &cell : dof_handler->active_cell_iterators())
-                if (!cell->is_artificial())
+                if (cell->is_ghost())
                   cell->set_user_flag();
 
-              // figure out which cells are ghost cells on which we have
-              // to exchange DoF indices
-              const std::map<unsigned int,
-                             std::set<dealii::types::subdomain_id>>
-                vertices_with_ghost_neighbors =
-                  GridTools::compute_vertices_with_ghost_neighbors(
-                    *triangulation);
-
 
               // Send and receive cells. After this, only the local cells
               // are marked, that received new data. This has to be
@@ -4565,8 +4469,7 @@ namespace internal
               //
               // as explained in the 'distributed' paper, this has to be
               // done twice
-              communicate_dof_indices_on_marked_cells(
-                *dof_handler, vertices_with_ghost_neighbors);
+              communicate_dof_indices_on_marked_cells(*dof_handler);
 
               // in case of hp::DoFHandlers, we may have received valid
               // indices of degrees of freedom that are dominated by a fe
@@ -4576,8 +4479,7 @@ namespace internal
               Implementation::merge_invalid_dof_indices_on_ghost_interfaces(
                 *dof_handler);
 
-              communicate_dof_indices_on_marked_cells(
-                *dof_handler, vertices_with_ghost_neighbors);
+              communicate_dof_indices_on_marked_cells(*dof_handler);
 
               triangulation->load_user_flags(user_flags);
             }
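
With the merged exchange routine, communicate_dof_indices_on_marked_cells() no longer needs the precomputed vertices_with_ghost_neighbors map: pack() returns the current DoF indices of a requested cell, unpack() merges the received indices into the locally known ones, and the user flag (queried by the new cell_filter) tracks which ghost cells still have to be requested again in the second exchange round. The merge rule, isolated into a hypothetical stand-alone helper (illustration only, not library API):

#include <deal.II/base/exceptions.h>
#include <deal.II/base/types.h>

#include <vector>

bool
merge_received_dof_indices(
  std::vector<dealii::types::global_dof_index> &      local_indices,
  const std::vector<dealii::types::global_dof_index> &received_indices)
{
  AssertDimension(local_indices.size(), received_indices.size());

  bool complete = true;
  for (unsigned int i = 0; i < local_indices.size(); ++i)
    if (received_indices[i] != dealii::numbers::invalid_dof_index)
      {
        // an index we already know must agree with the received one
        Assert(local_indices[i] == dealii::numbers::invalid_dof_index ||
                 local_indices[i] == received_indices[i],
               dealii::ExcInternalError());
        local_indices[i] = received_indices[i];
      }
    else
      complete = false; // the sender did not know this index yet

  // if 'complete' is false, the ghost cell keeps its user flag so that its
  // indices are requested again in the second exchange round
  return complete;
}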
diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_01.cc b/tests/distributed_grids/grid_tools_exchange_cell_data_01.cc
index 8696acc458f08372068663aa055011e18f9b88a1..22fe413e1d67aa556dd6a49684c4b9967a7384d4 100644
@@ -40,20 +40,48 @@ test()
   GridGenerator::hyper_cube(tria);
   tria.refine_global(2);
 
-  std::set<std::string> output;
+  std::set<std::string> input, output;
+
+
+  typedef double       DT;
+  std::map<CellId, DT> map;
+  DT                   counter = 0.0;
+
+  std::map<unsigned int, std::set<dealii::types::subdomain_id>>
+    vertices_with_ghost_neighbors =
+      GridTools::compute_vertices_with_ghost_neighbors(tria);
+
+  for (const auto &cell : tria.active_cell_iterators())
+    if (cell->is_locally_owned())
+      {
+        for (const unsigned int v : GeometryInfo<dim>::vertex_indices())
+          {
+            const std::map<unsigned int,
+                           std::set<dealii::types::subdomain_id>>::
+              const_iterator neighbor_subdomains_of_vertex =
+                vertices_with_ghost_neighbors.find(cell->vertex_index(v));
+
+            if (neighbor_subdomains_of_vertex !=
+                vertices_with_ghost_neighbors.end())
+              {
+                map[cell->id()] = ++counter;
+                break;
+              }
+          }
+      }
 
   typedef
     typename parallel::distributed::Triangulation<dim>::active_cell_iterator
-                 cell_iterator;
-  typedef double DT;
-  DT             counter = 0.0;
+      cell_iterator;
   GridTools::
     exchange_cell_data_to_ghosts<DT, parallel::distributed::Triangulation<dim>>(
       tria,
       [&](const cell_iterator &cell) {
-        DT value = ++counter;
+        DT                 value = map[cell->id()];
+        std::ostringstream oss;
+        oss << "pack " << cell->id() << " " << value;
+        input.insert(oss.str());
 
-        deallog << "pack " << cell->id() << " " << value << std::endl;
         return value;
       },
       [&](const cell_iterator &cell, const DT &data) {
@@ -65,6 +93,8 @@ test()
       });
 
   // sort the output because it will come in in random order
+  for (auto &it : input)
+    deallog << it << std::endl;
   for (auto &it : output)
     deallog << it << std::endl;
 }
diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_02.cc b/tests/distributed_grids/grid_tools_exchange_cell_data_02.cc
index a7ae3eae0604190b85af25168b68aea809537164..c0e61d6555272a5690d85974d33e8bd0aa3924c6 100644
@@ -49,17 +49,44 @@ test()
   DoFHandler<dim> dofhandler(tria);
   dofhandler.distribute_dofs(fe);
 
-  std::set<std::string> output;
+  std::set<std::string> input, output;
 
   typedef typename DoFHandler<dim>::active_cell_iterator cell_iterator;
   typedef short                                          DT;
+  std::map<CellId, DT>                                   map;
   short                                                  counter = 0;
+
+  std::map<unsigned int, std::set<dealii::types::subdomain_id>>
+    vertices_with_ghost_neighbors =
+      GridTools::compute_vertices_with_ghost_neighbors(tria);
+
+  for (const auto &cell : tria.active_cell_iterators())
+    if (cell->is_locally_owned())
+      {
+        for (const unsigned int v : GeometryInfo<dim>::vertex_indices())
+          {
+            const std::map<unsigned int,
+                           std::set<dealii::types::subdomain_id>>::
+              const_iterator neighbor_subdomains_of_vertex =
+                vertices_with_ghost_neighbors.find(cell->vertex_index(v));
+
+            if (neighbor_subdomains_of_vertex !=
+                vertices_with_ghost_neighbors.end())
+              {
+                map[cell->id()] = ++counter;
+                break;
+              }
+          }
+      }
+
   GridTools::exchange_cell_data_to_ghosts<DT, DoFHandler<dim>>(
     dofhandler,
     [&](const cell_iterator &cell) {
-      DT value = ++counter;
+      DT                 value = map[cell->id()];
+      std::ostringstream oss;
+      oss << "pack " << cell->id() << " " << value;
+      input.insert(oss.str());
 
-      deallog << "pack " << cell->id() << " " << value << std::endl;
       return value;
     },
     [&](const cell_iterator &cell, const DT &data) {
@@ -71,6 +98,8 @@ test()
     });
 
   // sort the output because it will come in in random order
+  for (auto &it : input)
+    deallog << it << std::endl;
   for (auto &it : output)
     deallog << it << std::endl;
 }
diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_03.cc b/tests/distributed_grids/grid_tools_exchange_cell_data_03.cc
index 35ff6c1195d93429f8fd0fe49a4b0ebe565b2bca..f801858bee7bc4162a7397c488dc9c915c34fd62 100644
@@ -46,19 +46,46 @@ test()
   GridGenerator::hyper_cube(tria);
   tria.refine_global(2);
 
-  std::set<std::string> output;
+  std::set<std::string> input, output;
 
   typedef typename parallel::shared::Triangulation<dim>::active_cell_iterator
-                 cell_iterator;
-  typedef double DT;
-  DT             counter = 0.0;
+                       cell_iterator;
+  typedef double       DT;
+  std::map<CellId, DT> map;
+  DT                   counter = 0.0;
+
+  std::map<unsigned int, std::set<dealii::types::subdomain_id>>
+    vertices_with_ghost_neighbors =
+      GridTools::compute_vertices_with_ghost_neighbors(tria);
+
+  for (const auto &cell : tria.active_cell_iterators())
+    if (cell->is_locally_owned())
+      {
+        for (const unsigned int v : GeometryInfo<dim>::vertex_indices())
+          {
+            const std::map<unsigned int,
+                           std::set<dealii::types::subdomain_id>>::
+              const_iterator neighbor_subdomains_of_vertex =
+                vertices_with_ghost_neighbors.find(cell->vertex_index(v));
+
+            if (neighbor_subdomains_of_vertex !=
+                vertices_with_ghost_neighbors.end())
+              {
+                map[cell->id()] = ++counter;
+                break;
+              }
+          }
+      }
+
   GridTools::exchange_cell_data_to_ghosts<DT,
                                           parallel::shared::Triangulation<dim>>(
     tria,
     [&](const cell_iterator &cell) {
-      DT value = ++counter;
+      DT                 value = map[cell->id()];
+      std::ostringstream oss;
+      oss << "pack " << cell->id() << " " << value;
+      input.insert(oss.str());
 
-      deallog << "pack " << cell->id() << " " << value << std::endl;
       return value;
     },
     [&](const cell_iterator &cell, const DT &data) {
@@ -70,6 +97,8 @@ test()
     });
 
   // sort the output because it will come in in random order
+  for (auto &it : input)
+    deallog << it << std::endl;
   for (auto &it : output)
     deallog << it << std::endl;
 }
diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_04.cc b/tests/distributed_grids/grid_tools_exchange_cell_data_04.cc
index 13755381652056c7b7af3673c53d0e2607b2610a..7cab39e2ae8a5283e65e195d7b7bc8c4ced06932 100644
@@ -44,28 +44,57 @@ test()
   GridGenerator::hyper_cube(tria);
   tria.refine_global(2);
 
-  std::set<std::string> output;
+  std::map<CellId, std::string> input;
+  std::set<std::string>         output;
 
   typedef
     typename parallel::distributed::Triangulation<dim>::active_cell_iterator
-                 cell_iterator;
-  typedef double DT;
-  int            counter = 0;
+                        cell_iterator;
+  typedef double        DT;
+  std::map<CellId, int> map;
+  int                   counter = 0;
+
+  std::map<unsigned int, std::set<dealii::types::subdomain_id>>
+    vertices_with_ghost_neighbors =
+      GridTools::compute_vertices_with_ghost_neighbors(tria);
+
+  for (const auto &cell : tria.active_cell_iterators())
+    if (cell->is_locally_owned())
+      {
+        for (const unsigned int v : GeometryInfo<dim>::vertex_indices())
+          {
+            const std::map<unsigned int,
+                           std::set<dealii::types::subdomain_id>>::
+              const_iterator neighbor_subdomains_of_vertex =
+                vertices_with_ghost_neighbors.find(cell->vertex_index(v));
+
+            if (neighbor_subdomains_of_vertex !=
+                vertices_with_ghost_neighbors.end())
+              {
+                map[cell->id()] = ++counter;
+                break;
+              }
+          }
+      }
+
   GridTools::
     exchange_cell_data_to_ghosts<DT, parallel::distributed::Triangulation<dim>>(
       tria,
       [&](const cell_iterator &cell) -> std_cxx17::optional<DT> {
-        ++counter;
+        const auto         counter = map[cell->id()];
+        std::ostringstream oss;
         if (counter % 2 == 0)
           {
             DT value = counter;
 
-            deallog << "pack " << cell->id() << " " << value << std::endl;
+            oss << "pack " << cell->id() << " " << value;
+            input[cell->id()] = oss.str();
             return value;
           }
         else
           {
-            deallog << "skipping " << cell->id() << ' ' << counter << std::endl;
+            oss << "skipping " << cell->id() << ' ' << counter;
+            input[cell->id()] = oss.str();
             return std_cxx17::optional<DT>();
           }
       },
@@ -78,6 +107,8 @@ test()
       });
 
   // sort the output because it will come in in random order
+  for (auto &it : input)
+    deallog << it.second << std::endl;
   for (auto &it : output)
     deallog << it << std::endl;
 }
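
All four tests change in the same way, presumably because the merged implementation calls pack() on the owning process once per incoming request rather than once per locally owned interface cell, so a counter incremented inside pack() would no longer yield reproducible values or a reproducible output order. The tests therefore precompute a value per cell, keyed by CellId, and print the pack/unpack records from sorted containers. A compressed sketch of that precomputation (illustration only; the helper name is a placeholder):

#include <deal.II/base/geometry_info.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/grid_tools.h>

#include <map>

template <int dim>
std::map<dealii::CellId, double>
deterministic_cell_values(
  const dealii::parallel::distributed::Triangulation<dim> &tria)
{
  using namespace dealii;

  // which vertices are shared with ghost neighbors -- the same criterion the
  // old implementation used to decide which cells have to be sent at all
  const auto vertices_with_ghost_neighbors =
    GridTools::compute_vertices_with_ghost_neighbors(tria);

  std::map<CellId, double> values;
  double                   counter = 0.0;

  for (const auto &cell : tria.active_cell_iterators())
    if (cell->is_locally_owned())
      for (const unsigned int v : GeometryInfo<dim>::vertex_indices())
        if (vertices_with_ghost_neighbors.count(cell->vertex_index(v)) > 0)
          {
            values[cell->id()] = ++counter;
            break;
          }

  return values;
}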
