* on the sending side returned a non-empty std_cxx17::optional object.
* The @p unpack function is then called with the data sent by the
* processor that owns that cell.
- *
+ * @param cell_filter Only cells for which this filter function returns
+ * `true` are communicated. By default, the function returns `true` on all
+ * cells, so that all relevant cells are communicated.
*
* <h4> An example </h4>
*
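+ *
+ * As an illustrative sketch (where `mesh`, `pack`, and `unpack` stand for a
+ * mesh object and pack/unpack function objects as described above, and
+ * `DataType` and `MeshType` are placeholders for the template arguments),
+ * a call that restricts the exchange to cells previously marked via their
+ * user flag could look as follows:
+ * @code
+ *   GridTools::exchange_cell_data_to_ghosts<DataType, MeshType>(
+ *     mesh,
+ *     pack,
+ *     unpack,
+ *     [](const typename MeshType::active_cell_iterator &cell) {
+ *       return cell->user_flag_set();
+ *     });
+ * @endcode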
const std::function<std_cxx17::optional<DataType>(
const typename MeshType::active_cell_iterator &)> &pack,
const std::function<void(const typename MeshType::active_cell_iterator &,
- const DataType &)> & unpack);
+ const DataType &)> & unpack,
+ const std::function<bool(const typename MeshType::active_cell_iterator &)>
+ &cell_filter =
+ [](const typename MeshType::active_cell_iterator &) { return true; });
/**
 * Exchange arbitrary data of type @p DataType provided by the function
 * objects passed as arguments among all processes.
*
* In addition to the parameters of exchange_cell_data_to_ghosts(), this
- * function allows to provide a @p filter function, which can be used to only
+ * function allows one to provide a @p cell_filter function, which can be used to only
* communicate marked cells. In the default case, all relevant cells are
* communicated.
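+ *
+ * As a sketch (with the same placeholder arguments as in the example for
+ * exchange_cell_data_to_ghosts()), restricting the exchange to cells that
+ * have been marked via their user flag could look as follows:
+ * @code
+ *   GridTools::exchange_cell_data_to_level_ghosts<DataType, MeshType>(
+ *     mesh,
+ *     pack,
+ *     unpack,
+ *     [](const typename MeshType::level_cell_iterator &cell) {
+ *       return cell->user_flag_set();
+ *     });
+ * @endcode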
*/
const std::function<void(const typename MeshType::level_cell_iterator &,
const DataType &)> & unpack,
const std::function<bool(const typename MeshType::level_cell_iterator &)>
- &filter =
+ &cell_filter =
[](const typename MeshType::level_cell_iterator &) { return true; });
/* Exchange with all processors of the MPI communicator @p mpi_communicator the vector of bounding
}
-
- template <typename DataType, typename MeshType>
- void
- exchange_cell_data_to_ghosts(
- const MeshType & mesh,
- const std::function<std_cxx17::optional<DataType>(
- const typename MeshType::active_cell_iterator &)> &pack,
- const std::function<void(const typename MeshType::active_cell_iterator &,
- const DataType &)> & unpack)
+ namespace internal
{
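+    /**
+     * A helper function for exchange_cell_data_to_ghosts() and
+     * exchange_cell_data_to_level_ghosts() that implements their common
+     * communication pattern. The caller describes which cells to request
+     * data for through the @p process_cells callback, which invokes its
+     * argument once for every cell of interest together with the rank that
+     * owns that cell, and through @p compute_ghost_owners, which returns
+     * the set of ranks owning such cells. The @p pack, @p unpack, and
+     * @p cell_filter arguments have the same meaning as in those two
+     * functions.
+     */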
+ template <typename DataType,
+ typename MeshType,
+ typename MeshCellIteratorType>
+ void
+ exchange_cell_data(
+ const MeshType &mesh,
+ const std::function<
+ std_cxx17::optional<DataType>(const MeshCellIteratorType &)> &pack,
+ const std::function<void(const MeshCellIteratorType &, const DataType &)>
+ & unpack,
+ const std::function<bool(const MeshCellIteratorType &)> & cell_filter,
+ const std::function<void(
+ const std::function<void(const MeshCellIteratorType &,
+ const types::subdomain_id)> &)> &process_cells,
+ const std::function<std::set<types::subdomain_id>(
+ const parallel::TriangulationBase<MeshType::dimension,
+ MeshType::space_dimension> &)>
+ &compute_ghost_owners)
+ {
# ifndef DEAL_II_WITH_MPI
- (void)mesh;
- (void)pack;
- (void)unpack;
- Assert(false,
- ExcMessage(
- "GridTools::exchange_cell_data_to_ghosts() requires MPI."));
+ (void)mesh;
+ (void)pack;
+ (void)unpack;
+ (void)cell_filter;
+ (void)process_cells;
+ (void)compute_ghost_owners;
+ Assert(false,
+ ExcMessage("GridTools::exchange_cell_data() requires MPI."));
# else
- constexpr int dim = MeshType::dimension;
- constexpr int spacedim = MeshType::space_dimension;
- auto tria =
- dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
- &mesh.get_triangulation());
- Assert(
- tria != nullptr,
- ExcMessage(
- "The function exchange_cell_data_to_ghosts() only works with parallel triangulations."));
-
- // map neighbor_id -> data_buffer where we accumulate the data to send
- using DestinationToBufferMap =
+ constexpr int dim = MeshType::dimension;
+ constexpr int spacedim = MeshType::space_dimension;
+ auto tria =
+ dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+ &mesh.get_triangulation());
+ Assert(
+ tria != nullptr,
+ ExcMessage(
+            "GridTools::exchange_cell_data() only works with parallel triangulations."));
+
+ // build list of cells to request for each neighbor
+ std::set<dealii::types::subdomain_id> ghost_owners =
+ compute_ghost_owners(*tria);
std::map<dealii::types::subdomain_id,
- CellDataTransferBuffer<dim, DataType>>;
- DestinationToBufferMap destination_to_data_buffer_map;
+ std::vector<typename CellId::binary_type>>
+ neighbor_cell_list;
- std::map<unsigned int, std::set<dealii::types::subdomain_id>>
- vertices_with_ghost_neighbors =
- GridTools::compute_vertices_with_ghost_neighbors(*tria);
+ for (const auto ghost_owner : ghost_owners)
+ neighbor_cell_list[ghost_owner] = {};
- for (const auto &cell : tria->active_cell_iterators())
- if (cell->is_locally_owned())
- {
- std::set<dealii::types::subdomain_id> send_to;
- for (const unsigned int v : GeometryInfo<dim>::vertex_indices())
- {
- const std::map<unsigned int,
- std::set<dealii::types::subdomain_id>>::
- const_iterator neighbor_subdomains_of_vertex =
- vertices_with_ghost_neighbors.find(cell->vertex_index(v));
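+        // ask the caller-provided callback for all cells whose data we
+        // need, together with the rank that owns each of them, and record
+        // their CellIds (in binary form) per owner; cells rejected by the
+        // filter are skipped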
+ process_cells([&](const auto &cell, const auto key) {
+ if (cell_filter(cell))
+ neighbor_cell_list[key].emplace_back(
+ cell->id().template to_binary<spacedim>());
+ });
- if (neighbor_subdomains_of_vertex ==
- vertices_with_ghost_neighbors.end())
- continue;
+ Assert(ghost_owners.size() == neighbor_cell_list.size(),
+ ExcInternalError());
- Assert(neighbor_subdomains_of_vertex->second.size() != 0,
- ExcInternalError());
- send_to.insert(neighbor_subdomains_of_vertex->second.begin(),
- neighbor_subdomains_of_vertex->second.end());
- }
+ // Before sending & receiving, make sure we protect this section with
+ // a mutex:
+ static Utilities::MPI::CollectiveMutex mutex;
+ Utilities::MPI::CollectiveMutex::ScopedLock lock(
+ mutex, tria->get_communicator());
+
+ const int mpi_tag =
+ Utilities::MPI::internal::Tags::exchange_cell_data_request;
+ const int mpi_tag_reply =
+ Utilities::MPI::internal::Tags::exchange_cell_data_reply;
+
+ // send our requests:
+ std::vector<MPI_Request> requests(ghost_owners.size());
+ {
+ unsigned int idx = 0;
+ for (const auto &it : neighbor_cell_list)
+ {
+ // send the data about the relevant cells
+ const int ierr = MPI_Isend(it.second.data(),
+ it.second.size() * sizeof(it.second[0]),
+ MPI_BYTE,
+ it.first,
+ mpi_tag,
+ tria->get_communicator(),
+ &requests[idx]);
+ AssertThrowMPI(ierr);
+ ++idx;
+ }
+ }
- if (send_to.size() > 0)
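+        // map from the index of an incoming request to the buffer in which
+        // we accumulate the data we will send back in reply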
+ using DestinationToBufferMap =
+ std::map<dealii::types::subdomain_id,
+ GridTools::CellDataTransferBuffer<dim, DataType>>;
+ DestinationToBufferMap destination_to_data_buffer_map;
+
+        // receive the requests and reply with the packed cell data
+        std::vector<std::vector<typename CellId::binary_type>> cell_data_to_send(
+          ghost_owners.size());
+ std::vector<MPI_Request> reply_requests(ghost_owners.size());
+ std::vector<std::vector<char>> sendbuffers(ghost_owners.size());
+
+ for (unsigned int idx = 0; idx < ghost_owners.size(); ++idx)
+ {
+ MPI_Status status;
+ int ierr = MPI_Probe(MPI_ANY_SOURCE,
+ mpi_tag,
+ tria->get_communicator(),
+ &status);
+ AssertThrowMPI(ierr);
+
+ int len;
+ ierr = MPI_Get_count(&status, MPI_BYTE, &len);
+ AssertThrowMPI(ierr);
+ Assert(len % sizeof(cell_data_to_send[idx][0]) == 0,
+ ExcInternalError());
+
+ const unsigned int n_cells =
+ len / sizeof(typename CellId::binary_type);
+ cell_data_to_send[idx].resize(n_cells);
+
+ ierr = MPI_Recv(cell_data_to_send[idx].data(),
+ len,
+ MPI_BYTE,
+ status.MPI_SOURCE,
+ status.MPI_TAG,
+ tria->get_communicator(),
+ &status);
+ AssertThrowMPI(ierr);
+
+ // store data for each cell
+ for (unsigned int c = 0; c < static_cast<unsigned int>(n_cells); ++c)
{
- // this cell's data needs to be sent to someone
- typename MeshType::active_cell_iterator mesh_it(tria,
- cell->level(),
- cell->index(),
- &mesh);
+ const auto cell =
+ CellId(cell_data_to_send[idx][c]).to_cell(*tria);
+ MeshCellIteratorType mesh_it(tria,
+ cell->level(),
+ cell->index(),
+ &mesh);
const std_cxx17::optional<DataType> data = pack(mesh_it);
if (data)
{
- const CellId cellid = cell->id();
-
- for (const auto subdomain : send_to)
- {
- // find the data buffer for proc "subdomain" if it exists
- // or create an empty one otherwise
- typename DestinationToBufferMap::iterator p =
- destination_to_data_buffer_map
- .insert(std::make_pair(
- subdomain, CellDataTransferBuffer<dim, DataType>()))
- .first;
-
- p->second.cell_ids.emplace_back(cellid);
- p->second.data.emplace_back(*data);
- }
+ typename DestinationToBufferMap::iterator p =
+ destination_to_data_buffer_map
+ .insert(std::make_pair(
+ idx,
+ GridTools::CellDataTransferBuffer<dim, DataType>()))
+ .first;
+
+ p->second.cell_ids.emplace_back(cell->id());
+ p->second.data.emplace_back(*data);
}
}
+
+ // send reply
+ GridTools::CellDataTransferBuffer<dim, DataType> &data =
+ destination_to_data_buffer_map[idx];
+
+ sendbuffers[idx] =
+ Utilities::pack(data, /*enable_compression*/ false);
+ ierr = MPI_Isend(sendbuffers[idx].data(),
+ sendbuffers[idx].size(),
+ MPI_BYTE,
+ status.MPI_SOURCE,
+ mpi_tag_reply,
+ tria->get_communicator(),
+ &reply_requests[idx]);
+ AssertThrowMPI(ierr);
}
- // Protect the following communication:
- static Utilities::MPI::CollectiveMutex mutex;
- Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex,
- tria->get_communicator());
+ // finally receive the replies
+ std::vector<char> receive;
+ for (unsigned int idx = 0; idx < ghost_owners.size(); ++idx)
+ {
+ MPI_Status status;
+ int ierr = MPI_Probe(MPI_ANY_SOURCE,
+ mpi_tag_reply,
+ tria->get_communicator(),
+ &status);
+ AssertThrowMPI(ierr);
- const int mpi_tag =
- Utilities::MPI::internal::Tags::exchange_cell_data_to_ghosts;
+ int len;
+ ierr = MPI_Get_count(&status, MPI_BYTE, &len);
+ AssertThrowMPI(ierr);
- // 2. send our messages
- std::set<dealii::types::subdomain_id> ghost_owners = tria->ghost_owners();
- const unsigned int n_ghost_owners = ghost_owners.size();
- std::vector<std::vector<char>> sendbuffers(n_ghost_owners);
- std::vector<MPI_Request> requests(n_ghost_owners);
+ receive.resize(len);
- unsigned int idx = 0;
- for (auto it = ghost_owners.begin(); it != ghost_owners.end(); ++it, ++idx)
- {
- CellDataTransferBuffer<dim, DataType> &data =
- destination_to_data_buffer_map[*it];
-
- // pack all the data into the buffer for this recipient and send it.
- // keep data around till we can make sure that the packet has been
- // received
- sendbuffers[idx] = Utilities::pack(data, /*enable_compression*/ false);
- const int ierr = MPI_Isend(sendbuffers[idx].data(),
- sendbuffers[idx].size(),
- MPI_BYTE,
- *it,
- mpi_tag,
- tria->get_communicator(),
- &requests[idx]);
- AssertThrowMPI(ierr);
- }
+ char *ptr = receive.data();
+ ierr = MPI_Recv(ptr,
+ len,
+ MPI_BYTE,
+ status.MPI_SOURCE,
+ status.MPI_TAG,
+ tria->get_communicator(),
+ &status);
+ AssertThrowMPI(ierr);
- // 3. receive messages
- std::vector<char> receive;
- for (unsigned int idx = 0; idx < n_ghost_owners; ++idx)
- {
- MPI_Status status;
- int ierr =
- MPI_Probe(MPI_ANY_SOURCE, mpi_tag, tria->get_communicator(), &status);
- AssertThrowMPI(ierr);
-
- int len;
- ierr = MPI_Get_count(&status, MPI_BYTE, &len);
- AssertThrowMPI(ierr);
-
- receive.resize(len);
-
- char *ptr = receive.data();
- ierr = MPI_Recv(ptr,
- len,
- MPI_BYTE,
- status.MPI_SOURCE,
- status.MPI_TAG,
- tria->get_communicator(),
- &status);
- AssertThrowMPI(ierr);
-
- auto cellinfo =
- Utilities::unpack<CellDataTransferBuffer<dim, DataType>>(
- receive, /*enable_compression*/ false);
-
- DataType *data = cellinfo.data.data();
- for (unsigned int c = 0; c < cellinfo.cell_ids.size(); ++c, ++data)
- {
- const typename Triangulation<dim, spacedim>::cell_iterator
- tria_cell = cellinfo.cell_ids[c].to_cell(*tria);
+ auto cellinfo =
+ Utilities::unpack<CellDataTransferBuffer<dim, DataType>>(
+ receive, /*enable_compression*/ false);
- const typename MeshType::active_cell_iterator cell(
- tria, tria_cell->level(), tria_cell->index(), &mesh);
+ DataType *data = cellinfo.data.data();
+ for (unsigned int c = 0; c < cellinfo.cell_ids.size(); ++c, ++data)
+ {
+ const typename Triangulation<dim, spacedim>::cell_iterator
+ tria_cell = cellinfo.cell_ids[c].to_cell(*tria);
+
+ MeshCellIteratorType cell(tria,
+ tria_cell->level(),
+ tria_cell->index(),
+ &mesh);
+
+ unpack(cell, *data);
+ }
+ }
+
+ // make sure that all communication is finished
+ // when we leave this function.
+ if (requests.size() > 0)
+ {
+ const int ierr =
+ MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+ }
+ if (reply_requests.size() > 0)
+ {
+ const int ierr = MPI_Waitall(reply_requests.size(),
+ reply_requests.data(),
+ MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+ }
- unpack(cell, *data);
- }
- }
- // make sure that all communication is finished
- // when we leave this function.
- if (requests.size())
- {
- const int ierr =
- MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
- AssertThrowMPI(ierr);
- }
# endif // DEAL_II_WITH_MPI
+ }
+
+ } // namespace internal
+
+ template <typename DataType, typename MeshType>
+ void
+ exchange_cell_data_to_ghosts(
+ const MeshType & mesh,
+ const std::function<std_cxx17::optional<DataType>(
+ const typename MeshType::active_cell_iterator &)> &pack,
+ const std::function<void(const typename MeshType::active_cell_iterator &,
+ const DataType &)> & unpack,
+ const std::function<bool(const typename MeshType::active_cell_iterator &)>
+ &cell_filter)
+ {
+# ifndef DEAL_II_WITH_MPI
+ (void)mesh;
+ (void)pack;
+ (void)unpack;
+ (void)cell_filter;
+ Assert(false,
+ ExcMessage(
+ "GridTools::exchange_cell_data_to_ghosts() requires MPI."));
+# else
+ internal::exchange_cell_data<DataType,
+ MeshType,
+ typename MeshType::active_cell_iterator>(
+ mesh,
+ pack,
+ unpack,
+ cell_filter,
+ [&](const auto &process) {
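+        // request data for all active ghost cells; each of them is owned
+        // by the rank given by its subdomain_id()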
+ for (const auto &cell : mesh.active_cell_iterators())
+ if (cell->is_ghost())
+ process(cell, cell->subdomain_id());
+ },
+ [](const auto &tria) { return tria.ghost_owners(); });
+# endif // DEAL_II_WITH_MPI
}
const std::function<void(const typename MeshType::level_cell_iterator &,
const DataType &)> & unpack,
const std::function<bool(const typename MeshType::level_cell_iterator &)>
- &filter)
+ &cell_filter)
{
# ifndef DEAL_II_WITH_MPI
(void)mesh;
(void)pack;
(void)unpack;
- (void)filter;
+ (void)cell_filter;
Assert(false,
ExcMessage(
- "GridTools::exchange_cell_data_to_ghosts() requires MPI."));
+ "GridTools::exchange_cell_data_to_level_ghosts() requires MPI."));
# else
- constexpr int dim = MeshType::dimension;
- constexpr int spacedim = MeshType::space_dimension;
- auto tria =
- dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
- &mesh.get_triangulation());
- Assert(
- tria != nullptr,
- ExcMessage(
- "The function exchange_cell_data_to_ghosts() only works with parallel triangulations."));
-
- // build list of cells to request for each neighbor
- std::set<dealii::types::subdomain_id> level_ghost_owners =
- tria->level_ghost_owners();
- std::map<dealii::types::subdomain_id,
- std::vector<typename CellId::binary_type>>
- neighbor_cell_list;
-
- for (const auto level_ghost_owner : level_ghost_owners)
- neighbor_cell_list[level_ghost_owner] = {};
-
- {
- for (const auto &cell : mesh.cell_iterators())
- if (cell->level_subdomain_id() !=
- dealii::numbers::artificial_subdomain_id &&
- !cell->is_locally_owned_on_level())
- if (filter(cell))
- neighbor_cell_list[cell->level_subdomain_id()].emplace_back(
- cell->id().template to_binary<spacedim>());
- }
-
- Assert(level_ghost_owners.size() == neighbor_cell_list.size(),
- ExcInternalError());
-
-
- // Before sending & receiving, make sure we protect this section with
- // a mutex:
- static Utilities::MPI::CollectiveMutex mutex;
- Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex,
- tria->get_communicator());
-
- const int mpi_tag = Utilities::MPI::internal::Tags::
- exchange_cell_data_to_level_ghosts_request;
- const int mpi_tag_reply =
- Utilities::MPI::internal::Tags::exchange_cell_data_to_level_ghosts_reply;
-
- // send our requests:
- std::vector<MPI_Request> requests(level_ghost_owners.size());
- {
- unsigned int idx = 0;
- for (const auto &it : neighbor_cell_list)
- {
- // send the data about the relevant cells
- const int ierr = MPI_Isend(it.second.data(),
- it.second.size() * sizeof(it.second[0]),
- MPI_BYTE,
- it.first,
- mpi_tag,
- tria->get_communicator(),
- &requests[idx]);
- AssertThrowMPI(ierr);
- ++idx;
- }
- }
-
- using DestinationToBufferMap =
- std::map<dealii::types::subdomain_id,
- GridTools::CellDataTransferBuffer<dim, DataType>>;
- DestinationToBufferMap destination_to_data_buffer_map;
-
- // receive requests and reply with the ghost indices
- std::vector<std::vector<typename CellId::binary_type>> cell_data_to_send(
- level_ghost_owners.size());
- std::vector<std::vector<types::global_dof_index>>
- send_dof_numbers_and_indices(level_ghost_owners.size());
- std::vector<MPI_Request> reply_requests(level_ghost_owners.size());
- std::vector<std::vector<char>> sendbuffers(level_ghost_owners.size());
-
- for (unsigned int idx = 0; idx < level_ghost_owners.size(); ++idx)
- {
- MPI_Status status;
- int ierr =
- MPI_Probe(MPI_ANY_SOURCE, mpi_tag, tria->get_communicator(), &status);
- AssertThrowMPI(ierr);
-
- int len;
- ierr = MPI_Get_count(&status, MPI_BYTE, &len);
- AssertThrowMPI(ierr);
- Assert(len % sizeof(cell_data_to_send[idx][0]) == 0,
- ExcInternalError());
-
- const unsigned int n_cells = len / sizeof(typename CellId::binary_type);
- cell_data_to_send[idx].resize(n_cells);
-
- ierr = MPI_Recv(cell_data_to_send[idx].data(),
- len,
- MPI_BYTE,
- status.MPI_SOURCE,
- status.MPI_TAG,
- tria->get_communicator(),
- &status);
- AssertThrowMPI(ierr);
-
- // store data for each cell
- for (unsigned int c = 0; c < static_cast<unsigned int>(n_cells); ++c)
- {
- const auto cell = CellId(cell_data_to_send[idx][c]).to_cell(*tria);
-
- typename MeshType::level_cell_iterator mesh_it(tria,
- cell->level(),
- cell->index(),
- &mesh);
- const std_cxx17::optional<DataType> data = pack(mesh_it);
-
- {
- typename DestinationToBufferMap::iterator p =
- destination_to_data_buffer_map
- .insert(std::make_pair(
- idx, GridTools::CellDataTransferBuffer<dim, DataType>()))
- .first;
-
- p->second.cell_ids.emplace_back(cell->id());
- p->second.data.emplace_back(*data);
- }
- }
-
- // send reply
- GridTools::CellDataTransferBuffer<dim, DataType> &data =
- destination_to_data_buffer_map[idx];
-
- sendbuffers[idx] = Utilities::pack(data, /*enable_compression*/ false);
- ierr = MPI_Isend(sendbuffers[idx].data(),
- sendbuffers[idx].size(),
- MPI_BYTE,
- status.MPI_SOURCE,
- mpi_tag_reply,
- tria->get_communicator(),
- &reply_requests[idx]);
- AssertThrowMPI(ierr);
- }
-
- // finally receive the replies
- std::vector<char> receive;
- for (unsigned int idx = 0; idx < level_ghost_owners.size(); ++idx)
- {
- MPI_Status status;
- int ierr = MPI_Probe(MPI_ANY_SOURCE,
- mpi_tag_reply,
- tria->get_communicator(),
- &status);
- AssertThrowMPI(ierr);
-
- int len;
- ierr = MPI_Get_count(&status, MPI_BYTE, &len);
- AssertThrowMPI(ierr);
-
- receive.resize(len);
-
- char *ptr = receive.data();
- ierr = MPI_Recv(ptr,
- len,
- MPI_BYTE,
- status.MPI_SOURCE,
- status.MPI_TAG,
- tria->get_communicator(),
- &status);
- AssertThrowMPI(ierr);
-
- auto cellinfo =
- Utilities::unpack<CellDataTransferBuffer<dim, DataType>>(
- receive, /*enable_compression*/ false);
-
- DataType *data = cellinfo.data.data();
- for (unsigned int c = 0; c < cellinfo.cell_ids.size(); ++c, ++data)
- {
- const typename Triangulation<dim, spacedim>::cell_iterator
- tria_cell = cellinfo.cell_ids[c].to_cell(*tria);
-
- const typename MeshType::level_cell_iterator cell(
- tria, tria_cell->level(), tria_cell->index(), &mesh);
-
- unpack(cell, *data);
- }
- }
-
- // make sure that all communication is finished
- // when we leave this function.
- if (requests.size() > 0)
- {
- const int ierr =
- MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
- AssertThrowMPI(ierr);
- }
- if (reply_requests.size() > 0)
- {
- const int ierr = MPI_Waitall(reply_requests.size(),
- reply_requests.data(),
- MPI_STATUSES_IGNORE);
- AssertThrowMPI(ierr);
- }
-
-
-# endif // DEAL_II_WITH_MPI
+ internal::exchange_cell_data<DataType,
+ MeshType,
+ typename MeshType::level_cell_iterator>(
+ mesh,
+ pack,
+ unpack,
+ cell_filter,
+ [&](const auto &process) {
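+        // request data for all cells that are ghosts on some level of the
+        // mesh hierarchy, i.e., cells that are known here but owned on
+        // their level by the rank given by level_subdomain_id()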
+ for (const auto &cell : mesh.cell_iterators())
+ if (cell->level_subdomain_id() !=
+ dealii::numbers::artificial_subdomain_id &&
+ !cell->is_locally_owned_on_level())
+ process(cell, cell->level_subdomain_id());
+ },
+ [](const auto &tria) { return tria.level_ghost_owners(); });
+# endif // DEAL_II_WITH_MPI
}
} // namespace GridTools
template <class DoFHandlerType>
void
communicate_dof_indices_on_marked_cells(
- const DoFHandlerType &dof_handler,
- const std::map<unsigned int, std::set<dealii::types::subdomain_id>> &)
+ const DoFHandlerType &dof_handler)
{
# ifndef DEAL_II_WITH_MPI
- (void)vertices_with_ghost_neighbors;
+ (void)dof_handler;
Assert(false, ExcNotImplemented());
# else
const unsigned int dim = DoFHandlerType::dimension;
// define functions that pack data on cells that are ghost cells
// somewhere else, and unpack data on cells where we get information
// from elsewhere
- auto pack =
- [](const typename DoFHandlerType::active_cell_iterator &cell)
- -> std_cxx17::optional<std::vector<types::global_dof_index>> {
+ const auto pack = [](const auto &cell) {
Assert(cell->is_locally_owned(), ExcInternalError());
- // first see whether we need to do anything at all on this cell.
- // this is determined by whether the user_flag is set on the
- // cell that indicates that the *complete* set of DoF indices
- // has not been sent
- if (cell->user_flag_set())
- {
- // get dof indices for the current cell
- std::vector<types::global_dof_index> local_dof_indices(
- cell->get_fe().dofs_per_cell);
- cell->get_dof_indices(local_dof_indices);
-
- // now see if there are dof indices that were previously
- // unknown. this can only happen in phase 1, and in
- // that case we know that the user flag must have been set
- //
- // in any case, if the cell *is* complete, we do not
- // need to send the data any more in the next phase. indicate
- // this by removing the user flag
- if (std::find(local_dof_indices.begin(),
- local_dof_indices.end(),
- numbers::invalid_dof_index) !=
- local_dof_indices.end())
- {
- Assert(cell->user_flag_set(), ExcInternalError());
- }
- else
- cell->clear_user_flag();
-
- return local_dof_indices;
- }
- else
- {
- // the fact that the user flag wasn't set means that there is
- // nothing we need to send that hasn't been sent so far.
- // so return an empty array, but also verify that indeed
- // the cell is complete
-# ifdef DEBUG
- std::vector<types::global_dof_index> local_dof_indices(
- cell->get_fe().dofs_per_cell);
- cell->get_dof_indices(local_dof_indices);
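+          // return the DoF indices as currently known on this locally
+          // owned cell; some of them may still be invalid, and the
+          // receiving side will only merge in the valid ones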
+ std::vector<dealii::types::global_dof_index> data(
+ cell->get_fe().dofs_per_cell);
+ cell->get_dof_indices(data);
- const bool is_complete =
- (std::find(local_dof_indices.begin(),
- local_dof_indices.end(),
- numbers::invalid_dof_index) ==
- local_dof_indices.end());
- Assert(is_complete, ExcInternalError());
-# endif
- return std_cxx17::optional<
- std::vector<types::global_dof_index>>();
- }
+ return data;
};
- auto unpack =
- [](const typename DoFHandlerType::active_cell_iterator &cell,
- const std::vector<types::global_dof_index> &received_dof_indices)
- -> void {
- // this function should only be called on ghost cells, and
- // on top of that, only on cells that have not been
- // completed -- which we indicate via the user flag.
- // check both
+ const auto unpack = [](const auto &cell, const auto &dofs) {
+ Assert(cell->get_fe().dofs_per_cell == dofs.size(),
+ ExcInternalError());
+
Assert(cell->is_ghost(), ExcInternalError());
- Assert(cell->user_flag_set(), ExcInternalError());
- // if we just got an incomplete array of DoF indices, then we must
- // be in the first ghost exchange and the user flag must have been
- // set. we tested that already above.
- //
- // if we did get a complete array, then we may be in the first
- // or second ghost exchange, but in any case we need not exchange
- // another time. so delete the user flag
- const bool is_complete = (std::find(received_dof_indices.begin(),
- received_dof_indices.end(),
- numbers::invalid_dof_index) ==
- received_dof_indices.end());
- if (is_complete)
- cell->clear_user_flag();
-
- // in any case, set the DoF indices on this cell. some
- // of the ones we received may still be invalid because
- // the sending processor did not know them yet, so we
- // need to merge the ones we get with those that are
- // already set here and may have already been known. for
- // those that we already know *and* get, they must obviously
- // agree
- //
- // before getting the local dof indices, we need to update the
- // cell dof indices cache because we may have set dof indices
- // on a neighboring ghost cell before this one, which may have
- // affected the dof indices we know about the current cell
- std::vector<types::global_dof_index> local_dof_indices(
+ std::vector<dealii::types::global_dof_index> dof_indices(
cell->get_fe().dofs_per_cell);
cell->update_cell_dof_indices_cache();
- cell->get_dof_indices(local_dof_indices);
+ cell->get_dof_indices(dof_indices);
- for (unsigned int i = 0; i < local_dof_indices.size(); ++i)
- if (local_dof_indices[i] == numbers::invalid_dof_index)
- local_dof_indices[i] = received_dof_indices[i];
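+          // merge the received indices into what we already know: indices
+          // the owner has determined must either be new to us or agree
+          // with what we already have; if any received index is still
+          // invalid, this cell is not complete yet and has to stay marked
+          // for another exchange round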
+ bool complete = true;
+ for (unsigned int i = 0; i < dof_indices.size(); ++i)
+ if (dofs[i] != numbers::invalid_dof_index)
+ {
+ Assert((dof_indices[i] == (numbers::invalid_dof_index)) ||
+ (dof_indices[i] == dofs[i]),
+ ExcInternalError());
+ dof_indices[i] = dofs[i];
+ }
else
- // we already know the dof index. check that there
- // is no conflict
- Assert((received_dof_indices[i] ==
- numbers::invalid_dof_index) ||
- (received_dof_indices[i] == local_dof_indices[i]),
- ExcInternalError());
+ complete = false;
+
+ if (!complete)
+ const_cast<typename DoFHandlerType::active_cell_iterator &>(cell)
+ ->set_user_flag();
+ else
+ const_cast<typename DoFHandlerType::active_cell_iterator &>(cell)
+ ->clear_user_flag();
const_cast<typename DoFHandlerType::active_cell_iterator &>(cell)
- ->set_dof_indices(local_dof_indices);
+ ->set_dof_indices(dof_indices);
+ };
+
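+        // only exchange data for those ghost cells that are still marked
+        // via the user flag, i.e., whose DoF indices are not yet complete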
+ const auto filter = [](const auto &cell) {
+ return cell->user_flag_set();
};
GridTools::exchange_cell_data_to_ghosts<
std::vector<types::global_dof_index>,
- DoFHandlerType>(dof_handler, pack, unpack);
+ DoFHandlerType>(dof_handler, pack, unpack, filter);
// finally update the cell DoF indices caches to make sure
// our internal data structures are consistent
triangulation->save_user_flags(user_flags);
triangulation->clear_user_flags();
- // figure out which cells are ghost cells on which we have
- // to exchange DoF indices
- const std::map<unsigned int, std::set<dealii::types::subdomain_id>>
- vertices_with_ghost_neighbors =
- GridTools::compute_vertices_with_ghost_neighbors(*triangulation);
-
-        // mark all cells that either have to send data (locally
-        // owned cells that are adjacent to ghost neighbors in some
-        // way) or receive data (all ghost cells) via the user flags
+        // mark all ghost cells, i.e., the cells for which we have to
+        // receive data (and consequently have to request it from their
+        // owners), via the user flags
for (const auto &cell : dof_handler->active_cell_iterators())
- if (cell->is_locally_owned())
- {
- for (const unsigned int v : GeometryInfo<dim>::vertex_indices())
- if (vertices_with_ghost_neighbors.find(cell->vertex_index(
- v)) != vertices_with_ghost_neighbors.end())
- {
- cell->set_user_flag();
- break;
- }
- }
- else if (cell->is_ghost())
+ if (cell->is_ghost())
cell->set_user_flag();
-
-
-      // Send and receive cells. After this, only the local cells
-      // are marked, that received new data. This has to be
-      // communicated in a second communication step.
+      // Send and receive cells. After this, only those ghost cells
+      // remain marked whose DoF indices are still incomplete; they
+      // are dealt with in a second communication step.
//
// as explained in the 'distributed' paper, this has to be
// done twice
- communicate_dof_indices_on_marked_cells(
- *dof_handler, vertices_with_ghost_neighbors);
+ communicate_dof_indices_on_marked_cells(*dof_handler);
      // in case of hp::DoFHandlers, we may have received valid
      // indices of degrees of freedom that are dominated by a fe.
      // however, some ghost cells may still have invalid ones.
      // thus, exchange one more time.
- communicate_dof_indices_on_marked_cells(
- *dof_handler, vertices_with_ghost_neighbors);
+ communicate_dof_indices_on_marked_cells(*dof_handler);
// at this point, we must have taken care of the data transfer
// on all cells we had previously marked. verify this
-          // mark all own cells for transfer
+          // mark all ghost cells for transfer
for (const auto &cell : dof_handler->active_cell_iterators())
- if (!cell->is_artificial())
+ if (cell->is_ghost())
cell->set_user_flag();
- // figure out which cells are ghost cells on which we have
- // to exchange DoF indices
- const std::map<unsigned int,
- std::set<dealii::types::subdomain_id>>
- vertices_with_ghost_neighbors =
- GridTools::compute_vertices_with_ghost_neighbors(
- *triangulation);
-
-          // Send and receive cells. After this, only the local cells
-          // are marked, that received new data. This has to be
+          // Send and receive cells. After this, only those ghost cells
+          // remain marked whose DoF indices are still incomplete; they
+          // are dealt with in a second communication step.
//
// as explained in the 'distributed' paper, this has to be
// done twice
- communicate_dof_indices_on_marked_cells(
- *dof_handler, vertices_with_ghost_neighbors);
+ communicate_dof_indices_on_marked_cells(*dof_handler);
      // in case of hp::DoFHandlers, we may have received valid
      // indices of degrees of freedom that are dominated by a fe;
      // merge the remaining invalid indices on ghost interfaces:
Implementation::merge_invalid_dof_indices_on_ghost_interfaces(
*dof_handler);
- communicate_dof_indices_on_marked_cells(
- *dof_handler, vertices_with_ghost_neighbors);
+ communicate_dof_indices_on_marked_cells(*dof_handler);
triangulation->load_user_flags(user_flags);
}