From: Peter Munch
Date: Wed, 10 Jun 2020 07:06:16 +0000 (+0200)
Subject: Add GridTools::exchange_cell_data_to_level_ghosts()
X-Git-Tag: v9.3.0-rc1~1450^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=a5521cde66f0cb710d7319f6a3d8ad20bbfbcbbf;p=dealii.git

Add GridTools::exchange_cell_data_to_level_ghosts()
---

diff --git a/include/deal.II/base/mpi_tags.h b/include/deal.II/base/mpi_tags.h
index a8323a0131..d4d401cacf 100644
--- a/include/deal.II/base/mpi_tags.h
+++ b/include/deal.II/base/mpi_tags.h
@@ -62,11 +62,11 @@ namespace Utilities
       /// Triangulation::communicate_locally_moved_vertices()
       triangulation_communicate_locally_moved_vertices,
 
-      /// dof_handler_policy.cc: communicate_mg_ghost_cells()
-      dofhandler_communicate_mg_ghost_cells,
+      /// grid_tools.h: exchange_cell_data_to_level_ghosts()
+      exchange_cell_data_to_level_ghosts_request,
 
-      /// dof_handler_policy.cc: communicate_mg_ghost_cells()
-      dofhandler_communicate_mg_ghost_cells_reply,
+      /// grid_tools.h: exchange_cell_data_to_level_ghosts()
+      exchange_cell_data_to_level_ghosts_reply,
 
       /// mg_transfer_internal.cc: fill_copy_indices()
       mg_transfer_fill_copy_indices,
diff --git a/include/deal.II/grid/grid_tools.h b/include/deal.II/grid/grid_tools.h
index 1a19ca6c1b..8de436f580 100644
--- a/include/deal.II/grid/grid_tools.h
+++ b/include/deal.II/grid/grid_tools.h
@@ -2853,6 +2853,27 @@ namespace GridTools
     const std::function<void(const typename MeshType::active_cell_iterator &,
                              const DataType &)> &unpack);
 
+  /**
+   * Exchange arbitrary data of type @p DataType provided by the function
+   * objects from locally owned level cells to ghost level cells on other
+   * processes.
+   *
+   * In addition to the parameters of exchange_cell_data_to_ghosts(), this
+   * function allows one to provide a @p filter function, which can be used to
+   * communicate only marked cells. By default, all relevant cells are
+   * communicated.
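+   *
+   * The following is only an illustrative sketch (the DoFHandler object
+   * @p dof_handler and the lambda bodies are placeholders): it sends one
+   * number per locally owned level cell to the processes that store that
+   * cell as a level ghost:
+   * @code
+   *   GridTools::exchange_cell_data_to_level_ghosts<unsigned int,
+   *                                                 DoFHandler<dim>>(
+   *     dof_handler,
+   *     [](const auto &cell) {
+   *       // pack: runs on the owner of the level cell
+   *       return std_cxx17::optional<unsigned int>(cell->level());
+   *     },
+   *     [](const auto &cell, const unsigned int &value) {
+   *       // unpack: runs on the process that stores the level ghost cell
+   *     });
+   * @endcode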
+   */
+  template <typename DataType, typename MeshType>
+  void
+  exchange_cell_data_to_level_ghosts(
+    const MeshType &                                    mesh,
+    const std::function<std_cxx17::optional<DataType>(
+      const typename MeshType::level_cell_iterator &)> &pack,
+    const std::function<void(const typename MeshType::level_cell_iterator &,
+                             const DataType &)> &       unpack,
+    const std::function<bool(const typename MeshType::level_cell_iterator &)>
+      &filter = [](const auto &) { return true; });
+
   /*
    * Exchange with all processors of the MPI communicator @p mpi_communicator
    * the vector of bounding boxes @p local_bboxes.
    *
@@ -4035,6 +4056,232 @@ namespace GridTools
         MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
       AssertThrowMPI(ierr);
     }
+#  endif // DEAL_II_WITH_MPI
+  }
+
+
+
+  template <typename DataType, typename MeshType>
+  void
+  exchange_cell_data_to_level_ghosts(
+    const MeshType &                                    mesh,
+    const std::function<std_cxx17::optional<DataType>(
+      const typename MeshType::level_cell_iterator &)> &pack,
+    const std::function<void(const typename MeshType::level_cell_iterator &,
+                             const DataType &)> &       unpack,
+    const std::function<bool(const typename MeshType::level_cell_iterator &)>
+      &filter)
+  {
+#  ifndef DEAL_II_WITH_MPI
+    (void)mesh;
+    (void)pack;
+    (void)unpack;
+    (void)filter;
+    Assert(false,
+           ExcMessage(
+             "GridTools::exchange_cell_data_to_level_ghosts() requires MPI."));
+#  else
+    constexpr int dim      = MeshType::dimension;
+    constexpr int spacedim = MeshType::space_dimension;
+    auto          tria =
+      dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+        &mesh.get_triangulation());
+    Assert(
+      tria != nullptr,
+      ExcMessage(
+        "The function exchange_cell_data_to_level_ghosts() only works with parallel triangulations."));
+
+    // build list of cells to request for each neighbor
+    std::set<dealii::types::subdomain_id> level_ghost_owners =
+      tria->level_ghost_owners();
+    std::map<dealii::types::subdomain_id,
+             std::vector<typename CellId::binary_type>>
+      neighbor_cell_list;
+
+    for (const auto level_ghost_owner : level_ghost_owners)
+      neighbor_cell_list[level_ghost_owner] = {};
+
+    {
+      for (const auto &cell : mesh.cell_iterators())
+        if (cell->level_subdomain_id() !=
+              dealii::numbers::artificial_subdomain_id &&
+            !cell->is_locally_owned_on_level())
+          if (filter(cell))
+            neighbor_cell_list[cell->level_subdomain_id()].emplace_back(
+              cell->id().template to_binary<spacedim>());
+    }
+
+    Assert(level_ghost_owners.size() == neighbor_cell_list.size(),
+           ExcInternalError());
+
+
+    // Before sending & receiving, make sure we protect this section with
+    // a mutex:
+    static Utilities::MPI::CollectiveMutex      mutex;
+    Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex,
+                                                     tria->get_communicator());
+
+    const int mpi_tag = Utilities::MPI::internal::Tags::
+      exchange_cell_data_to_level_ghosts_request;
+    const int mpi_tag_reply =
+      Utilities::MPI::internal::Tags::exchange_cell_data_to_level_ghosts_reply;
+
+    // send our requests:
+    std::vector<MPI_Request> requests(level_ghost_owners.size());
+    {
+      unsigned int idx = 0;
+      for (const auto &it : neighbor_cell_list)
+        {
+          // send the data about the relevant cells
+          const int ierr = MPI_Isend(it.second.data(),
+                                     it.second.size() * sizeof(it.second[0]),
+                                     MPI_BYTE,
+                                     it.first,
+                                     mpi_tag,
+                                     tria->get_communicator(),
+                                     &requests[idx]);
+          AssertThrowMPI(ierr);
+          ++idx;
+        }
+    }
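+
+    // What follows is the answering side of the request-reply scheme: probe
+    // for the incoming cell-id requests of each level ghost owner, collect
+    // the data returned by the user-provided pack function in a
+    // CellDataTransferBuffer per destination, and send the serialized buffer
+    // back with the reply tag.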
+    using DestinationToBufferMap =
+      std::map<unsigned int, GridTools::CellDataTransferBuffer<dim, DataType>>;
+    DestinationToBufferMap destination_to_data_buffer_map;
+
+    // receive requests and reply with the ghost indices
+    std::vector<std::vector<typename CellId::binary_type>> cell_data_to_send(
+      level_ghost_owners.size());
+    std::vector<std::vector<types::global_dof_index>>
+      send_dof_numbers_and_indices(level_ghost_owners.size());
+    std::vector<MPI_Request>       reply_requests(level_ghost_owners.size());
+    std::vector<std::vector<char>> sendbuffers(level_ghost_owners.size());
+
+    for (unsigned int idx = 0; idx < level_ghost_owners.size(); ++idx)
+      {
+        MPI_Status status;
+        int        ierr =
+          MPI_Probe(MPI_ANY_SOURCE, mpi_tag, tria->get_communicator(), &status);
+        AssertThrowMPI(ierr);
+
+        int len;
+        ierr = MPI_Get_count(&status, MPI_BYTE, &len);
+        AssertThrowMPI(ierr);
+        Assert(len % sizeof(cell_data_to_send[idx][0]) == 0,
+               ExcInternalError());
+
+        const unsigned int n_cells = len / sizeof(typename CellId::binary_type);
+        cell_data_to_send[idx].resize(n_cells);
+
+        ierr = MPI_Recv(cell_data_to_send[idx].data(),
+                        len,
+                        MPI_BYTE,
+                        status.MPI_SOURCE,
+                        status.MPI_TAG,
+                        tria->get_communicator(),
+                        &status);
+        AssertThrowMPI(ierr);
+
+        // store data for each cell
+        for (unsigned int c = 0; c < static_cast<unsigned int>(n_cells); ++c)
+          {
+            const auto cell = CellId(cell_data_to_send[idx][c]).to_cell(*tria);
+
+            typename MeshType::level_cell_iterator mesh_it(tria,
+                                                           cell->level(),
+                                                           cell->index(),
+                                                           &mesh);
+            const std_cxx17::optional<DataType> data = pack(mesh_it);
+
+            {
+              typename DestinationToBufferMap::iterator p =
+                destination_to_data_buffer_map
+                  .insert(std::make_pair(
+                    idx, GridTools::CellDataTransferBuffer<dim, DataType>()))
+                  .first;
+
+              p->second.cell_ids.emplace_back(cell->id());
+              p->second.data.emplace_back(*data);
+            }
+          }
+
+        // send reply
+        GridTools::CellDataTransferBuffer<dim, DataType> &data =
+          destination_to_data_buffer_map[idx];
+
+        sendbuffers[idx] = Utilities::pack(data, /*enable_compression*/ false);
+        ierr = MPI_Isend(sendbuffers[idx].data(),
+                         sendbuffers[idx].size(),
+                         MPI_BYTE,
+                         status.MPI_SOURCE,
+                         mpi_tag_reply,
+                         tria->get_communicator(),
+                         &reply_requests[idx]);
+        AssertThrowMPI(ierr);
+      }
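+
+    // Now handle the replies we receive ourselves: each reply carries a
+    // serialized CellDataTransferBuffer; unpack it, recover each cell from
+    // its CellId, and hand the transported data to the user-provided unpack
+    // function.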
+    // finally receive the replies
+    std::vector<char> receive;
+    for (unsigned int idx = 0; idx < level_ghost_owners.size(); ++idx)
+      {
+        MPI_Status status;
+        int        ierr = MPI_Probe(MPI_ANY_SOURCE,
                             mpi_tag_reply,
                             tria->get_communicator(),
                             &status);
+        AssertThrowMPI(ierr);
+
+        int len;
+        ierr = MPI_Get_count(&status, MPI_BYTE, &len);
+        AssertThrowMPI(ierr);
+
+        receive.resize(len);
+
+        char *ptr = receive.data();
+        ierr      = MPI_Recv(ptr,
                        len,
                        MPI_BYTE,
                        status.MPI_SOURCE,
                        status.MPI_TAG,
                        tria->get_communicator(),
                        &status);
+        AssertThrowMPI(ierr);
+
+        auto cellinfo =
+          Utilities::unpack<CellDataTransferBuffer<dim, DataType>>(
+            receive, /*enable_compression*/ false);
+
+        DataType *data = cellinfo.data.data();
+        for (unsigned int c = 0; c < cellinfo.cell_ids.size(); ++c, ++data)
+          {
+            const typename Triangulation<dim, spacedim>::cell_iterator
+              tria_cell = cellinfo.cell_ids[c].to_cell(*tria);
+
+            const typename MeshType::level_cell_iterator cell(
+              tria, tria_cell->level(), tria_cell->index(), &mesh);
+
+            unpack(cell, *data);
+          }
+      }
+
+    // make sure that all communication is finished
+    // when we leave this function.
+    if (requests.size() > 0)
+      {
+        const int ierr =
+          MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+        AssertThrowMPI(ierr);
+      }
+    if (reply_requests.size() > 0)
+      {
+        const int ierr = MPI_Waitall(reply_requests.size(),
+                                     reply_requests.data(),
+                                     MPI_STATUSES_IGNORE);
+        AssertThrowMPI(ierr);
+      }
+
 #  endif // DEAL_II_WITH_MPI
   }
 } // namespace GridTools
diff --git a/source/dofs/dof_handler_policy.cc b/source/dofs/dof_handler_policy.cc
index c73670e181..bfe0acc7ee 100644
--- a/source/dofs/dof_handler_policy.cc
+++ b/source/dofs/dof_handler_policy.cc
@@ -3570,151 +3570,6 @@ namespace internal
       namespace
       {
-        template <int dim, int spacedim>
-        void
-        get_mg_dofindices_recursively(
-          const dealii::parallel::DistributedTriangulationBase<dim, spacedim>
-            &tria,
-          const typename DoFHandler<dim, spacedim>::level_cell_iterator
-            &                                 dealii_cell,
-          const typename CellId::binary_type &quadrant,
-          std::vector<dealii::types::global_dof_index> &dof_numbers_and_indices)
-        {
-          if (dealii_cell->id() == CellId(quadrant))
-            {
-              // why would somebody request a cell that is not ours?
-              Assert(dealii_cell->level_subdomain_id() ==
-                       tria.locally_owned_subdomain(),
-                     ExcInternalError());
-
-              std::vector<dealii::types::global_dof_index> local_dof_indices(
-                dealii_cell->get_fe().dofs_per_cell);
-              dealii_cell->get_mg_dof_indices(local_dof_indices);
-
-              dof_numbers_and_indices.push_back(
-                dealii_cell->get_fe().dofs_per_cell);
-              dof_numbers_and_indices.insert(dof_numbers_and_indices.end(),
-                                             local_dof_indices.begin(),
-                                             local_dof_indices.end());
-              return; // we are done
-            }
-
-          if (dealii_cell->is_active())
-            return;
-
-          if (!dealii_cell->id().is_ancestor_of(CellId(quadrant)))
-            return;
-
-          for (unsigned int c = 0; c < GeometryInfo<dim>::max_children_per_cell;
-               ++c)
-            get_mg_dofindices_recursively(
-              tria, dealii_cell->child(c), quadrant, dof_numbers_and_indices);
-        }
-
-
-
-        template <int dim, int spacedim>
-        void
-        find_marked_mg_ghost_cells_recursively(
-          const typename dealii::parallel::
-            DistributedTriangulationBase<dim, spacedim> &tria,
-          const unsigned int                             tree_index,
-          const typename DoFHandler<dim, spacedim>::level_cell_iterator
-            &dealii_cell,
-          std::map<
-            dealii::types::subdomain_id,
-            std::vector<std::pair<unsigned int, typename CellId::binary_type>>>
-            &neighbor_cell_list)
-        {
-          // recurse...
-          if (dealii_cell->has_children())
-            {
-              for (unsigned int c = 0;
-                   c < GeometryInfo<dim>::max_children_per_cell;
-                   ++c)
-                find_marked_mg_ghost_cells_recursively(
-                  tria, tree_index, dealii_cell->child(c), neighbor_cell_list);
-            }
-
-          if (dealii_cell->user_flag_set() &&
-              dealii_cell->level_subdomain_id() !=
-                tria.locally_owned_subdomain())
-            {
-              neighbor_cell_list[dealii_cell->level_subdomain_id()]
-                .emplace_back(tree_index,
-                              dealii_cell->id().template to_binary<dim>());
-            }
-        }
-
-
-
-        template <int dim, int spacedim>
-        void
-        set_mg_dofindices_recursively(
-          const dealii::parallel::DistributedTriangulationBase<dim, spacedim>
-            &tria,
-          const typename DoFHandler<dim, spacedim>::level_cell_iterator
-            &                                  dealii_cell,
-          const typename CellId::binary_type & quadrant,
-          dealii::types::global_dof_index *    dofs)
-        {
-          if (dealii_cell->id() == CellId(quadrant))
-            {
-              Assert(dealii_cell->level_subdomain_id() !=
-                       dealii::numbers::artificial_subdomain_id,
-                     ExcInternalError());
-
-              // update dof indices of cell
-              std::vector<dealii::types::global_dof_index> dof_indices(
-                dealii_cell->get_fe().dofs_per_cell);
-              dealii_cell->get_mg_dof_indices(dof_indices);
-
-              bool complete = true;
-              for (unsigned int i = 0; i < dof_indices.size(); ++i)
-                if (dofs[i] != numbers::invalid_dof_index)
-                  {
-                    Assert((dof_indices[i] == (numbers::invalid_dof_index)) ||
-                             (dof_indices[i] == dofs[i]),
-                           ExcInternalError());
-                    dof_indices[i] = dofs[i];
-                  }
-                else
-                  complete = false;
-
-              if (!complete)
-                const_cast<
-                  typename DoFHandler<dim, spacedim>::level_cell_iterator &>(
-                  dealii_cell)
-                  ->set_user_flag();
-              else
-                const_cast<
-                  typename DoFHandler<dim, spacedim>::level_cell_iterator &>(
-                  dealii_cell)
-                  ->clear_user_flag();
-
-              const_cast<
-                typename DoFHandler<dim, spacedim>::level_cell_iterator &>(
-                dealii_cell)
-                ->set_mg_dof_indices(dof_indices);
-              return;
-            }
-
-          if (dealii_cell->is_active())
-            return;
-
-          if (!dealii_cell->id().is_ancestor_of(CellId(quadrant)))
-            return;
-
-          for (unsigned int c = 0; c < GeometryInfo<dim>::max_children_per_cell;
-               ++c)
-            set_mg_dofindices_recursively(tria,
-                                          dealii_cell->child(c),
-                                          quadrant,
-                                          dofs);
-        }
-
-
         template <int dim, int spacedim, typename DoFHandlerType>
         void
         communicate_mg_ghost_cells(
@@ -3722,208 +3577,60 @@ namespace internal
             DistributedTriangulationBase<dim, spacedim> &tria,
           DoFHandlerType &                               dof_handler)
         {
-          using QuadrantBufferType =
-            std::vector<std::pair<unsigned int, typename CellId::binary_type>>;
-
-          // build list of cells to request for each neighbor
-          std::set<dealii::types::subdomain_id> level_ghost_owners =
-            tria.level_ghost_owners();
-          std::map<dealii::types::subdomain_id, QuadrantBufferType>
-            neighbor_cell_list;
-          for (const auto level_ghost_owner : level_ghost_owners)
-            neighbor_cell_list[level_ghost_owner] = {};
-
-          for (typename DoFHandlerType::level_cell_iterator cell =
-                 dof_handler.begin(0);
-               cell != dof_handler.end(0);
-               ++cell)
-            {
-              types::coarse_cell_id coarse_cell_id = 0;
-              try
-                {
-                  coarse_cell_id = cell->id().get_coarse_cell_id();
-                }
-              catch (...)
-                {
-                  // In the case of parallel::fullydistributed::Triangulation,
-                  // a dummy cell throws an exception which is caught here.
-                  // We ignore this cell here.
-                  continue;
-                };
-
-              find_marked_mg_ghost_cells_recursively(
-                tria, coarse_cell_id, cell, neighbor_cell_list);
-            }
-          Assert(level_ghost_owners.size() == neighbor_cell_list.size(),
-                 ExcInternalError());
-
-
-          // Before sending & receiving, make sure we protect this section with
-          // a mutex:
-          static Utilities::MPI::CollectiveMutex      mutex;
-          Utilities::MPI::CollectiveMutex::ScopedLock lock(
-            mutex, tria.get_communicator());
-
-          const int mpi_tag = Utilities::MPI::internal::Tags::
-            dofhandler_communicate_mg_ghost_cells;
-          const int mpi_tag_reply = Utilities::MPI::internal::Tags::
-            dofhandler_communicate_mg_ghost_cells_reply;
+          const auto pack = [&](const auto &cell) {
+            // why would somebody request a cell that is not ours?
+            Assert(cell->level_subdomain_id() == tria.locally_owned_subdomain(),
+                   ExcInternalError());
 
-          //* send our requests:
-          std::vector<MPI_Request> requests(level_ghost_owners.size());
-          {
-            unsigned int idx = 0;
-            for (const auto &it : neighbor_cell_list)
-              {
-                // send the data about the relevant cells
-                const int ierr =
-                  MPI_Isend(it.second.data(),
-                            it.second.size() * sizeof(it.second[0]),
-                            MPI_BYTE,
-                            it.first,
-                            mpi_tag,
-                            tria.get_communicator(),
-                            &requests[idx]);
-                AssertThrowMPI(ierr);
-                ++idx;
-              }
-          }
+            std::vector<types::global_dof_index> data(
+              cell->get_fe().dofs_per_cell);
+            cell->get_mg_dof_indices(data);
 
-          //* receive requests and reply with the ghost indices
-          std::vector<QuadrantBufferType> quadrant_data_to_send(
-            level_ghost_owners.size());
-          std::vector<std::vector<types::global_dof_index>>
-            send_dof_numbers_and_indices(level_ghost_owners.size());
-          std::vector<MPI_Request> reply_requests(level_ghost_owners.size());
+            return data;
+          };
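+
+          // unpack runs on the process that requested the cell: it merges
+          // the level dof indices received from the owning process into the
+          // cell and sets the cell's user flag if some indices are still
+          // unknown (clearing it otherwise)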
-          for (unsigned int idx = 0; idx < level_ghost_owners.size(); ++idx)
-            {
-              MPI_Status status;
-              int        ierr = MPI_Probe(MPI_ANY_SOURCE,
                                    mpi_tag,
                                    tria.get_communicator(),
                                    &status);
-              AssertThrowMPI(ierr);
+          const auto unpack = [](const auto &cell, const auto &dofs) {
+            Assert(cell->get_fe().dofs_per_cell == dofs.size(),
+                   ExcInternalError());
 
-              int len;
-              ierr = MPI_Get_count(&status, MPI_BYTE, &len);
-              AssertThrowMPI(ierr);
-              Assert(len % sizeof(quadrant_data_to_send[idx][0]) == 0,
-                     ExcInternalError());
+            Assert(cell->level_subdomain_id() !=
+                     dealii::numbers::artificial_subdomain_id,
+                   ExcInternalError());
 
-              const unsigned int n_cells =
-                len / sizeof(quadrant_data_to_send[idx][0]);
-              quadrant_data_to_send[idx].resize(n_cells);
-
-              ierr = MPI_Recv(quadrant_data_to_send[idx].data(),
-                              len,
-                              MPI_BYTE,
-                              status.MPI_SOURCE,
-                              status.MPI_TAG,
-                              tria.get_communicator(),
-                              &status);
-              AssertThrowMPI(ierr);
+            std::vector<types::global_dof_index> dof_indices(
+              cell->get_fe().dofs_per_cell);
+            cell->get_mg_dof_indices(dof_indices);
 
-              // store the dof indices for each cell
-              for (unsigned int c = 0; c < static_cast<unsigned int>(n_cells);
-                   ++c)
+            bool complete = true;
+            for (unsigned int i = 0; i < dof_indices.size(); ++i)
+              if (dofs[i] != numbers::invalid_dof_index)
                 {
-                  const auto temp =
-                    CellId(quadrant_data_to_send[idx][c].first, 0, nullptr)
-                      .to_cell(tria);
-
-                  typename DoFHandlerType::level_cell_iterator cell(
-                    &dof_handler.get_triangulation(),
-                    0,
-                    temp->index(),
-                    &dof_handler);
-
-                  get_mg_dofindices_recursively(
-                    tria,
-                    cell,
-                    quadrant_data_to_send[idx][c].second,
-                    send_dof_numbers_and_indices[idx]);
+                  Assert((dof_indices[i] == (numbers::invalid_dof_index)) ||
+                           (dof_indices[i] == dofs[i]),
+                         ExcInternalError());
+                  dof_indices[i] = dofs[i];
                 }
+              else
+                complete = false;
 
-              // send reply
-              ierr = MPI_Isend(send_dof_numbers_and_indices[idx].data(),
-                               send_dof_numbers_and_indices[idx].size(),
-                               DEAL_II_DOF_INDEX_MPI_TYPE,
-                               status.MPI_SOURCE,
-                               mpi_tag_reply,
-                               tria.get_communicator(),
-                               &reply_requests[idx]);
-              AssertThrowMPI(ierr);
-            }
-
-          //* finally receive the replies
-          for (unsigned int idx = 0; idx < level_ghost_owners.size(); ++idx)
-            {
-              MPI_Status status;
-              int ierr = MPI_Probe(MPI_ANY_SOURCE,
                                    mpi_tag_reply,
                                    tria.get_communicator(),
                                    &status);
-              AssertThrowMPI(ierr);
-              int len;
-              ierr = MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len);
-              const QuadrantBufferType &quadrants =
-                neighbor_cell_list[status.MPI_SOURCE];
-              AssertThrowMPI(ierr);
-              Assert((len > 0 && !quadrants.empty()) ||
                       (len == 0 && quadrants.empty()),
                      ExcInternalError());
-              std::vector<dealii::types::global_dof_index>
-                receive_dof_numbers_and_indices(len);
-
-              ierr = MPI_Recv(receive_dof_numbers_and_indices.data(),
                               len,
                               DEAL_II_DOF_INDEX_MPI_TYPE,
                               status.MPI_SOURCE,
                               status.MPI_TAG,
                               tria.get_communicator(),
                               &status);
-              AssertThrowMPI(ierr);
-
-              // set the dof indices for each cell
-              dealii::types::global_dof_index *dofs =
-                receive_dof_numbers_and_indices.data();
-              for (const auto &it : quadrants)
-                {
-                  const auto temp = CellId(it.first, 0, nullptr).to_cell(tria);
-
-                  typename DoFHandlerType::level_cell_iterator cell(
-                    &tria, 0, temp->index(), &dof_handler);
+            if (!complete)
+              const_cast<typename DoFHandlerType::level_cell_iterator &>(cell)
+                ->set_user_flag();
+            else
+              const_cast<typename DoFHandlerType::level_cell_iterator &>(cell)
+                ->clear_user_flag();
 
-                  Assert(cell->get_fe().dofs_per_cell == dofs[0],
-                         ExcInternalError());
+            const_cast<typename DoFHandlerType::level_cell_iterator &>(cell)
+              ->set_mg_dof_indices(dof_indices);
+          };
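+
+          // only cells whose user flag is set, i.e., cells whose level dof
+          // indices are not yet completely known, are requested from their
+          // owning processes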
-                  set_mg_dofindices_recursively(tria,
-                                                cell,
-                                                it.second,
-                                                dofs + 1);
-                  dofs += 1 + dofs[0];
-                }
-              Assert(dofs == receive_dof_numbers_and_indices.data() +
                                receive_dof_numbers_and_indices.size(),
                      ExcInternalError());
-            }
+          const auto filter = [](const auto &cell) {
+            return cell->user_flag_set();
+          };
 
-          // complete all sends, so that we can safely destroy the
-          // buffers.
-          if (requests.size() > 0)
-            {
-              const int ierr = MPI_Waitall(requests.size(),
-                                           requests.data(),
-                                           MPI_STATUSES_IGNORE);
-              AssertThrowMPI(ierr);
-            }
-          if (reply_requests.size() > 0)
-            {
-              const int ierr = MPI_Waitall(reply_requests.size(),
-                                           reply_requests.data(),
-                                           MPI_STATUSES_IGNORE);
-              AssertThrowMPI(ierr);
-            }
+          GridTools::exchange_cell_data_to_level_ghosts<
+            std::vector<types::global_dof_index>,
+            DoFHandlerType>(dof_handler, pack, unpack, filter);
         }