From: Peter Munch Date: Thu, 11 Jun 2020 17:33:31 +0000 (+0200) Subject: Merge implementations of exchange_cell_data_to_(level)_ghosts X-Git-Tag: v9.3.0-rc1~1420^2 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F10516%2Fhead;p=dealii.git Merge implementations of exchange_cell_data_to_(level)_ghosts --- diff --git a/include/deal.II/base/mpi_tags.h b/include/deal.II/base/mpi_tags.h index d4d401cacf..37b0ff45f0 100644 --- a/include/deal.II/base/mpi_tags.h +++ b/include/deal.II/base/mpi_tags.h @@ -62,11 +62,11 @@ namespace Utilities /// Triangulation::communicate_locally_moved_vertices() triangulation_communicate_locally_moved_vertices, - /// grid_tools.h: exchange_cell_data_to_level_ghosts() - exchange_cell_data_to_level_ghosts_request, + /// grid_tools.h: exchange_cell_ghosts() + exchange_cell_data_request, - /// grid_tools.h: exchange_cell_data_to_level_ghosts() - exchange_cell_data_to_level_ghosts_reply, + /// grid_tools.h: exchange_cell_ghosts() + exchange_cell_data_reply, /// mg_transfer_internal.cc: fill_copy_indices() mg_transfer_fill_copy_indices, diff --git a/include/deal.II/grid/grid_tools.h b/include/deal.II/grid/grid_tools.h index d3ea4ecffd..f650b814f3 100644 --- a/include/deal.II/grid/grid_tools.h +++ b/include/deal.II/grid/grid_tools.h @@ -2801,7 +2801,9 @@ namespace GridTools * on the sending side returned a non-empty std_cxx17::optional object. * The @p unpack function is then called with the data sent by the * processor that owns that cell. - * + * @param cell_filter Only cells are communicated where this filter function returns + * the value `true`. In the default case, the function returns true on all + * cells and thus, all relevant cells are communicated. * *

<h4> An example </h4>
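A rough sketch of how the extended interface can be called is given below; the enclosing function fill_ghost_data, the unsigned int payload, and the material-id based filter are illustrative assumptions and not part of this patch:

#include <deal.II/base/std_cxx17/optional.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/grid_tools.h>

using namespace dealii;

// Illustrative sketch: send one unsigned int per locally owned cell to the
// processes on which the cell is a ghost cell, restricted by the new
// cell_filter argument to cells with material id 1 (an assumption made for
// this example only).
template <int dim>
void
fill_ghost_data(parallel::distributed::Triangulation<dim> &tria)
{
  using cell_iterator =
    typename parallel::distributed::Triangulation<dim>::active_cell_iterator;

  GridTools::exchange_cell_data_to_ghosts<
    unsigned int,
    parallel::distributed::Triangulation<dim>>(
    tria,
    // pack: called on the owning process for each of its cells that is a
    // ghost cell somewhere else (and that passes the filter)
    [](const cell_iterator &cell) -> std_cxx17::optional<unsigned int> {
      return cell->active_cell_index();
    },
    // unpack: called on the receiving process for the matching ghost cell
    [](const cell_iterator &cell, const unsigned int &data) {
      (void)cell;
      (void)data; // consume the received value here
    },
    // cell_filter: only cells for which this returns true are communicated
    [](const cell_iterator &cell) { return cell->material_id() == 1; });
}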

* @@ -2852,7 +2854,10 @@ namespace GridTools const std::function( const typename MeshType::active_cell_iterator &)> &pack, const std::function & unpack); + const DataType &)> & unpack, + const std::function + &cell_filter = + [](const typename MeshType::active_cell_iterator &) { return true; }); /** * Exchange arbitrary data of type @p DataType provided by the function @@ -2860,7 +2865,7 @@ namespace GridTools * processes. * * In addition to the parameters of exchange_cell_data_to_ghosts(), this - * function allows to provide a @p filter function, which can be used to only + * function allows to provide a @p cell_filter function, which can be used to only * communicate marked cells. In the default case, all relevant cells are * communicated. */ @@ -2873,7 +2878,7 @@ namespace GridTools const std::function & unpack, const std::function - &filter = + &cell_filter = [](const typename MeshType::level_cell_iterator &) { return true; }); /* Exchange with all processors of the MPI communicator @p mpi_communicator the vector of bounding @@ -3883,182 +3888,282 @@ namespace GridTools } - - template - void - exchange_cell_data_to_ghosts( - const MeshType & mesh, - const std::function( - const typename MeshType::active_cell_iterator &)> &pack, - const std::function & unpack) + namespace internal { + template + void + exchange_cell_data( + const MeshType &mesh, + const std::function< + std_cxx17::optional(const MeshCellIteratorType &)> &pack, + const std::function + & unpack, + const std::function & cell_filter, + const std::function &)> &process_cells, + const std::function( + const parallel::TriangulationBase &)> + &compute_ghost_owners) + { # ifndef DEAL_II_WITH_MPI - (void)mesh; - (void)pack; - (void)unpack; - Assert(false, - ExcMessage( - "GridTools::exchange_cell_data_to_ghosts() requires MPI.")); + (void)mesh; + (void)pack; + (void)unpack; + (void)cell_filter; + (void)process_cells; + (void)compute_ghost_owners; + Assert(false, + ExcMessage("GridTools::exchange_cell_data() requires MPI.")); # else - constexpr int dim = MeshType::dimension; - constexpr int spacedim = MeshType::space_dimension; - auto tria = - dynamic_cast *>( - &mesh.get_triangulation()); - Assert( - tria != nullptr, - ExcMessage( - "The function exchange_cell_data_to_ghosts() only works with parallel triangulations.")); - - // map neighbor_id -> data_buffer where we accumulate the data to send - using DestinationToBufferMap = + constexpr int dim = MeshType::dimension; + constexpr int spacedim = MeshType::space_dimension; + auto tria = + dynamic_cast *>( + &mesh.get_triangulation()); + Assert( + tria != nullptr, + ExcMessage( + "The function exchange_cell_data_to_ghosts() only works with parallel triangulations.")); + + // build list of cells to request for each neighbor + std::set ghost_owners = + compute_ghost_owners(*tria); std::map>; - DestinationToBufferMap destination_to_data_buffer_map; + std::vector> + neighbor_cell_list; - std::map> - vertices_with_ghost_neighbors = - GridTools::compute_vertices_with_ghost_neighbors(*tria); + for (const auto ghost_owner : ghost_owners) + neighbor_cell_list[ghost_owner] = {}; - for (const auto &cell : tria->active_cell_iterators()) - if (cell->is_locally_owned()) - { - std::set send_to; - for (const unsigned int v : GeometryInfo::vertex_indices()) - { - const std::map>:: - const_iterator neighbor_subdomains_of_vertex = - vertices_with_ghost_neighbors.find(cell->vertex_index(v)); + process_cells([&](const auto &cell, const auto key) { + if (cell_filter(cell)) + 
neighbor_cell_list[key].emplace_back( + cell->id().template to_binary()); + }); - if (neighbor_subdomains_of_vertex == - vertices_with_ghost_neighbors.end()) - continue; + Assert(ghost_owners.size() == neighbor_cell_list.size(), + ExcInternalError()); - Assert(neighbor_subdomains_of_vertex->second.size() != 0, - ExcInternalError()); - send_to.insert(neighbor_subdomains_of_vertex->second.begin(), - neighbor_subdomains_of_vertex->second.end()); - } + // Before sending & receiving, make sure we protect this section with + // a mutex: + static Utilities::MPI::CollectiveMutex mutex; + Utilities::MPI::CollectiveMutex::ScopedLock lock( + mutex, tria->get_communicator()); + + const int mpi_tag = + Utilities::MPI::internal::Tags::exchange_cell_data_request; + const int mpi_tag_reply = + Utilities::MPI::internal::Tags::exchange_cell_data_reply; + + // send our requests: + std::vector requests(ghost_owners.size()); + { + unsigned int idx = 0; + for (const auto &it : neighbor_cell_list) + { + // send the data about the relevant cells + const int ierr = MPI_Isend(it.second.data(), + it.second.size() * sizeof(it.second[0]), + MPI_BYTE, + it.first, + mpi_tag, + tria->get_communicator(), + &requests[idx]); + AssertThrowMPI(ierr); + ++idx; + } + } - if (send_to.size() > 0) + using DestinationToBufferMap = + std::map>; + DestinationToBufferMap destination_to_data_buffer_map; + + // receive requests and reply with the ghost indices + std::vector> cell_data_to_send( + ghost_owners.size()); + std::vector> + send_dof_numbers_and_indices(ghost_owners.size()); + std::vector reply_requests(ghost_owners.size()); + std::vector> sendbuffers(ghost_owners.size()); + + for (unsigned int idx = 0; idx < ghost_owners.size(); ++idx) + { + MPI_Status status; + int ierr = MPI_Probe(MPI_ANY_SOURCE, + mpi_tag, + tria->get_communicator(), + &status); + AssertThrowMPI(ierr); + + int len; + ierr = MPI_Get_count(&status, MPI_BYTE, &len); + AssertThrowMPI(ierr); + Assert(len % sizeof(cell_data_to_send[idx][0]) == 0, + ExcInternalError()); + + const unsigned int n_cells = + len / sizeof(typename CellId::binary_type); + cell_data_to_send[idx].resize(n_cells); + + ierr = MPI_Recv(cell_data_to_send[idx].data(), + len, + MPI_BYTE, + status.MPI_SOURCE, + status.MPI_TAG, + tria->get_communicator(), + &status); + AssertThrowMPI(ierr); + + // store data for each cell + for (unsigned int c = 0; c < static_cast(n_cells); ++c) { - // this cell's data needs to be sent to someone - typename MeshType::active_cell_iterator mesh_it(tria, - cell->level(), - cell->index(), - &mesh); + const auto cell = + CellId(cell_data_to_send[idx][c]).to_cell(*tria); + MeshCellIteratorType mesh_it(tria, + cell->level(), + cell->index(), + &mesh); const std_cxx17::optional data = pack(mesh_it); if (data) { - const CellId cellid = cell->id(); - - for (const auto subdomain : send_to) - { - // find the data buffer for proc "subdomain" if it exists - // or create an empty one otherwise - typename DestinationToBufferMap::iterator p = - destination_to_data_buffer_map - .insert(std::make_pair( - subdomain, CellDataTransferBuffer())) - .first; - - p->second.cell_ids.emplace_back(cellid); - p->second.data.emplace_back(*data); - } + typename DestinationToBufferMap::iterator p = + destination_to_data_buffer_map + .insert(std::make_pair( + idx, + GridTools::CellDataTransferBuffer())) + .first; + + p->second.cell_ids.emplace_back(cell->id()); + p->second.data.emplace_back(*data); } } + + // send reply + GridTools::CellDataTransferBuffer &data = + 
destination_to_data_buffer_map[idx]; + + sendbuffers[idx] = + Utilities::pack(data, /*enable_compression*/ false); + ierr = MPI_Isend(sendbuffers[idx].data(), + sendbuffers[idx].size(), + MPI_BYTE, + status.MPI_SOURCE, + mpi_tag_reply, + tria->get_communicator(), + &reply_requests[idx]); + AssertThrowMPI(ierr); } - // Protect the following communication: - static Utilities::MPI::CollectiveMutex mutex; - Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, - tria->get_communicator()); + // finally receive the replies + std::vector receive; + for (unsigned int idx = 0; idx < ghost_owners.size(); ++idx) + { + MPI_Status status; + int ierr = MPI_Probe(MPI_ANY_SOURCE, + mpi_tag_reply, + tria->get_communicator(), + &status); + AssertThrowMPI(ierr); - const int mpi_tag = - Utilities::MPI::internal::Tags::exchange_cell_data_to_ghosts; + int len; + ierr = MPI_Get_count(&status, MPI_BYTE, &len); + AssertThrowMPI(ierr); - // 2. send our messages - std::set ghost_owners = tria->ghost_owners(); - const unsigned int n_ghost_owners = ghost_owners.size(); - std::vector> sendbuffers(n_ghost_owners); - std::vector requests(n_ghost_owners); + receive.resize(len); - unsigned int idx = 0; - for (auto it = ghost_owners.begin(); it != ghost_owners.end(); ++it, ++idx) - { - CellDataTransferBuffer &data = - destination_to_data_buffer_map[*it]; - - // pack all the data into the buffer for this recipient and send it. - // keep data around till we can make sure that the packet has been - // received - sendbuffers[idx] = Utilities::pack(data, /*enable_compression*/ false); - const int ierr = MPI_Isend(sendbuffers[idx].data(), - sendbuffers[idx].size(), - MPI_BYTE, - *it, - mpi_tag, - tria->get_communicator(), - &requests[idx]); - AssertThrowMPI(ierr); - } + char *ptr = receive.data(); + ierr = MPI_Recv(ptr, + len, + MPI_BYTE, + status.MPI_SOURCE, + status.MPI_TAG, + tria->get_communicator(), + &status); + AssertThrowMPI(ierr); - // 3. receive messages - std::vector receive; - for (unsigned int idx = 0; idx < n_ghost_owners; ++idx) - { - MPI_Status status; - int ierr = - MPI_Probe(MPI_ANY_SOURCE, mpi_tag, tria->get_communicator(), &status); - AssertThrowMPI(ierr); - - int len; - ierr = MPI_Get_count(&status, MPI_BYTE, &len); - AssertThrowMPI(ierr); - - receive.resize(len); - - char *ptr = receive.data(); - ierr = MPI_Recv(ptr, - len, - MPI_BYTE, - status.MPI_SOURCE, - status.MPI_TAG, - tria->get_communicator(), - &status); - AssertThrowMPI(ierr); - - auto cellinfo = - Utilities::unpack>( - receive, /*enable_compression*/ false); - - DataType *data = cellinfo.data.data(); - for (unsigned int c = 0; c < cellinfo.cell_ids.size(); ++c, ++data) - { - const typename Triangulation::cell_iterator - tria_cell = cellinfo.cell_ids[c].to_cell(*tria); + auto cellinfo = + Utilities::unpack>( + receive, /*enable_compression*/ false); - const typename MeshType::active_cell_iterator cell( - tria, tria_cell->level(), tria_cell->index(), &mesh); + DataType *data = cellinfo.data.data(); + for (unsigned int c = 0; c < cellinfo.cell_ids.size(); ++c, ++data) + { + const typename Triangulation::cell_iterator + tria_cell = cellinfo.cell_ids[c].to_cell(*tria); + + MeshCellIteratorType cell(tria, + tria_cell->level(), + tria_cell->index(), + &mesh); + + unpack(cell, *data); + } + } + + // make sure that all communication is finished + // when we leave this function. 
+ if (requests.size() > 0) + { + const int ierr = + MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE); + AssertThrowMPI(ierr); + } + if (reply_requests.size() > 0) + { + const int ierr = MPI_Waitall(reply_requests.size(), + reply_requests.data(), + MPI_STATUSES_IGNORE); + AssertThrowMPI(ierr); + } - unpack(cell, *data); - } - } - // make sure that all communication is finished - // when we leave this function. - if (requests.size()) - { - const int ierr = - MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE); - AssertThrowMPI(ierr); - } # endif // DEAL_II_WITH_MPI + } + + } // namespace internal + + template + void + exchange_cell_data_to_ghosts( + const MeshType & mesh, + const std::function( + const typename MeshType::active_cell_iterator &)> &pack, + const std::function & unpack, + const std::function + &cell_filter) + { +# ifndef DEAL_II_WITH_MPI + (void)mesh; + (void)pack; + (void)unpack; + (void)cell_filter; + Assert(false, + ExcMessage( + "GridTools::exchange_cell_data_to_ghosts() requires MPI.")); +# else + internal::exchange_cell_data( + mesh, + pack, + unpack, + cell_filter, + [&](const auto &process) { + for (const auto &cell : mesh.active_cell_iterators()) + if (cell->is_ghost()) + process(cell, cell->subdomain_id()); + }, + [](const auto &tria) { return tria.ghost_owners(); }); +# endif } @@ -4072,219 +4177,33 @@ namespace GridTools const std::function & unpack, const std::function - &filter) + &cell_filter) { # ifndef DEAL_II_WITH_MPI (void)mesh; (void)pack; (void)unpack; - (void)filter; + (void)cell_filter; Assert(false, ExcMessage( - "GridTools::exchange_cell_data_to_ghosts() requires MPI.")); + "GridTools::exchange_cell_data_to_level_ghosts() requires MPI.")); # else - constexpr int dim = MeshType::dimension; - constexpr int spacedim = MeshType::space_dimension; - auto tria = - dynamic_cast *>( - &mesh.get_triangulation()); - Assert( - tria != nullptr, - ExcMessage( - "The function exchange_cell_data_to_ghosts() only works with parallel triangulations.")); - - // build list of cells to request for each neighbor - std::set level_ghost_owners = - tria->level_ghost_owners(); - std::map> - neighbor_cell_list; - - for (const auto level_ghost_owner : level_ghost_owners) - neighbor_cell_list[level_ghost_owner] = {}; - - { - for (const auto &cell : mesh.cell_iterators()) - if (cell->level_subdomain_id() != - dealii::numbers::artificial_subdomain_id && - !cell->is_locally_owned_on_level()) - if (filter(cell)) - neighbor_cell_list[cell->level_subdomain_id()].emplace_back( - cell->id().template to_binary()); - } - - Assert(level_ghost_owners.size() == neighbor_cell_list.size(), - ExcInternalError()); - - - // Before sending & receiving, make sure we protect this section with - // a mutex: - static Utilities::MPI::CollectiveMutex mutex; - Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, - tria->get_communicator()); - - const int mpi_tag = Utilities::MPI::internal::Tags:: - exchange_cell_data_to_level_ghosts_request; - const int mpi_tag_reply = - Utilities::MPI::internal::Tags::exchange_cell_data_to_level_ghosts_reply; - - // send our requests: - std::vector requests(level_ghost_owners.size()); - { - unsigned int idx = 0; - for (const auto &it : neighbor_cell_list) - { - // send the data about the relevant cells - const int ierr = MPI_Isend(it.second.data(), - it.second.size() * sizeof(it.second[0]), - MPI_BYTE, - it.first, - mpi_tag, - tria->get_communicator(), - &requests[idx]); - AssertThrowMPI(ierr); - ++idx; - } - } - - using 
DestinationToBufferMap = - std::map>; - DestinationToBufferMap destination_to_data_buffer_map; - - // receive requests and reply with the ghost indices - std::vector> cell_data_to_send( - level_ghost_owners.size()); - std::vector> - send_dof_numbers_and_indices(level_ghost_owners.size()); - std::vector reply_requests(level_ghost_owners.size()); - std::vector> sendbuffers(level_ghost_owners.size()); - - for (unsigned int idx = 0; idx < level_ghost_owners.size(); ++idx) - { - MPI_Status status; - int ierr = - MPI_Probe(MPI_ANY_SOURCE, mpi_tag, tria->get_communicator(), &status); - AssertThrowMPI(ierr); - - int len; - ierr = MPI_Get_count(&status, MPI_BYTE, &len); - AssertThrowMPI(ierr); - Assert(len % sizeof(cell_data_to_send[idx][0]) == 0, - ExcInternalError()); - - const unsigned int n_cells = len / sizeof(typename CellId::binary_type); - cell_data_to_send[idx].resize(n_cells); - - ierr = MPI_Recv(cell_data_to_send[idx].data(), - len, - MPI_BYTE, - status.MPI_SOURCE, - status.MPI_TAG, - tria->get_communicator(), - &status); - AssertThrowMPI(ierr); - - // store data for each cell - for (unsigned int c = 0; c < static_cast(n_cells); ++c) - { - const auto cell = CellId(cell_data_to_send[idx][c]).to_cell(*tria); - - typename MeshType::level_cell_iterator mesh_it(tria, - cell->level(), - cell->index(), - &mesh); - const std_cxx17::optional data = pack(mesh_it); - - { - typename DestinationToBufferMap::iterator p = - destination_to_data_buffer_map - .insert(std::make_pair( - idx, GridTools::CellDataTransferBuffer())) - .first; - - p->second.cell_ids.emplace_back(cell->id()); - p->second.data.emplace_back(*data); - } - } - - // send reply - GridTools::CellDataTransferBuffer &data = - destination_to_data_buffer_map[idx]; - - sendbuffers[idx] = Utilities::pack(data, /*enable_compression*/ false); - ierr = MPI_Isend(sendbuffers[idx].data(), - sendbuffers[idx].size(), - MPI_BYTE, - status.MPI_SOURCE, - mpi_tag_reply, - tria->get_communicator(), - &reply_requests[idx]); - AssertThrowMPI(ierr); - } - - // finally receive the replies - std::vector receive; - for (unsigned int idx = 0; idx < level_ghost_owners.size(); ++idx) - { - MPI_Status status; - int ierr = MPI_Probe(MPI_ANY_SOURCE, - mpi_tag_reply, - tria->get_communicator(), - &status); - AssertThrowMPI(ierr); - - int len; - ierr = MPI_Get_count(&status, MPI_BYTE, &len); - AssertThrowMPI(ierr); - - receive.resize(len); - - char *ptr = receive.data(); - ierr = MPI_Recv(ptr, - len, - MPI_BYTE, - status.MPI_SOURCE, - status.MPI_TAG, - tria->get_communicator(), - &status); - AssertThrowMPI(ierr); - - auto cellinfo = - Utilities::unpack>( - receive, /*enable_compression*/ false); - - DataType *data = cellinfo.data.data(); - for (unsigned int c = 0; c < cellinfo.cell_ids.size(); ++c, ++data) - { - const typename Triangulation::cell_iterator - tria_cell = cellinfo.cell_ids[c].to_cell(*tria); - - const typename MeshType::level_cell_iterator cell( - tria, tria_cell->level(), tria_cell->index(), &mesh); - - unpack(cell, *data); - } - } - - // make sure that all communication is finished - // when we leave this function. 
- if (requests.size() > 0) - { - const int ierr = - MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE); - AssertThrowMPI(ierr); - } - if (reply_requests.size() > 0) - { - const int ierr = MPI_Waitall(reply_requests.size(), - reply_requests.data(), - MPI_STATUSES_IGNORE); - AssertThrowMPI(ierr); - } - - -# endif // DEAL_II_WITH_MPI + internal::exchange_cell_data( + mesh, + pack, + unpack, + cell_filter, + [&](const auto &process) { + for (const auto &cell : mesh.cell_iterators()) + if (cell->level_subdomain_id() != + dealii::numbers::artificial_subdomain_id && + !cell->is_locally_owned_on_level()) + process(cell, cell->level_subdomain_id()); + }, + [](const auto &tria) { return tria.level_ghost_owners(); }); +# endif } } // namespace GridTools diff --git a/source/dofs/dof_handler_policy.cc b/source/dofs/dof_handler_policy.cc index fb7fb4ff66..290e0fed1f 100644 --- a/source/dofs/dof_handler_policy.cc +++ b/source/dofs/dof_handler_policy.cc @@ -3679,11 +3679,10 @@ namespace internal template void communicate_dof_indices_on_marked_cells( - const DoFHandlerType &dof_handler, - const std::map> &) + const DoFHandlerType &dof_handler) { # ifndef DEAL_II_WITH_MPI - (void)vertices_with_ghost_neighbors; + (void)dof_handler; Assert(false, ExcNotImplemented()); # else const unsigned int dim = DoFHandlerType::dimension; @@ -3692,124 +3691,57 @@ namespace internal // define functions that pack data on cells that are ghost cells // somewhere else, and unpack data on cells where we get information // from elsewhere - auto pack = - [](const typename DoFHandlerType::active_cell_iterator &cell) - -> std_cxx17::optional> { + const auto pack = [](const auto &cell) { Assert(cell->is_locally_owned(), ExcInternalError()); - // first see whether we need to do anything at all on this cell. - // this is determined by whether the user_flag is set on the - // cell that indicates that the *complete* set of DoF indices - // has not been sent - if (cell->user_flag_set()) - { - // get dof indices for the current cell - std::vector local_dof_indices( - cell->get_fe().dofs_per_cell); - cell->get_dof_indices(local_dof_indices); - - // now see if there are dof indices that were previously - // unknown. this can only happen in phase 1, and in - // that case we know that the user flag must have been set - // - // in any case, if the cell *is* complete, we do not - // need to send the data any more in the next phase. indicate - // this by removing the user flag - if (std::find(local_dof_indices.begin(), - local_dof_indices.end(), - numbers::invalid_dof_index) != - local_dof_indices.end()) - { - Assert(cell->user_flag_set(), ExcInternalError()); - } - else - cell->clear_user_flag(); - - return local_dof_indices; - } - else - { - // the fact that the user flag wasn't set means that there is - // nothing we need to send that hasn't been sent so far. 
- // so return an empty array, but also verify that indeed - // the cell is complete -# ifdef DEBUG - std::vector local_dof_indices( - cell->get_fe().dofs_per_cell); - cell->get_dof_indices(local_dof_indices); + std::vector data( + cell->get_fe().dofs_per_cell); + cell->get_dof_indices(data); - const bool is_complete = - (std::find(local_dof_indices.begin(), - local_dof_indices.end(), - numbers::invalid_dof_index) == - local_dof_indices.end()); - Assert(is_complete, ExcInternalError()); -# endif - return std_cxx17::optional< - std::vector>(); - } + return data; }; - auto unpack = - [](const typename DoFHandlerType::active_cell_iterator &cell, - const std::vector &received_dof_indices) - -> void { - // this function should only be called on ghost cells, and - // on top of that, only on cells that have not been - // completed -- which we indicate via the user flag. - // check both + const auto unpack = [](const auto &cell, const auto &dofs) { + Assert(cell->get_fe().dofs_per_cell == dofs.size(), + ExcInternalError()); + Assert(cell->is_ghost(), ExcInternalError()); - Assert(cell->user_flag_set(), ExcInternalError()); - // if we just got an incomplete array of DoF indices, then we must - // be in the first ghost exchange and the user flag must have been - // set. we tested that already above. - // - // if we did get a complete array, then we may be in the first - // or second ghost exchange, but in any case we need not exchange - // another time. so delete the user flag - const bool is_complete = (std::find(received_dof_indices.begin(), - received_dof_indices.end(), - numbers::invalid_dof_index) == - received_dof_indices.end()); - if (is_complete) - cell->clear_user_flag(); - - // in any case, set the DoF indices on this cell. some - // of the ones we received may still be invalid because - // the sending processor did not know them yet, so we - // need to merge the ones we get with those that are - // already set here and may have already been known. for - // those that we already know *and* get, they must obviously - // agree - // - // before getting the local dof indices, we need to update the - // cell dof indices cache because we may have set dof indices - // on a neighboring ghost cell before this one, which may have - // affected the dof indices we know about the current cell - std::vector local_dof_indices( + std::vector dof_indices( cell->get_fe().dofs_per_cell); cell->update_cell_dof_indices_cache(); - cell->get_dof_indices(local_dof_indices); + cell->get_dof_indices(dof_indices); - for (unsigned int i = 0; i < local_dof_indices.size(); ++i) - if (local_dof_indices[i] == numbers::invalid_dof_index) - local_dof_indices[i] = received_dof_indices[i]; + bool complete = true; + for (unsigned int i = 0; i < dof_indices.size(); ++i) + if (dofs[i] != numbers::invalid_dof_index) + { + Assert((dof_indices[i] == (numbers::invalid_dof_index)) || + (dof_indices[i] == dofs[i]), + ExcInternalError()); + dof_indices[i] = dofs[i]; + } else - // we already know the dof index. 
check that there - // is no conflict - Assert((received_dof_indices[i] == - numbers::invalid_dof_index) || - (received_dof_indices[i] == local_dof_indices[i]), - ExcInternalError()); + complete = false; + + if (!complete) + const_cast(cell) + ->set_user_flag(); + else + const_cast(cell) + ->clear_user_flag(); const_cast(cell) - ->set_dof_indices(local_dof_indices); + ->set_dof_indices(dof_indices); + }; + + const auto filter = [](const auto &cell) { + return cell->user_flag_set(); }; GridTools::exchange_cell_data_to_ghosts< std::vector, - DoFHandlerType>(dof_handler, pack, unpack); + DoFHandlerType>(dof_handler, pack, unpack, filter); // finally update the cell DoF indices caches to make sure // our internal data structures are consistent @@ -3999,39 +3931,20 @@ namespace internal triangulation->save_user_flags(user_flags); triangulation->clear_user_flags(); - // figure out which cells are ghost cells on which we have - // to exchange DoF indices - const std::map> - vertices_with_ghost_neighbors = - GridTools::compute_vertices_with_ghost_neighbors(*triangulation); - // mark all cells that either have to send data (locally // owned cells that are adjacent to ghost neighbors in some // way) or receive data (all ghost cells) via the user flags for (const auto &cell : dof_handler->active_cell_iterators()) - if (cell->is_locally_owned()) - { - for (const unsigned int v : GeometryInfo::vertex_indices()) - if (vertices_with_ghost_neighbors.find(cell->vertex_index( - v)) != vertices_with_ghost_neighbors.end()) - { - cell->set_user_flag(); - break; - } - } - else if (cell->is_ghost()) + if (cell->is_ghost()) cell->set_user_flag(); - - // Send and receive cells. After this, only the local cells // are marked, that received new data. This has to be // communicated in a second communication step. // // as explained in the 'distributed' paper, this has to be // done twice - communicate_dof_indices_on_marked_cells( - *dof_handler, vertices_with_ghost_neighbors); + communicate_dof_indices_on_marked_cells(*dof_handler); // in case of hp::DoFHandlers, we may have received valid // indices of degrees of freedom that are dominated by a fe @@ -4045,8 +3958,7 @@ namespace internal // DoF indices set. however, some ghost cells // may still have invalid ones. thus, exchange // one more time. - communicate_dof_indices_on_marked_cells( - *dof_handler, vertices_with_ghost_neighbors); + communicate_dof_indices_on_marked_cells(*dof_handler); // at this point, we must have taken care of the data transfer // on all cells we had previously marked. verify this @@ -4547,17 +4459,9 @@ namespace internal // mark all own cells for transfer for (const auto &cell : dof_handler->active_cell_iterators()) - if (!cell->is_artificial()) + if (cell->is_ghost()) cell->set_user_flag(); - // figure out which cells are ghost cells on which we have - // to exchange DoF indices - const std::map> - vertices_with_ghost_neighbors = - GridTools::compute_vertices_with_ghost_neighbors( - *triangulation); - // Send and receive cells. After this, only the local cells // are marked, that received new data. 
This has to be @@ -4565,8 +4469,7 @@ namespace internal // // as explained in the 'distributed' paper, this has to be // done twice - communicate_dof_indices_on_marked_cells( - *dof_handler, vertices_with_ghost_neighbors); + communicate_dof_indices_on_marked_cells(*dof_handler); // in case of hp::DoFHandlers, we may have received valid // indices of degrees of freedom that are dominated by a fe @@ -4576,8 +4479,7 @@ namespace internal Implementation::merge_invalid_dof_indices_on_ghost_interfaces( *dof_handler); - communicate_dof_indices_on_marked_cells( - *dof_handler, vertices_with_ghost_neighbors); + communicate_dof_indices_on_marked_cells(*dof_handler); triangulation->load_user_flags(user_flags); } diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_01.cc b/tests/distributed_grids/grid_tools_exchange_cell_data_01.cc index 8696acc458..22fe413e1d 100644 --- a/tests/distributed_grids/grid_tools_exchange_cell_data_01.cc +++ b/tests/distributed_grids/grid_tools_exchange_cell_data_01.cc @@ -40,20 +40,48 @@ test() GridGenerator::hyper_cube(tria); tria.refine_global(2); - std::set output; + std::set input, output; + + + typedef double DT; + std::map map; + DT counter = 0.0; + + std::map> + vertices_with_ghost_neighbors = + GridTools::compute_vertices_with_ghost_neighbors(tria); + + for (const auto &cell : tria.active_cell_iterators()) + if (cell->is_locally_owned()) + { + for (const unsigned int v : GeometryInfo::vertex_indices()) + { + const std::map>:: + const_iterator neighbor_subdomains_of_vertex = + vertices_with_ghost_neighbors.find(cell->vertex_index(v)); + + if (neighbor_subdomains_of_vertex != + vertices_with_ghost_neighbors.end()) + { + map[cell->id()] = ++counter; + break; + } + } + } typedef typename parallel::distributed::Triangulation::active_cell_iterator - cell_iterator; - typedef double DT; - DT counter = 0.0; + cell_iterator; GridTools:: exchange_cell_data_to_ghosts>( tria, [&](const cell_iterator &cell) { - DT value = ++counter; + DT value = map[cell->id()]; + std::ostringstream oss; + oss << "pack " << cell->id() << " " << value; + input.insert(oss.str()); - deallog << "pack " << cell->id() << " " << value << std::endl; return value; }, [&](const cell_iterator &cell, const DT &data) { @@ -65,6 +93,8 @@ test() }); // sort the output because it will come in in random order + for (auto &it : input) + deallog << it << std::endl; for (auto &it : output) deallog << it << std::endl; } diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_02.cc b/tests/distributed_grids/grid_tools_exchange_cell_data_02.cc index a7ae3eae06..c0e61d6555 100644 --- a/tests/distributed_grids/grid_tools_exchange_cell_data_02.cc +++ b/tests/distributed_grids/grid_tools_exchange_cell_data_02.cc @@ -49,17 +49,44 @@ test() DoFHandler dofhandler(tria); dofhandler.distribute_dofs(fe); - std::set output; + std::set input, output; typedef typename DoFHandler::active_cell_iterator cell_iterator; typedef short DT; + std::map map; short counter = 0; + + std::map> + vertices_with_ghost_neighbors = + GridTools::compute_vertices_with_ghost_neighbors(tria); + + for (const auto &cell : tria.active_cell_iterators()) + if (cell->is_locally_owned()) + { + for (const unsigned int v : GeometryInfo::vertex_indices()) + { + const std::map>:: + const_iterator neighbor_subdomains_of_vertex = + vertices_with_ghost_neighbors.find(cell->vertex_index(v)); + + if (neighbor_subdomains_of_vertex != + vertices_with_ghost_neighbors.end()) + { + map[cell->id()] = ++counter; + break; + } + } + } + 
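+  // the pack callback below looks up the value that was precomputed for each
+  // cell in "map" above, instead of incrementing a counter on the fly; the
+  // value attached to a cell is therefore independent of the order in which
+  // the cells are visited during the exchange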
GridTools::exchange_cell_data_to_ghosts>( dofhandler, [&](const cell_iterator &cell) { - DT value = ++counter; + DT value = map[cell->id()]; + std::ostringstream oss; + oss << "pack " << cell->id() << " " << value; + input.insert(oss.str()); - deallog << "pack " << cell->id() << " " << value << std::endl; return value; }, [&](const cell_iterator &cell, const DT &data) { @@ -71,6 +98,8 @@ test() }); // sort the output because it will come in in random order + for (auto &it : input) + deallog << it << std::endl; for (auto &it : output) deallog << it << std::endl; } diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_03.cc b/tests/distributed_grids/grid_tools_exchange_cell_data_03.cc index 35ff6c1195..f801858bee 100644 --- a/tests/distributed_grids/grid_tools_exchange_cell_data_03.cc +++ b/tests/distributed_grids/grid_tools_exchange_cell_data_03.cc @@ -46,19 +46,46 @@ test() GridGenerator::hyper_cube(tria); tria.refine_global(2); - std::set output; + std::set input, output; typedef typename parallel::shared::Triangulation::active_cell_iterator - cell_iterator; - typedef double DT; - DT counter = 0.0; + cell_iterator; + typedef double DT; + std::map map; + DT counter = 0.0; + + std::map> + vertices_with_ghost_neighbors = + GridTools::compute_vertices_with_ghost_neighbors(tria); + + for (const auto &cell : tria.active_cell_iterators()) + if (cell->is_locally_owned()) + { + for (const unsigned int v : GeometryInfo::vertex_indices()) + { + const std::map>:: + const_iterator neighbor_subdomains_of_vertex = + vertices_with_ghost_neighbors.find(cell->vertex_index(v)); + + if (neighbor_subdomains_of_vertex != + vertices_with_ghost_neighbors.end()) + { + map[cell->id()] = ++counter; + break; + } + } + } + GridTools::exchange_cell_data_to_ghosts>( tria, [&](const cell_iterator &cell) { - DT value = ++counter; + DT value = map[cell->id()]; + std::ostringstream oss; + oss << "pack " << cell->id() << " " << value; + input.insert(oss.str()); - deallog << "pack " << cell->id() << " " << value << std::endl; return value; }, [&](const cell_iterator &cell, const DT &data) { @@ -70,6 +97,8 @@ test() }); // sort the output because it will come in in random order + for (auto &it : input) + deallog << it << std::endl; for (auto &it : output) deallog << it << std::endl; } diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_04.cc b/tests/distributed_grids/grid_tools_exchange_cell_data_04.cc index 1375538165..7cab39e2ae 100644 --- a/tests/distributed_grids/grid_tools_exchange_cell_data_04.cc +++ b/tests/distributed_grids/grid_tools_exchange_cell_data_04.cc @@ -44,28 +44,57 @@ test() GridGenerator::hyper_cube(tria); tria.refine_global(2); - std::set output; + std::map input; + std::set output; typedef typename parallel::distributed::Triangulation::active_cell_iterator - cell_iterator; - typedef double DT; - int counter = 0; + cell_iterator; + typedef double DT; + std::map map; + int counter = 0; + + std::map> + vertices_with_ghost_neighbors = + GridTools::compute_vertices_with_ghost_neighbors(tria); + + for (const auto &cell : tria.active_cell_iterators()) + if (cell->is_locally_owned()) + { + for (const unsigned int v : GeometryInfo::vertex_indices()) + { + const std::map>:: + const_iterator neighbor_subdomains_of_vertex = + vertices_with_ghost_neighbors.find(cell->vertex_index(v)); + + if (neighbor_subdomains_of_vertex != + vertices_with_ghost_neighbors.end()) + { + map[cell->id()] = ++counter; + break; + } + } + } + GridTools:: exchange_cell_data_to_ghosts>( tria, [&](const 
cell_iterator &cell) -> std_cxx17::optional<DT>
{ - ++counter; + const auto counter = map[cell->id()]; + std::ostringstream oss; if (counter % 2 == 0) { DT value = counter; - deallog << "pack " << cell->id() << " " << value << std::endl; + oss << "pack " << cell->id() << " " << value; + input[cell->id()] = oss.str(); return value; } else { - deallog << "skipping " << cell->id() << ' ' << counter << std::endl; + oss << "skipping " << cell->id() << ' ' << counter; + input[cell->id()] = oss.str(); return std_cxx17::optional
<DT>(); } }, @@ -78,6 +107,8 @@ test() }); // sort the output because it will come in in random order + for (auto &it : input) + deallog << it.second << std::endl; for (auto &it : output) deallog << it << std::endl; }
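Since both public functions now share the same internal request/reply implementation, the level-ghost variant is called in the same way. A minimal sketch, assuming a DoFHandler whose parallel triangulation was constructed with a multigrid hierarchy; the function name and the double payload are illustrative:

#include <deal.II/base/std_cxx17/optional.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/grid/grid_tools.h>

using namespace dealii;

// Illustrative sketch: send one double per level cell from the process that
// owns the cell on its level to the processes that see it as a level ghost
// cell. The optional cell_filter argument is left at its default, i.e. all
// relevant cells are communicated.
template <int dim>
void
exchange_level_data(const DoFHandler<dim> &dof_handler)
{
  using cell_iterator = typename DoFHandler<dim>::level_cell_iterator;

  GridTools::exchange_cell_data_to_level_ghosts<double, DoFHandler<dim>>(
    dof_handler,
    // pack: called on the process that owns the requested level cell
    [](const cell_iterator &cell) -> std_cxx17::optional<double> {
      return static_cast<double>(cell->level());
    },
    // unpack: called on the corresponding level ghost cell
    [](const cell_iterator &cell, const double &data) {
      (void)cell;
      (void)data; // consume the received value here
    });
}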