From f19bb7514059accc03f659ebcc05178a86db589e Mon Sep 17 00:00:00 2001 From: Daniel Arndt Date: Sun, 12 Nov 2017 21:35:40 +0100 Subject: [PATCH] Remove 'parallel' prefix in distributed/grid_tools --- include/deal.II/distributed/grid_tools.h | 727 +++++++++--------- source/distributed/grid_tools.cc | 131 ++-- source/distributed/grid_tools.inst.in | 2 - source/dofs/dof_handler_policy.cc | 2 +- source/hp/dof_handler.cc | 2 +- .../grid_tools_exchange_bounding_boxes_1.cc | 6 +- ...exchange_bounding_boxes_1.mpirun=12.output | 24 +- ..._exchange_bounding_boxes_1.mpirun=4.output | 8 +- .../grid_tools_exchange_cell_data_01.cc | 2 +- .../grid_tools_exchange_cell_data_02.cc | 2 +- .../grid_tools_exchange_cell_data_03.cc | 2 +- .../grid_tools_exchange_cell_data_04.cc | 2 +- 12 files changed, 446 insertions(+), 464 deletions(-) diff --git a/include/deal.II/distributed/grid_tools.h b/include/deal.II/distributed/grid_tools.h index 47606000f7..31e15b78d6 100644 --- a/include/deal.II/distributed/grid_tools.h +++ b/include/deal.II/distributed/grid_tools.h @@ -42,438 +42,425 @@ DEAL_II_ENABLE_EXTRA_DIAGNOSTICS DEAL_II_NAMESPACE_OPEN -namespace parallel +namespace GridTools { + /** + * Exchange arbitrary data of type @p DataType provided by the function + * objects from locally owned cells to ghost cells on other processors. + * + * After this call, you typically will have received data from @p unpack on + * every ghost cell as it was given by @p pack on the owning processor. + * Whether you do or do not receive information to @p unpack on a given + * ghost cell depends on whether the @p pack function decided that + * something needs to be sent. It does so using the boost::optional + * mechanism: if the boost::optional return object of the @p pack + * function is empty, then this implies that no data has to be sent for + * the locally owned cell it was called on. In that case, @p unpack will + * also not be called on the ghost cell that corresponds to it on the + * receiving side. On the other hand, if the boost::optional object is + * not empty, then the data stored within it will be sent to the received + * and the @p unpack function called with it. + * + * @tparam DataType The type of the data to be communicated. It is assumed + * to be serializable by boost::serialization. In many cases, this + * data type can not be deduced by the compiler, e.g., if you provide + * lambda functions for the second and third argument + * to this function. In this case, you have to explicitly specify + * the @p DataType as a template argument to the function call. + * @tparam MeshType The type of @p mesh. + * + * @param mesh A variable of a type that satisfies the requirements of the + * @ref ConceptMeshType "MeshType concept". + * @param pack The function that will be called on each locally owned cell + * that is a ghost cell somewhere else. As mentioned above, the function + * may return a regular data object of type @p DataType to indicate + * that data should be sent, or an empty + * boost::optional@ to indicate that nothing has + * to be sent for this cell. + * @param unpack The function that will be called for each ghost cell + * for which data was sent, i.e., for which the @p pack function + * on the sending side returned a non-empty boost::optional object. + * The @p unpack function is then called with the data sent by the + * processor that owns that cell. + * + * + *

An example

+ * + * Here is an example that shows how this function is to be used + * in a concrete context. It is taken from the code that makes + * sure that the @p active_fe_index (a single unsigned integer) is + * transported from locally owned cells where one can set it in + * hp::DoFHandler objects, to the corresponding ghost cells on + * other processors to ensure that one can query the right value + * also on those processors: + * @code + * auto pack + * = [] (const typename dealii::hp::DoFHandler::active_cell_iterator &cell) -> unsigned int + * { + * return cell->active_fe_index(); + * }; + * + * auto unpack + * = [] (const typename dealii::hp::DoFHandler::active_cell_iterator &cell, + * const unsigned int &active_fe_index) -> void + * { + * cell->set_active_fe_index(active_fe_index); + * }; + * + * GridTools::exchange_cell_data_to_ghosts> + * (dof_handler, pack, unpack); + * @endcode + * + * You will notice that the @p pack lambda function returns an `unsigned int`, + * not a `boost::optional`. The former converts automatically + * to the latter, implying that data will always be transported to the + * other processor. + * + * (In reality, the @p unpack function needs to be a bit more + * complicated because it is not allowed to call + * DoFAccessor::set_active_fe_index() on ghost cells. Rather, the + * @p unpack function directly accesses internal data structures. But + * you get the idea -- the code could, just as well, have exchanged + * material ids, user indices, boundary indictors, or any kind of other + * data with similar calls as the ones above.) + */ + template + void + exchange_cell_data_to_ghosts (const MeshType &mesh, + const std::function (const typename MeshType::active_cell_iterator &)> &pack, + const std::function &unpack); + + /* Exchange with all processors of the MPI communicator @p mpi_communicator the vector of bounding + * boxes @p local_bboxes. + * + * This function is meant to exchange bounding boxes describing the locally owned + * cells in a distributed triangulation obtained with the function + * GridTools::compute_mesh_predicate_bounding_box . + * + * The output vector's size is the number of processes of the MPI communicator: + * its i-th entry contains the vector @p local_bboxes of the i-th process. + */ + template + std::vector< std::vector< BoundingBox > > + exchange_local_bounding_boxes(const std::vector< BoundingBox > &local_bboxes, + MPI_Comm mpi_communicator); /** - * This namespace defines parallel algorithms that operate on meshes. + * A structure that allows the transfer of cell data of type @p T from one processor + * to another. It corresponds to a packed buffer that stores a vector of + * CellId and a vector of type @p T. + * + * This class facilitates the transfer by providing the save/load functions + * that are able to pack up the vector of CellId's and the associated + * data of type @p T into a stream. + * + * Type @p T is assumed to be serializable by boost::serialization (for + * example unsigned int or std::vector@). */ - namespace GridTools + template + struct CellDataTransferBuffer { /** - * Exchange arbitrary data of type @p DataType provided by the function - * objects from locally owned cells to ghost cells on other processors. - * - * After this call, you typically will have received data from @p unpack on - * every ghost cell as it was given by @p pack on the owning processor. 
- * Whether you do or do not receive information to @p unpack on a given - * ghost cell depends on whether the @p pack function decided that - * something needs to be sent. It does so using the boost::optional - * mechanism: if the boost::optional return object of the @p pack - * function is empty, then this implies that no data has to be sent for - * the locally owned cell it was called on. In that case, @p unpack will - * also not be called on the ghost cell that corresponds to it on the - * receiving side. On the other hand, if the boost::optional object is - * not empty, then the data stored within it will be sent to the received - * and the @p unpack function called with it. - * - * @tparam DataType The type of the data to be communicated. It is assumed - * to be serializable by boost::serialization. In many cases, this - * data type can not be deduced by the compiler, e.g., if you provide - * lambda functions for the second and third argument - * to this function. In this case, you have to explicitly specify - * the @p DataType as a template argument to the function call. - * @tparam MeshType The type of @p mesh. - * - * @param mesh A variable of a type that satisfies the requirements of the - * @ref ConceptMeshType "MeshType concept". - * @param pack The function that will be called on each locally owned cell - * that is a ghost cell somewhere else. As mentioned above, the function - * may return a regular data object of type @p DataType to indicate - * that data should be sent, or an empty - * boost::optional@ to indicate that nothing has - * to be sent for this cell. - * @param unpack The function that will be called for each ghost cell - * for which data was sent, i.e., for which the @p pack function - * on the sending side returned a non-empty boost::optional object. - * The @p unpack function is then called with the data sent by the - * processor that owns that cell. - * - * - *

An example

- * - * Here is an example that shows how this function is to be used - * in a concrete context. It is taken from the code that makes - * sure that the @p active_fe_index (a single unsigned integer) is - * transported from locally owned cells where one can set it in - * hp::DoFHandler objects, to the corresponding ghost cells on - * other processors to ensure that one can query the right value - * also on those processors: - * @code - * auto pack - * = [] (const typename dealii::hp::DoFHandler::active_cell_iterator &cell) -> unsigned int - * { - * return cell->active_fe_index(); - * }; - * - * auto unpack - * = [] (const typename dealii::hp::DoFHandler::active_cell_iterator &cell, - * const unsigned int &active_fe_index) -> void - * { - * cell->set_active_fe_index(active_fe_index); - * }; - * - * parallel::GridTools::exchange_cell_data_to_ghosts> - * (dof_handler, pack, unpack); - * @endcode - * - * You will notice that the @p pack lambda function returns an `unsigned int`, - * not a `boost::optional`. The former converts automatically - * to the latter, implying that data will always be transported to the - * other processor. - * - * (In reality, the @p unpack function needs to be a bit more - * complicated because it is not allowed to call - * DoFAccessor::set_active_fe_index() on ghost cells. Rather, the - * @p unpack function directly accesses internal data structures. But - * you get the idea -- the code could, just as well, have exchanged - * material ids, user indices, boundary indictors, or any kind of other - * data with similar calls as the ones above.) + * A vector to store IDs of cells to be transfered. */ - template - void - exchange_cell_data_to_ghosts (const MeshType &mesh, - const std::function (const typename MeshType::active_cell_iterator &)> &pack, - const std::function &unpack); + std::vector cell_ids; /** - * Exchange with all processors of the MPI communicator @p mpi_communicator the vector of bounding - * boxes @p local_bboxes. - * - * This function is meant to exchange bounding boxes describing the locally owned - * cells in a distributed triangulation obtained with the function - * GridTools::compute_mesh_predicate_bounding_box . - * - * The output vector's size is the number of processes of the MPI communicator: - * its i-th entry contains the vector @p local_bboxes of the i-th process. + * A vector of cell data to be transfered. */ - template - std::vector< std::vector< BoundingBox > > - exchange_local_bounding_boxes(const std::vector< BoundingBox > &local_bboxes, - MPI_Comm mpi_communicator); + std::vector data; /** - * A structure that allows the transfer of cell data of type @p T from one processor - * to another. It corresponds to a packed buffer that stores a vector of - * CellId and a vector of type @p T. + * Write the data of this object to a stream for the purpose of + * serialization. * - * This class facilitates the transfer by providing the save/load functions - * that are able to pack up the vector of CellId's and the associated - * data of type @p T into a stream. - * - * Type @p T is assumed to be serializable by boost::serialization (for - * example unsigned int or std::vector@). + * @pre The user is responsible to keep the size of @p data + * equal to the size as @p cell_ids . */ - template - struct CellDataTransferBuffer - { - /** - * A vector to store IDs of cells to be transfered. - */ - std::vector cell_ids; - - /** - * A vector of cell data to be transfered. 
- */ - std::vector data; - - /** - * Write the data of this object to a stream for the purpose of - * serialization. - * - * @pre The user is responsible to keep the size of @p data - * equal to the size as @p cell_ids . - */ - template - void save (Archive &ar, - const unsigned int version) const; - - /** - * Read the data of this object from a stream for the purpose of - * serialization. Throw away the previous content. - */ - template - void load (Archive &ar, - const unsigned int version); - - BOOST_SERIALIZATION_SPLIT_MEMBER() - - /** - * Pack the data that corresponds to this object into a buffer in - * the form of a vector of chars and return it. - */ - std::vector pack_data () const; - - /** - * Given a buffer in the form of an array of chars, unpack it and - * restore the current object to the state that it was when - * it was packed into said buffer by the pack_data() function. - */ - void unpack_data (const std::vector &buffer); - - }; + template + void save (Archive &ar, + const unsigned int version) const; - } + /** + * Read the data of this object from a stream for the purpose of + * serialization. Throw away the previous content. + */ + template + void load (Archive &ar, + const unsigned int version); + BOOST_SERIALIZATION_SPLIT_MEMBER() + + /** + * Pack the data that corresponds to this object into a buffer in + * the form of a vector of chars and return it. + */ + std::vector pack_data () const; + + /** + * Given a buffer in the form of an array of chars, unpack it and + * restore the current object to the state that it was when + * it was packed into said buffer by the pack_data() function. + */ + void unpack_data (const std::vector &buffer); + + }; } #ifndef DOXYGEN -namespace parallel +namespace GridTools { - namespace GridTools - { - template - template - void - CellDataTransferBuffer::save (Archive &ar, - const unsigned int /*version*/) const - { - Assert(cell_ids.size() == data.size(), - ExcDimensionMismatch(cell_ids.size(), data.size())); - // archive the cellids in an efficient binary format - const size_t n_cells = cell_ids.size(); - ar &n_cells; - for (auto &it : cell_ids) - { - CellId::binary_type binary_cell_id = it.template to_binary(); - ar &binary_cell_id; - } + template + template + void + CellDataTransferBuffer::save (Archive &ar, + const unsigned int /*version*/) const + { + Assert(cell_ids.size() == data.size(), + ExcDimensionMismatch(cell_ids.size(), data.size())); + // archive the cellids in an efficient binary format + const size_t n_cells = cell_ids.size(); + ar &n_cells; + for (auto &it : cell_ids) + { + CellId::binary_type binary_cell_id = it.template to_binary(); + ar &binary_cell_id; + } - ar &data; - } + ar &data; + } - template - template - void - CellDataTransferBuffer::load (Archive &ar, - const unsigned int /*version*/) - { - size_t n_cells; - ar &n_cells; - cell_ids.clear(); - cell_ids.reserve(n_cells); - for (unsigned int c=0; c + template + void + CellDataTransferBuffer::load (Archive &ar, + const unsigned int /*version*/) + { + size_t n_cells; + ar &n_cells; + cell_ids.clear(); + cell_ids.reserve(n_cells); + for (unsigned int c=0; c - std::vector - CellDataTransferBuffer::pack_data () const + template + std::vector + CellDataTransferBuffer::pack_data () const + { + // set up a buffer and then use it as the target of a compressing + // stream into which we serialize the current object + std::vector buffer; { - // set up a buffer and then use it as the target of a compressing - // stream into which we serialize the current object - 
std::vector buffer; - { #ifdef DEAL_II_WITH_ZLIB - boost::iostreams::filtering_ostream out; - out.push(boost::iostreams::gzip_compressor - (boost::iostreams::gzip_params - (boost::iostreams::gzip::best_compression))); - out.push(boost::iostreams::back_inserter(buffer)); - - boost::archive::binary_oarchive archive(out); - archive << *this; - out.flush(); + boost::iostreams::filtering_ostream out; + out.push(boost::iostreams::gzip_compressor + (boost::iostreams::gzip_params + (boost::iostreams::gzip::best_compression))); + out.push(boost::iostreams::back_inserter(buffer)); + + boost::archive::binary_oarchive archive(out); + archive << *this; + out.flush(); #else - std::ostringstream out; - boost::archive::binary_oarchive archive(out); - archive << *this; - const std::string &s = out.str(); - buffer.reserve(s.size()); - buffer.assign(s.begin(), s.end()); + std::ostringstream out; + boost::archive::binary_oarchive archive(out); + archive << *this; + const std::string &s = out.str(); + buffer.reserve(s.size()); + buffer.assign(s.begin(), s.end()); #endif - } - - return buffer; } + return buffer; + } + - template - void - CellDataTransferBuffer::unpack_data (const std::vector &buffer) - { - std::string decompressed_buffer; + template + void + CellDataTransferBuffer::unpack_data (const std::vector &buffer) + { + std::string decompressed_buffer; - // first decompress the buffer - { + // first decompress the buffer + { #ifdef DEAL_II_WITH_ZLIB - boost::iostreams::filtering_ostream decompressing_stream; - decompressing_stream.push(boost::iostreams::gzip_decompressor()); - decompressing_stream.push(boost::iostreams::back_inserter(decompressed_buffer)); - decompressing_stream.write (buffer.data(), buffer.size()); + boost::iostreams::filtering_ostream decompressing_stream; + decompressing_stream.push(boost::iostreams::gzip_decompressor()); + decompressing_stream.push(boost::iostreams::back_inserter(decompressed_buffer)); + decompressing_stream.write (buffer.data(), buffer.size()); #else - decompressed_buffer.assign (buffer.begin(), buffer.end()); + decompressed_buffer.assign (buffer.begin(), buffer.end()); #endif - } + } - // then restore the object from the buffer - std::istringstream in(decompressed_buffer); - boost::archive::binary_iarchive archive(in); + // then restore the object from the buffer + std::istringstream in(decompressed_buffer); + boost::archive::binary_iarchive archive(in); - archive >> *this; - } + archive >> *this; + } - template - void - exchange_cell_data_to_ghosts (const MeshType &mesh, - const std::function (const typename MeshType::active_cell_iterator &)> &pack, - const std::function &unpack) - { + template + void + exchange_cell_data_to_ghosts (const MeshType &mesh, + const std::function (const typename MeshType::active_cell_iterator &)> &pack, + const std::function &unpack) + { #ifndef DEAL_II_WITH_MPI - (void)mesh; - (void)pack; - (void)unpack; - Assert(false, ExcMessage("parallel::GridTools::exchange_cell_data_to_ghosts() requires MPI.")); + (void)mesh; + (void)pack; + (void)unpack; + Assert(false, ExcMessage("GridTools::exchange_cell_data_to_ghosts() requires MPI.")); #else - constexpr int dim = MeshType::dimension; - constexpr int spacedim = MeshType::space_dimension; - auto tria = - static_cast*>(&mesh.get_triangulation()); - Assert (tria != nullptr, - ExcMessage("The function exchange_cell_data_to_ghosts() only works with parallel triangulations.")); - - // map neighbor_id -> data_buffer where we accumulate the data to send - typedef std::map > - 
DestinationToBufferMap; - DestinationToBufferMap destination_to_data_buffer_map; - - std::map > - vertices_with_ghost_neighbors = tria->compute_vertices_with_ghost_neighbors(); - - for (auto cell : tria->active_cell_iterators()) - if (cell->is_locally_owned()) - { - std::set send_to; - for (unsigned int v=0; v::vertices_per_cell; ++v) - { - const std::map >::const_iterator - neighbor_subdomains_of_vertex - = vertices_with_ghost_neighbors.find (cell->vertex_index(v)); - - if (neighbor_subdomains_of_vertex == - vertices_with_ghost_neighbors.end()) - continue; - - Assert(neighbor_subdomains_of_vertex->second.size()!=0, - ExcInternalError()); - - send_to.insert(neighbor_subdomains_of_vertex->second.begin(), - neighbor_subdomains_of_vertex->second.end()); - } - - if (send_to.size() > 0) - { - // this cell's data needs to be sent to someone - typename MeshType::active_cell_iterator - mesh_it (tria, cell->level(), cell->index(), &mesh); - - const boost::optional data = pack(mesh_it); - - if (data) - { - const CellId cellid = cell->id(); - - for (auto it : send_to) - { - const dealii::types::subdomain_id subdomain = it; - - // find the data buffer for proc "subdomain" if it exists - // or create an empty one otherwise - typename DestinationToBufferMap::iterator p - = destination_to_data_buffer_map.insert (std::make_pair(subdomain, - CellDataTransferBuffer())) - .first; - - p->second.cell_ids.emplace_back(cellid); - p->second.data.emplace_back(data.get()); - } - } - } - } + constexpr int dim = MeshType::dimension; + constexpr int spacedim = MeshType::space_dimension; + auto tria = + static_cast*>(&mesh.get_triangulation()); + Assert (tria != nullptr, + ExcMessage("The function exchange_cell_data_to_ghosts() only works with parallel triangulations.")); + + // map neighbor_id -> data_buffer where we accumulate the data to send + typedef std::map > + DestinationToBufferMap; + DestinationToBufferMap destination_to_data_buffer_map; + + std::map > + vertices_with_ghost_neighbors = tria->compute_vertices_with_ghost_neighbors(); + + for (auto cell : tria->active_cell_iterators()) + if (cell->is_locally_owned()) + { + std::set send_to; + for (unsigned int v=0; v::vertices_per_cell; ++v) + { + const std::map >::const_iterator + neighbor_subdomains_of_vertex + = vertices_with_ghost_neighbors.find (cell->vertex_index(v)); + if (neighbor_subdomains_of_vertex == + vertices_with_ghost_neighbors.end()) + continue; - // 2. send our messages - std::set ghost_owners = tria->ghost_owners(); - const unsigned int n_ghost_owners = ghost_owners.size(); - std::vector > sendbuffers (n_ghost_owners); - std::vector requests (n_ghost_owners); + Assert(neighbor_subdomains_of_vertex->second.size()!=0, + ExcInternalError()); - unsigned int idx=0; - for (auto it = ghost_owners.begin(); - it!=ghost_owners.end(); - ++it, ++idx) - { - CellDataTransferBuffer &data = destination_to_data_buffer_map[*it]; - - // pack all the data into the buffer for this recipient and send it. 
- // keep data around till we can make sure that the packet has been - // received - sendbuffers[idx] = data.pack_data (); - const int ierr = MPI_Isend(sendbuffers[idx].data(), sendbuffers[idx].size(), - MPI_BYTE, *it, - 786, tria->get_communicator(), &requests[idx]); - AssertThrowMPI(ierr); + send_to.insert(neighbor_subdomains_of_vertex->second.begin(), + neighbor_subdomains_of_vertex->second.end()); + } + + if (send_to.size() > 0) + { + // this cell's data needs to be sent to someone + typename MeshType::active_cell_iterator + mesh_it (tria, cell->level(), cell->index(), &mesh); + + const boost::optional data = pack(mesh_it); + + if (data) + { + const CellId cellid = cell->id(); + + for (auto it : send_to) + { + const dealii::types::subdomain_id subdomain = it; + + // find the data buffer for proc "subdomain" if it exists + // or create an empty one otherwise + typename DestinationToBufferMap::iterator p + = destination_to_data_buffer_map.insert (std::make_pair(subdomain, + CellDataTransferBuffer())) + .first; + + p->second.cell_ids.emplace_back(cellid); + p->second.data.emplace_back(data.get()); + } + } + } } - // 3. receive messages - std::vector receive; - for (unsigned int idx=0; idxget_communicator(), &status); - AssertThrowMPI(ierr); - ierr = MPI_Get_count(&status, MPI_BYTE, &len); - AssertThrowMPI(ierr); - receive.resize(len); + // 2. send our messages + std::set ghost_owners = tria->ghost_owners(); + const unsigned int n_ghost_owners = ghost_owners.size(); + std::vector > sendbuffers (n_ghost_owners); + std::vector requests (n_ghost_owners); - char *ptr = receive.data(); - ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, - tria->get_communicator(), &status); - AssertThrowMPI(ierr); + unsigned int idx=0; + for (auto it = ghost_owners.begin(); + it!=ghost_owners.end(); + ++it, ++idx) + { + CellDataTransferBuffer &data = destination_to_data_buffer_map[*it]; + + // pack all the data into the buffer for this recipient and send it. + // keep data around till we can make sure that the packet has been + // received + sendbuffers[idx] = data.pack_data (); + const int ierr = MPI_Isend(sendbuffers[idx].data(), sendbuffers[idx].size(), + MPI_BYTE, *it, + 786, tria->get_communicator(), &requests[idx]); + AssertThrowMPI(ierr); + } - CellDataTransferBuffer cellinfo; - cellinfo.unpack_data(receive); + // 3. receive messages + std::vector receive; + for (unsigned int idx=0; idxget_communicator(), &status); + AssertThrowMPI(ierr); + ierr = MPI_Get_count(&status, MPI_BYTE, &len); + AssertThrowMPI(ierr); - DataType *data = cellinfo.data.data(); - for (unsigned int c=0; c::cell_iterator - tria_cell = cellinfo.cell_ids[c].to_cell(*tria); + receive.resize(len); - const typename MeshType::active_cell_iterator - cell (tria, tria_cell->level(), tria_cell->index(), &mesh); + char *ptr = receive.data(); + ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, + tria->get_communicator(), &status); + AssertThrowMPI(ierr); - unpack(cell, *data); - } - } + CellDataTransferBuffer cellinfo; + cellinfo.unpack_data(receive); - // make sure that all communication is finished - // when we leave this function. 
- if (requests.size()) - { - const int ierr = MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE); - AssertThrowMPI(ierr); - } -#endif // DEAL_II_WITH_MPI - } + DataType *data = cellinfo.data.data(); + for (unsigned int c=0; c::cell_iterator + tria_cell = cellinfo.cell_ids[c].to_cell(*tria); + const typename MeshType::active_cell_iterator + cell (tria, tria_cell->level(), tria_cell->index(), &mesh); + + unpack(cell, *data); + } + } + + // make sure that all communication is finished + // when we leave this function. + if (requests.size()) + { + const int ierr = MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE); + AssertThrowMPI(ierr); + } +#endif // DEAL_II_WITH_MPI } + } #endif // DOXYGEN diff --git a/source/distributed/grid_tools.cc b/source/distributed/grid_tools.cc index 0ab6fdbd21..89f092db52 100644 --- a/source/distributed/grid_tools.cc +++ b/source/distributed/grid_tools.cc @@ -22,85 +22,82 @@ DEAL_II_NAMESPACE_OPEN #ifdef DEAL_II_WITH_MPI -namespace parallel +namespace GridTools { - namespace GridTools + template + std::vector< std::vector< BoundingBox > > + exchange_local_bounding_boxes(const std::vector< BoundingBox > &local_bboxes, + MPI_Comm mpi_communicator) { - template - std::vector< std::vector< BoundingBox > > - exchange_local_bounding_boxes(const std::vector< BoundingBox > &local_bboxes, - MPI_Comm mpi_communicator) - { #ifndef DEAL_II_WITH_MPI - (void)local_bboxes; - (void)mpi_communicator; - Assert(false, ExcMessage("parallel::GridTools::exchange_local_bounding_boxes() requires MPI.")); + (void)local_bboxes; + (void)mpi_communicator; + Assert(false, ExcMessage("parallel::GridTools::exchange_local_bounding_boxes() requires MPI.")); #else - // Step 1: preparing data to be sent - unsigned int n_bboxes = local_bboxes.size(); - // Dimension of the array to be exchanged (number of double) - int n_local_data = 2*spacedim*n_bboxes; - // data array stores each entry of each point describing the bounding boxes - std::vector loc_data_array(n_local_data); - for (unsigned int i=0; i loc_data_array(n_local_data); + for (unsigned int i=0; i size_all_data(n_procs); + // Vector to store the size of loc_data_array for every process + std::vector size_all_data(n_procs); - // Exchanging the number of bboxes - MPI_Allgather(&n_local_data, 1, MPI_INT, - &(size_all_data[0]), 1, MPI_INT, - mpi_communicator); + // Exchanging the number of bboxes + MPI_Allgather(&n_local_data, 1, MPI_INT, + &(size_all_data[0]), 1, MPI_INT, + mpi_communicator); - // Now computing the the displacement, relative to recvbuf, - // at which to store the incoming data - std::vector rdispls(n_procs); - rdispls[0] = 0; - for (unsigned int i=1; i < n_procs; ++i) - rdispls[i] = rdispls[i-1] + size_all_data[i-1]; + // Now computing the the displacement, relative to recvbuf, + // at which to store the incoming data + std::vector rdispls(n_procs); + rdispls[0] = 0; + for (unsigned int i=1; i < n_procs; ++i) + rdispls[i] = rdispls[i-1] + size_all_data[i-1]; - // Step 3: exchange the data and bounding boxes: - // Allocating a vector to contain all the received data - std::vector data_array(rdispls.back() + size_all_data.back()); + // Step 3: exchange the data and bounding boxes: + // Allocating a vector to contain all the received data + std::vector data_array(rdispls.back() + size_all_data.back()); - MPI_Allgatherv(&(loc_data_array[0]), n_local_data, MPI_DOUBLE, - &(data_array[0]), &(size_all_data[0]), - &(rdispls[0]), MPI_DOUBLE, mpi_communicator); + MPI_Allgatherv(&(loc_data_array[0]), 
n_local_data, MPI_DOUBLE, + &(data_array[0]), &(size_all_data[0]), + &(rdispls[0]), MPI_DOUBLE, mpi_communicator); - // Step 4: create the array of bboxes for output - std::vector< std::vector< BoundingBox > > global_bboxes(n_procs); - unsigned int begin_idx = 0; - for (unsigned int i=0; i < n_procs; ++i) - { - // Number of local bounding boxes - unsigned int n_bbox_i = size_all_data[i]/(spacedim*2); - global_bboxes[i].resize(n_bbox_i); - for (unsigned int bbox=0; bbox p1,p2; // boundary points for bbox - for (unsigned int d=0; d loc_bbox(std::make_pair(p1,p2)); - global_bboxes[i][bbox] = loc_bbox; - } - // Shifting the first index to the start of the next vector - begin_idx += size_all_data[i]; - } - return global_bboxes; + // Step 4: create the array of bboxes for output + std::vector< std::vector< BoundingBox > > global_bboxes(n_procs); + unsigned int begin_idx = 0; + for (unsigned int i=0; i < n_procs; ++i) + { + // Number of local bounding boxes + unsigned int n_bbox_i = size_all_data[i]/(spacedim*2); + global_bboxes[i].resize(n_bbox_i); + for (unsigned int bbox=0; bbox p1,p2; // boundary points for bbox + for (unsigned int d=0; d loc_bbox(std::make_pair(p1,p2)); + global_bboxes[i][bbox] = loc_bbox; + } + // Shifting the first index to the start of the next vector + begin_idx += size_all_data[i]; + } + return global_bboxes; #endif // DEAL_II_WITH_MPI - } } } diff --git a/source/distributed/grid_tools.inst.in b/source/distributed/grid_tools.inst.in index 505e37cd5f..9e245a2678 100644 --- a/source/distributed/grid_tools.inst.in +++ b/source/distributed/grid_tools.inst.in @@ -15,12 +15,10 @@ for (deal_II_space_dimension : SPACE_DIMENSIONS) { - namespace parallel \{ namespace GridTools \{ template std::vector< std::vector< BoundingBox > > exchange_local_bounding_boxes(const std::vector< BoundingBox >&, MPI_Comm); \} - \} } diff --git a/source/dofs/dof_handler_policy.cc b/source/dofs/dof_handler_policy.cc index 1db96a4fab..11d9f2f37b 100644 --- a/source/dofs/dof_handler_policy.cc +++ b/source/dofs/dof_handler_policy.cc @@ -3615,7 +3615,7 @@ namespace internal const_cast(cell)->set_dof_indices(local_dof_indices); }; - parallel::GridTools::exchange_cell_data_to_ghosts, DoFHandlerType> + GridTools::exchange_cell_data_to_ghosts, DoFHandlerType> (dof_handler, pack, unpack); // finally update the cell DoF indices caches to make sure diff --git a/source/hp/dof_handler.cc b/source/hp/dof_handler.cc index 7a25290dfe..fdcf1ae777 100644 --- a/source/hp/dof_handler.cc +++ b/source/hp/dof_handler.cc @@ -938,7 +938,7 @@ namespace internal active_fe_index); }; - parallel::GridTools::exchange_cell_data_to_ghosts> + GridTools::exchange_cell_data_to_ghosts> (dof_handler, pack, unpack); } else diff --git a/tests/distributed_grids/grid_tools_exchange_bounding_boxes_1.cc b/tests/distributed_grids/grid_tools_exchange_bounding_boxes_1.cc index 141be4c297..b7f233911b 100644 --- a/tests/distributed_grids/grid_tools_exchange_bounding_boxes_1.cc +++ b/tests/distributed_grids/grid_tools_exchange_bounding_boxes_1.cc @@ -14,7 +14,7 @@ // --------------------------------------------------------------------- -// test parallel::GridTools::exchange_local_bounding_boxes +// test GridTools::exchange_local_bounding_boxes #include "../tests.h" #include @@ -52,7 +52,7 @@ void test_exchange_bbox() } std::vector< std::vector< BoundingBox > > global_boxes - = parallel::GridTools::exchange_local_bounding_boxes(loc_bboxes,mpi_communicator); + = GridTools::exchange_local_bounding_boxes(loc_bboxes,mpi_communicator); bool passed = 
true; @@ -121,7 +121,7 @@ int main (int argc, char *argv[]) Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1); MPILogInitAll log; - deallog << "Test: parallel::GridTools::exchange_local_bounding_boxes " << std::endl; + deallog << "Test: GridTools::exchange_local_bounding_boxes " << std::endl; test_exchange_bbox<1>(); test_exchange_bbox<2>(); diff --git a/tests/distributed_grids/grid_tools_exchange_bounding_boxes_1.mpirun=12.output b/tests/distributed_grids/grid_tools_exchange_bounding_boxes_1.mpirun=12.output index 2278be30b2..78f8812425 100644 --- a/tests/distributed_grids/grid_tools_exchange_bounding_boxes_1.mpirun=12.output +++ b/tests/distributed_grids/grid_tools_exchange_bounding_boxes_1.mpirun=12.output @@ -1,5 +1,5 @@ -DEAL:0::Test: parallel::GridTools::exchange_local_bounding_boxes +DEAL:0::Test: GridTools::exchange_local_bounding_boxes DEAL:0::Test for: dimension 1 DEAL:0::12 mpi processes DEAL:0::Test passed @@ -10,7 +10,7 @@ DEAL:0::Test for: dimension 3 DEAL:0::12 mpi processes DEAL:0::Test passed -DEAL:1::Test: parallel::GridTools::exchange_local_bounding_boxes +DEAL:1::Test: GridTools::exchange_local_bounding_boxes DEAL:1::Test for: dimension 1 DEAL:1::12 mpi processes DEAL:1::Test passed @@ -22,7 +22,7 @@ DEAL:1::12 mpi processes DEAL:1::Test passed -DEAL:2::Test: parallel::GridTools::exchange_local_bounding_boxes +DEAL:2::Test: GridTools::exchange_local_bounding_boxes DEAL:2::Test for: dimension 1 DEAL:2::12 mpi processes DEAL:2::Test passed @@ -34,7 +34,7 @@ DEAL:2::12 mpi processes DEAL:2::Test passed -DEAL:3::Test: parallel::GridTools::exchange_local_bounding_boxes +DEAL:3::Test: GridTools::exchange_local_bounding_boxes DEAL:3::Test for: dimension 1 DEAL:3::12 mpi processes DEAL:3::Test passed @@ -46,7 +46,7 @@ DEAL:3::12 mpi processes DEAL:3::Test passed -DEAL:4::Test: parallel::GridTools::exchange_local_bounding_boxes +DEAL:4::Test: GridTools::exchange_local_bounding_boxes DEAL:4::Test for: dimension 1 DEAL:4::12 mpi processes DEAL:4::Test passed @@ -58,7 +58,7 @@ DEAL:4::12 mpi processes DEAL:4::Test passed -DEAL:5::Test: parallel::GridTools::exchange_local_bounding_boxes +DEAL:5::Test: GridTools::exchange_local_bounding_boxes DEAL:5::Test for: dimension 1 DEAL:5::12 mpi processes DEAL:5::Test passed @@ -70,7 +70,7 @@ DEAL:5::12 mpi processes DEAL:5::Test passed -DEAL:6::Test: parallel::GridTools::exchange_local_bounding_boxes +DEAL:6::Test: GridTools::exchange_local_bounding_boxes DEAL:6::Test for: dimension 1 DEAL:6::12 mpi processes DEAL:6::Test passed @@ -82,7 +82,7 @@ DEAL:6::12 mpi processes DEAL:6::Test passed -DEAL:7::Test: parallel::GridTools::exchange_local_bounding_boxes +DEAL:7::Test: GridTools::exchange_local_bounding_boxes DEAL:7::Test for: dimension 1 DEAL:7::12 mpi processes DEAL:7::Test passed @@ -94,7 +94,7 @@ DEAL:7::12 mpi processes DEAL:7::Test passed -DEAL:8::Test: parallel::GridTools::exchange_local_bounding_boxes +DEAL:8::Test: GridTools::exchange_local_bounding_boxes DEAL:8::Test for: dimension 1 DEAL:8::12 mpi processes DEAL:8::Test passed @@ -106,7 +106,7 @@ DEAL:8::12 mpi processes DEAL:8::Test passed -DEAL:9::Test: parallel::GridTools::exchange_local_bounding_boxes +DEAL:9::Test: GridTools::exchange_local_bounding_boxes DEAL:9::Test for: dimension 1 DEAL:9::12 mpi processes DEAL:9::Test passed @@ -118,7 +118,7 @@ DEAL:9::12 mpi processes DEAL:9::Test passed -DEAL:10::Test: parallel::GridTools::exchange_local_bounding_boxes +DEAL:10::Test: GridTools::exchange_local_bounding_boxes DEAL:10::Test for: dimension 1 DEAL:10::12 
mpi processes DEAL:10::Test passed @@ -130,7 +130,7 @@ DEAL:10::12 mpi processes DEAL:10::Test passed -DEAL:11::Test: parallel::GridTools::exchange_local_bounding_boxes +DEAL:11::Test: GridTools::exchange_local_bounding_boxes DEAL:11::Test for: dimension 1 DEAL:11::12 mpi processes DEAL:11::Test passed diff --git a/tests/distributed_grids/grid_tools_exchange_bounding_boxes_1.mpirun=4.output b/tests/distributed_grids/grid_tools_exchange_bounding_boxes_1.mpirun=4.output index 6a35125161..096d23072e 100644 --- a/tests/distributed_grids/grid_tools_exchange_bounding_boxes_1.mpirun=4.output +++ b/tests/distributed_grids/grid_tools_exchange_bounding_boxes_1.mpirun=4.output @@ -1,5 +1,5 @@ -DEAL:0::Test: parallel::GridTools::exchange_local_bounding_boxes +DEAL:0::Test: GridTools::exchange_local_bounding_boxes DEAL:0::Test for: dimension 1 DEAL:0::4 mpi processes DEAL:0::Test passed @@ -10,7 +10,7 @@ DEAL:0::Test for: dimension 3 DEAL:0::4 mpi processes DEAL:0::Test passed -DEAL:1::Test: parallel::GridTools::exchange_local_bounding_boxes +DEAL:1::Test: GridTools::exchange_local_bounding_boxes DEAL:1::Test for: dimension 1 DEAL:1::4 mpi processes DEAL:1::Test passed @@ -22,7 +22,7 @@ DEAL:1::4 mpi processes DEAL:1::Test passed -DEAL:2::Test: parallel::GridTools::exchange_local_bounding_boxes +DEAL:2::Test: GridTools::exchange_local_bounding_boxes DEAL:2::Test for: dimension 1 DEAL:2::4 mpi processes DEAL:2::Test passed @@ -34,7 +34,7 @@ DEAL:2::4 mpi processes DEAL:2::Test passed -DEAL:3::Test: parallel::GridTools::exchange_local_bounding_boxes +DEAL:3::Test: GridTools::exchange_local_bounding_boxes DEAL:3::Test for: dimension 1 DEAL:3::4 mpi processes DEAL:3::Test passed diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_01.cc b/tests/distributed_grids/grid_tools_exchange_cell_data_01.cc index 224874cb55..717c1f7b67 100644 --- a/tests/distributed_grids/grid_tools_exchange_cell_data_01.cc +++ b/tests/distributed_grids/grid_tools_exchange_cell_data_01.cc @@ -42,7 +42,7 @@ void test () typedef typename parallel::distributed::Triangulation::active_cell_iterator cell_iterator; typedef double DT; DT counter = 0.0; - parallel::GridTools::exchange_cell_data_to_ghosts< + GridTools::exchange_cell_data_to_ghosts< DT, parallel::distributed::Triangulation> (tria, diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_02.cc b/tests/distributed_grids/grid_tools_exchange_cell_data_02.cc index fb27167676..a7abfd3676 100644 --- a/tests/distributed_grids/grid_tools_exchange_cell_data_02.cc +++ b/tests/distributed_grids/grid_tools_exchange_cell_data_02.cc @@ -49,7 +49,7 @@ void test () typedef typename DoFHandler::active_cell_iterator cell_iterator; typedef short DT; short counter = 0; - parallel::GridTools::exchange_cell_data_to_ghosts< + GridTools::exchange_cell_data_to_ghosts< DT, DoFHandler > (dofhandler, diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_03.cc b/tests/distributed_grids/grid_tools_exchange_cell_data_03.cc index f98f540194..0dec046d3d 100644 --- a/tests/distributed_grids/grid_tools_exchange_cell_data_03.cc +++ b/tests/distributed_grids/grid_tools_exchange_cell_data_03.cc @@ -47,7 +47,7 @@ void test () typedef typename parallel::shared::Triangulation::active_cell_iterator cell_iterator; typedef double DT; DT counter = 0.0; - parallel::GridTools::exchange_cell_data_to_ghosts< + GridTools::exchange_cell_data_to_ghosts< DT, parallel::shared::Triangulation> (tria, [&](const cell_iterator& cell) diff --git 
a/tests/distributed_grids/grid_tools_exchange_cell_data_04.cc b/tests/distributed_grids/grid_tools_exchange_cell_data_04.cc
index 447daed1fd..1e106ea7af 100644
--- a/tests/distributed_grids/grid_tools_exchange_cell_data_04.cc
+++ b/tests/distributed_grids/grid_tools_exchange_cell_data_04.cc
@@ -46,7 +46,7 @@ void test ()
   typedef typename parallel::distributed::Triangulation<dim>::active_cell_iterator cell_iterator;
   typedef double DT;
   int counter = 0;
-  parallel::GridTools::exchange_cell_data_to_ghosts<
+  GridTools::exchange_cell_data_to_ghosts<
     DT,
     parallel::distributed::Triangulation<dim>>
    (tria,
-- 
2.39.5
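
As a usage reference (not part of the patch): after this change, call sites simply drop the parallel:: prefix, as the hunks in source/dofs/dof_handler_policy.cc and source/hp/dof_handler.cc above show. The sketch below, modelled on the material-id remark in the header documentation, illustrates the updated spelling. The helper name update_ghost_material_ids and the choice of material ids as the exchanged data are illustrative assumptions, not code taken from this patch.

#include <deal.II/base/types.h>
#include <deal.II/distributed/grid_tools.h>
#include <deal.II/distributed/tria.h>

using namespace dealii;

// Hypothetical helper: copy material ids from locally owned cells to the
// matching ghost cells on other processors, using the renamed namespace.
template <int dim>
void update_ghost_material_ids (parallel::distributed::Triangulation<dim> &tria)
{
  typedef typename parallel::distributed::Triangulation<dim>::active_cell_iterator
    cell_iterator;

  // Called on locally owned cells that appear as ghost cells elsewhere;
  // returning a plain value converts to a non-empty boost::optional,
  // i.e., the data is always sent.
  auto pack = [] (const cell_iterator &cell) -> types::material_id
  {
    return cell->material_id();
  };

  // Called on the receiving side for every ghost cell for which data arrived.
  auto unpack = [] (const cell_iterator &cell,
                    const types::material_id &id) -> void
  {
    cell->set_material_id(id);
  };

  // Before this patch this read parallel::GridTools::exchange_cell_data_to_ghosts<...>.
  GridTools::exchange_cell_data_to_ghosts<
    types::material_id,
    parallel::distributed::Triangulation<dim>> (tria, pack, unpack);
}

The explicit template arguments mirror the existing tests: the data type usually cannot be deduced from the lambdas alone, so it is spelled out together with the mesh type.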