From 009f27bf7a241191d1df31e2818698c4c6d05293 Mon Sep 17 00:00:00 2001 From: Daniel Arndt Date: Mon, 13 Nov 2017 12:37:24 +0100 Subject: [PATCH] Merge distributed/grid_tools into grid/grid_tools --- include/deal.II/distributed/grid_tools.h | 470 ------------------ include/deal.II/grid/grid_tools.h | 431 +++++++++++++++- source/distributed/CMakeLists.txt | 2 - source/distributed/grid_tools.cc | 108 ---- source/distributed/grid_tools.inst.in | 24 - source/dofs/dof_handler_policy.cc | 2 +- source/grid/grid_tools.cc | 77 +++ source/grid/grid_tools.inst.in | 4 + source/hp/dof_handler.cc | 2 +- .../grid_tools_exchange_bounding_boxes_1.cc | 2 +- .../grid_tools_exchange_cell_data_01.cc | 1 - .../grid_tools_exchange_cell_data_02.cc | 1 - .../grid_tools_exchange_cell_data_03.cc | 1 - .../grid_tools_exchange_cell_data_04.cc | 1 - 14 files changed, 514 insertions(+), 612 deletions(-) delete mode 100644 include/deal.II/distributed/grid_tools.h delete mode 100644 source/distributed/grid_tools.cc delete mode 100644 source/distributed/grid_tools.inst.in diff --git a/include/deal.II/distributed/grid_tools.h b/include/deal.II/distributed/grid_tools.h deleted file mode 100644 index 31e15b78d6..0000000000 --- a/include/deal.II/distributed/grid_tools.h +++ /dev/null @@ -1,470 +0,0 @@ -// --------------------------------------------------------------------- -// -// Copyright (C) 2017 by the deal.II authors -// -// This file is part of the deal.II library. -// -// The deal.II library is free software; you can use it, redistribute -// it, and/or modify it under the terms of the GNU Lesser General -// Public License as published by the Free Software Foundation; either -// version 2.1 of the License, or (at your option) any later version. -// The full text of the license can be found in the file LICENSE at -// the top level of the deal.II distribution. -// -// --------------------------------------------------------------------- - -#ifndef dealii_distributed_grid_tools_h -#define dealii_distributed_grid_tools_h - - -#include -#include -#include -#include - -DEAL_II_DISABLE_EXTRA_DIAGNOSTICS -#include -#include -#include -#include -#include - -#ifdef DEAL_II_WITH_ZLIB -#include -#include -#include -#include -#endif - -DEAL_II_ENABLE_EXTRA_DIAGNOSTICS - -#include - -DEAL_II_NAMESPACE_OPEN - -namespace GridTools -{ - /** - * Exchange arbitrary data of type @p DataType provided by the function - * objects from locally owned cells to ghost cells on other processors. - * - * After this call, you typically will have received data from @p unpack on - * every ghost cell as it was given by @p pack on the owning processor. - * Whether you do or do not receive information to @p unpack on a given - * ghost cell depends on whether the @p pack function decided that - * something needs to be sent. It does so using the boost::optional - * mechanism: if the boost::optional return object of the @p pack - * function is empty, then this implies that no data has to be sent for - * the locally owned cell it was called on. In that case, @p unpack will - * also not be called on the ghost cell that corresponds to it on the - * receiving side. On the other hand, if the boost::optional object is - * not empty, then the data stored within it will be sent to the received - * and the @p unpack function called with it. - * - * @tparam DataType The type of the data to be communicated. It is assumed - * to be serializable by boost::serialization. 
In many cases, this - * data type can not be deduced by the compiler, e.g., if you provide - * lambda functions for the second and third argument - * to this function. In this case, you have to explicitly specify - * the @p DataType as a template argument to the function call. - * @tparam MeshType The type of @p mesh. - * - * @param mesh A variable of a type that satisfies the requirements of the - * @ref ConceptMeshType "MeshType concept". - * @param pack The function that will be called on each locally owned cell - * that is a ghost cell somewhere else. As mentioned above, the function - * may return a regular data object of type @p DataType to indicate - * that data should be sent, or an empty - * boost::optional@ to indicate that nothing has - * to be sent for this cell. - * @param unpack The function that will be called for each ghost cell - * for which data was sent, i.e., for which the @p pack function - * on the sending side returned a non-empty boost::optional object. - * The @p unpack function is then called with the data sent by the - * processor that owns that cell. - * - * - *

- * <h4>An example</h4>

- * - * Here is an example that shows how this function is to be used - * in a concrete context. It is taken from the code that makes - * sure that the @p active_fe_index (a single unsigned integer) is - * transported from locally owned cells where one can set it in - * hp::DoFHandler objects, to the corresponding ghost cells on - * other processors to ensure that one can query the right value - * also on those processors: - * @code - * auto pack - * = [] (const typename dealii::hp::DoFHandler::active_cell_iterator &cell) -> unsigned int - * { - * return cell->active_fe_index(); - * }; - * - * auto unpack - * = [] (const typename dealii::hp::DoFHandler::active_cell_iterator &cell, - * const unsigned int &active_fe_index) -> void - * { - * cell->set_active_fe_index(active_fe_index); - * }; - * - * GridTools::exchange_cell_data_to_ghosts> - * (dof_handler, pack, unpack); - * @endcode - * - * You will notice that the @p pack lambda function returns an `unsigned int`, - * not a `boost::optional`. The former converts automatically - * to the latter, implying that data will always be transported to the - * other processor. - * - * (In reality, the @p unpack function needs to be a bit more - * complicated because it is not allowed to call - * DoFAccessor::set_active_fe_index() on ghost cells. Rather, the - * @p unpack function directly accesses internal data structures. But - * you get the idea -- the code could, just as well, have exchanged - * material ids, user indices, boundary indictors, or any kind of other - * data with similar calls as the ones above.) - */ - template - void - exchange_cell_data_to_ghosts (const MeshType &mesh, - const std::function (const typename MeshType::active_cell_iterator &)> &pack, - const std::function &unpack); - - /* Exchange with all processors of the MPI communicator @p mpi_communicator the vector of bounding - * boxes @p local_bboxes. - * - * This function is meant to exchange bounding boxes describing the locally owned - * cells in a distributed triangulation obtained with the function - * GridTools::compute_mesh_predicate_bounding_box . - * - * The output vector's size is the number of processes of the MPI communicator: - * its i-th entry contains the vector @p local_bboxes of the i-th process. - */ - template - std::vector< std::vector< BoundingBox > > - exchange_local_bounding_boxes(const std::vector< BoundingBox > &local_bboxes, - MPI_Comm mpi_communicator); - - /** - * A structure that allows the transfer of cell data of type @p T from one processor - * to another. It corresponds to a packed buffer that stores a vector of - * CellId and a vector of type @p T. - * - * This class facilitates the transfer by providing the save/load functions - * that are able to pack up the vector of CellId's and the associated - * data of type @p T into a stream. - * - * Type @p T is assumed to be serializable by boost::serialization (for - * example unsigned int or std::vector@). - */ - template - struct CellDataTransferBuffer - { - /** - * A vector to store IDs of cells to be transfered. - */ - std::vector cell_ids; - - /** - * A vector of cell data to be transfered. - */ - std::vector data; - - /** - * Write the data of this object to a stream for the purpose of - * serialization. - * - * @pre The user is responsible to keep the size of @p data - * equal to the size as @p cell_ids . - */ - template - void save (Archive &ar, - const unsigned int version) const; - - /** - * Read the data of this object from a stream for the purpose of - * serialization. 
Throw away the previous content. - */ - template - void load (Archive &ar, - const unsigned int version); - - BOOST_SERIALIZATION_SPLIT_MEMBER() - - /** - * Pack the data that corresponds to this object into a buffer in - * the form of a vector of chars and return it. - */ - std::vector pack_data () const; - - /** - * Given a buffer in the form of an array of chars, unpack it and - * restore the current object to the state that it was when - * it was packed into said buffer by the pack_data() function. - */ - void unpack_data (const std::vector &buffer); - - }; - -} - -#ifndef DOXYGEN - -namespace GridTools -{ - - template - template - void - CellDataTransferBuffer::save (Archive &ar, - const unsigned int /*version*/) const - { - Assert(cell_ids.size() == data.size(), - ExcDimensionMismatch(cell_ids.size(), data.size())); - // archive the cellids in an efficient binary format - const size_t n_cells = cell_ids.size(); - ar &n_cells; - for (auto &it : cell_ids) - { - CellId::binary_type binary_cell_id = it.template to_binary(); - ar &binary_cell_id; - } - - ar &data; - } - - - - template - template - void - CellDataTransferBuffer::load (Archive &ar, - const unsigned int /*version*/) - { - size_t n_cells; - ar &n_cells; - cell_ids.clear(); - cell_ids.reserve(n_cells); - for (unsigned int c=0; c - std::vector - CellDataTransferBuffer::pack_data () const - { - // set up a buffer and then use it as the target of a compressing - // stream into which we serialize the current object - std::vector buffer; - { -#ifdef DEAL_II_WITH_ZLIB - boost::iostreams::filtering_ostream out; - out.push(boost::iostreams::gzip_compressor - (boost::iostreams::gzip_params - (boost::iostreams::gzip::best_compression))); - out.push(boost::iostreams::back_inserter(buffer)); - - boost::archive::binary_oarchive archive(out); - archive << *this; - out.flush(); -#else - std::ostringstream out; - boost::archive::binary_oarchive archive(out); - archive << *this; - const std::string &s = out.str(); - buffer.reserve(s.size()); - buffer.assign(s.begin(), s.end()); -#endif - } - - return buffer; - } - - - template - void - CellDataTransferBuffer::unpack_data (const std::vector &buffer) - { - std::string decompressed_buffer; - - // first decompress the buffer - { -#ifdef DEAL_II_WITH_ZLIB - boost::iostreams::filtering_ostream decompressing_stream; - decompressing_stream.push(boost::iostreams::gzip_decompressor()); - decompressing_stream.push(boost::iostreams::back_inserter(decompressed_buffer)); - decompressing_stream.write (buffer.data(), buffer.size()); -#else - decompressed_buffer.assign (buffer.begin(), buffer.end()); -#endif - } - - // then restore the object from the buffer - std::istringstream in(decompressed_buffer); - boost::archive::binary_iarchive archive(in); - - archive >> *this; - } - - - - template - void - exchange_cell_data_to_ghosts (const MeshType &mesh, - const std::function (const typename MeshType::active_cell_iterator &)> &pack, - const std::function &unpack) - { -#ifndef DEAL_II_WITH_MPI - (void)mesh; - (void)pack; - (void)unpack; - Assert(false, ExcMessage("GridTools::exchange_cell_data_to_ghosts() requires MPI.")); -#else - constexpr int dim = MeshType::dimension; - constexpr int spacedim = MeshType::space_dimension; - auto tria = - static_cast*>(&mesh.get_triangulation()); - Assert (tria != nullptr, - ExcMessage("The function exchange_cell_data_to_ghosts() only works with parallel triangulations.")); - - // map neighbor_id -> data_buffer where we accumulate the data to send - typedef std::map > - 
DestinationToBufferMap; - DestinationToBufferMap destination_to_data_buffer_map; - - std::map > - vertices_with_ghost_neighbors = tria->compute_vertices_with_ghost_neighbors(); - - for (auto cell : tria->active_cell_iterators()) - if (cell->is_locally_owned()) - { - std::set send_to; - for (unsigned int v=0; v::vertices_per_cell; ++v) - { - const std::map >::const_iterator - neighbor_subdomains_of_vertex - = vertices_with_ghost_neighbors.find (cell->vertex_index(v)); - - if (neighbor_subdomains_of_vertex == - vertices_with_ghost_neighbors.end()) - continue; - - Assert(neighbor_subdomains_of_vertex->second.size()!=0, - ExcInternalError()); - - send_to.insert(neighbor_subdomains_of_vertex->second.begin(), - neighbor_subdomains_of_vertex->second.end()); - } - - if (send_to.size() > 0) - { - // this cell's data needs to be sent to someone - typename MeshType::active_cell_iterator - mesh_it (tria, cell->level(), cell->index(), &mesh); - - const boost::optional data = pack(mesh_it); - - if (data) - { - const CellId cellid = cell->id(); - - for (auto it : send_to) - { - const dealii::types::subdomain_id subdomain = it; - - // find the data buffer for proc "subdomain" if it exists - // or create an empty one otherwise - typename DestinationToBufferMap::iterator p - = destination_to_data_buffer_map.insert (std::make_pair(subdomain, - CellDataTransferBuffer())) - .first; - - p->second.cell_ids.emplace_back(cellid); - p->second.data.emplace_back(data.get()); - } - } - } - } - - - // 2. send our messages - std::set ghost_owners = tria->ghost_owners(); - const unsigned int n_ghost_owners = ghost_owners.size(); - std::vector > sendbuffers (n_ghost_owners); - std::vector requests (n_ghost_owners); - - unsigned int idx=0; - for (auto it = ghost_owners.begin(); - it!=ghost_owners.end(); - ++it, ++idx) - { - CellDataTransferBuffer &data = destination_to_data_buffer_map[*it]; - - // pack all the data into the buffer for this recipient and send it. - // keep data around till we can make sure that the packet has been - // received - sendbuffers[idx] = data.pack_data (); - const int ierr = MPI_Isend(sendbuffers[idx].data(), sendbuffers[idx].size(), - MPI_BYTE, *it, - 786, tria->get_communicator(), &requests[idx]); - AssertThrowMPI(ierr); - } - - // 3. receive messages - std::vector receive; - for (unsigned int idx=0; idxget_communicator(), &status); - AssertThrowMPI(ierr); - ierr = MPI_Get_count(&status, MPI_BYTE, &len); - AssertThrowMPI(ierr); - - receive.resize(len); - - char *ptr = receive.data(); - ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, - tria->get_communicator(), &status); - AssertThrowMPI(ierr); - - CellDataTransferBuffer cellinfo; - cellinfo.unpack_data(receive); - - DataType *data = cellinfo.data.data(); - for (unsigned int c=0; c::cell_iterator - tria_cell = cellinfo.cell_ids[c].to_cell(*tria); - - const typename MeshType::active_cell_iterator - cell (tria, tria_cell->level(), tria_cell->index(), &mesh); - - unpack(cell, *data); - } - } - - // make sure that all communication is finished - // when we leave this function. 
- if (requests.size()) - { - const int ierr = MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE); - AssertThrowMPI(ierr); - } -#endif // DEAL_II_WITH_MPI - } - -} - -#endif // DOXYGEN - -DEAL_II_NAMESPACE_CLOSE - -#endif diff --git a/include/deal.II/grid/grid_tools.h b/include/deal.II/grid/grid_tools.h index f5d842f74b..98f90d21dc 100644 --- a/include/deal.II/grid/grid_tools.h +++ b/include/deal.II/grid/grid_tools.h @@ -29,6 +29,22 @@ #include #include +DEAL_II_DISABLE_EXTRA_DIAGNOSTICS +#include +#include +#include +#include +#include + +#ifdef DEAL_II_WITH_ZLIB +# include +# include +# include +# include +#endif + +DEAL_II_ENABLE_EXTRA_DIAGNOSTICS + #include #include #include @@ -2104,12 +2120,174 @@ namespace GridTools /*@}*/ + + /** + * Exchange arbitrary data of type @p DataType provided by the function + * objects from locally owned cells to ghost cells on other processors. + * + * After this call, you typically will have received data from @p unpack on + * every ghost cell as it was given by @p pack on the owning processor. + * Whether you do or do not receive information to @p unpack on a given + * ghost cell depends on whether the @p pack function decided that + * something needs to be sent. It does so using the boost::optional + * mechanism: if the boost::optional return object of the @p pack + * function is empty, then this implies that no data has to be sent for + * the locally owned cell it was called on. In that case, @p unpack will + * also not be called on the ghost cell that corresponds to it on the + * receiving side. On the other hand, if the boost::optional object is + * not empty, then the data stored within it will be sent to the received + * and the @p unpack function called with it. + * + * @tparam DataType The type of the data to be communicated. It is assumed + * to be serializable by boost::serialization. In many cases, this + * data type can not be deduced by the compiler, e.g., if you provide + * lambda functions for the second and third argument + * to this function. In this case, you have to explicitly specify + * the @p DataType as a template argument to the function call. + * @tparam MeshType The type of @p mesh. + * + * @param mesh A variable of a type that satisfies the requirements of the + * @ref ConceptMeshType "MeshType concept". + * @param pack The function that will be called on each locally owned cell + * that is a ghost cell somewhere else. As mentioned above, the function + * may return a regular data object of type @p DataType to indicate + * that data should be sent, or an empty + * boost::optional@ to indicate that nothing has + * to be sent for this cell. + * @param unpack The function that will be called for each ghost cell + * for which data was sent, i.e., for which the @p pack function + * on the sending side returned a non-empty boost::optional object. + * The @p unpack function is then called with the data sent by the + * processor that owns that cell. + * + * + *

+ * <h4>An example</h4>

+ * + * Here is an example that shows how this function is to be used + * in a concrete context. It is taken from the code that makes + * sure that the @p active_fe_index (a single unsigned integer) is + * transported from locally owned cells where one can set it in + * hp::DoFHandler objects, to the corresponding ghost cells on + * other processors to ensure that one can query the right value + * also on those processors: + * @code + * auto pack + * = [] (const typename dealii::hp::DoFHandler::active_cell_iterator &cell) -> unsigned int + * { + * return cell->active_fe_index(); + * }; + * + * auto unpack + * = [] (const typename dealii::hp::DoFHandler::active_cell_iterator &cell, + * const unsigned int &active_fe_index) -> void + * { + * cell->set_active_fe_index(active_fe_index); + * }; + * + * GridTools::exchange_cell_data_to_ghosts> + * (dof_handler, pack, unpack); + * @endcode + * + * You will notice that the @p pack lambda function returns an `unsigned int`, + * not a `boost::optional`. The former converts automatically + * to the latter, implying that data will always be transported to the + * other processor. + * + * (In reality, the @p unpack function needs to be a bit more + * complicated because it is not allowed to call + * DoFAccessor::set_active_fe_index() on ghost cells. Rather, the + * @p unpack function directly accesses internal data structures. But + * you get the idea -- the code could, just as well, have exchanged + * material ids, user indices, boundary indictors, or any kind of other + * data with similar calls as the ones above.) + */ + template + void + exchange_cell_data_to_ghosts (const MeshType &mesh, + const std::function (const typename MeshType::active_cell_iterator &)> &pack, + const std::function &unpack); + + /* Exchange with all processors of the MPI communicator @p mpi_communicator the vector of bounding + * boxes @p local_bboxes. + * + * This function is meant to exchange bounding boxes describing the locally owned + * cells in a distributed triangulation obtained with the function + * GridTools::compute_mesh_predicate_bounding_box . + * + * The output vector's size is the number of processes of the MPI communicator: + * its i-th entry contains the vector @p local_bboxes of the i-th process. + */ + template + std::vector< std::vector< BoundingBox > > + exchange_local_bounding_boxes(const std::vector< BoundingBox > &local_bboxes, + MPI_Comm mpi_communicator); + + /** + * A structure that allows the transfer of cell data of type @p T from one processor + * to another. It corresponds to a packed buffer that stores a vector of + * CellId and a vector of type @p T. + * + * This class facilitates the transfer by providing the save/load functions + * that are able to pack up the vector of CellId's and the associated + * data of type @p T into a stream. + * + * Type @p T is assumed to be serializable by boost::serialization (for + * example unsigned int or std::vector@). + */ + template + struct CellDataTransferBuffer + { + /** + * A vector to store IDs of cells to be transfered. + */ + std::vector cell_ids; + + /** + * A vector of cell data to be transfered. + */ + std::vector data; + + /** + * Write the data of this object to a stream for the purpose of + * serialization. + * + * @pre The user is responsible to keep the size of @p data + * equal to the size as @p cell_ids . + */ + template + void save (Archive &ar, + const unsigned int version) const; + + /** + * Read the data of this object from a stream for the purpose of + * serialization. 
Throw away the previous content. + */ + template + void load (Archive &ar, + const unsigned int version); + + BOOST_SERIALIZATION_SPLIT_MEMBER() + + /** + * Pack the data that corresponds to this object into a buffer in + * the form of a vector of chars and return it. + */ + std::vector pack_data () const; + + /** + * Given a buffer in the form of an array of chars, unpack it and + * restore the current object to the state that it was when + * it was packed into said buffer by the pack_data() function. + */ + void unpack_data (const std::vector &buffer); + + }; + /** * @name Exceptions */ /*@{*/ - /** * Exception */ @@ -2837,6 +3015,257 @@ namespace GridTools return projected_point; } + + + + template + template + void + CellDataTransferBuffer::save (Archive &ar, + const unsigned int /*version*/) const + { + Assert(cell_ids.size() == data.size(), + ExcDimensionMismatch(cell_ids.size(), data.size())); + // archive the cellids in an efficient binary format + const size_t n_cells = cell_ids.size(); + ar &n_cells; + for (auto &it : cell_ids) + { + CellId::binary_type binary_cell_id = it.template to_binary(); + ar &binary_cell_id; + } + + ar &data; + } + + + + template + template + void + CellDataTransferBuffer::load (Archive &ar, + const unsigned int /*version*/) + { + size_t n_cells; + ar &n_cells; + cell_ids.clear(); + cell_ids.reserve(n_cells); + for (unsigned int c=0; c + std::vector + CellDataTransferBuffer::pack_data () const + { + // set up a buffer and then use it as the target of a compressing + // stream into which we serialize the current object + std::vector buffer; + { +#ifdef DEAL_II_WITH_ZLIB + boost::iostreams::filtering_ostream out; + out.push(boost::iostreams::gzip_compressor + (boost::iostreams::gzip_params + (boost::iostreams::gzip::best_compression))); + out.push(boost::iostreams::back_inserter(buffer)); + + boost::archive::binary_oarchive archive(out); + archive << *this; + out.flush(); +#else + std::ostringstream out; + boost::archive::binary_oarchive archive(out); + archive << *this; + const std::string &s = out.str(); + buffer.reserve(s.size()); + buffer.assign(s.begin(), s.end()); +#endif + } + + return buffer; + } + + + + template + void + CellDataTransferBuffer::unpack_data (const std::vector &buffer) + { + std::string decompressed_buffer; + + // first decompress the buffer + { +#ifdef DEAL_II_WITH_ZLIB + boost::iostreams::filtering_ostream decompressing_stream; + decompressing_stream.push(boost::iostreams::gzip_decompressor()); + decompressing_stream.push(boost::iostreams::back_inserter(decompressed_buffer)); + decompressing_stream.write (buffer.data(), buffer.size()); +#else + decompressed_buffer.assign (buffer.begin(), buffer.end()); +#endif + } + + // then restore the object from the buffer + std::istringstream in(decompressed_buffer); + boost::archive::binary_iarchive archive(in); + + archive >> *this; + } + + + + template + void + exchange_cell_data_to_ghosts (const MeshType &mesh, + const std::function (const typename MeshType::active_cell_iterator &)> &pack, + const std::function &unpack) + { +#ifndef DEAL_II_WITH_MPI + (void)mesh; + (void)pack; + (void)unpack; + Assert(false, ExcMessage("GridTools::exchange_cell_data_to_ghosts() requires MPI.")); +#else + constexpr int dim = MeshType::dimension; + constexpr int spacedim = MeshType::space_dimension; + auto tria = + static_cast*>(&mesh.get_triangulation()); + Assert (tria != nullptr, + ExcMessage("The function exchange_cell_data_to_ghosts() only works with parallel triangulations.")); + + // map neighbor_id 
-> data_buffer where we accumulate the data to send + typedef std::map > + DestinationToBufferMap; + DestinationToBufferMap destination_to_data_buffer_map; + + std::map > + vertices_with_ghost_neighbors = tria->compute_vertices_with_ghost_neighbors(); + + for (auto cell : tria->active_cell_iterators()) + if (cell->is_locally_owned()) + { + std::set send_to; + for (unsigned int v=0; v::vertices_per_cell; ++v) + { + const std::map >::const_iterator + neighbor_subdomains_of_vertex + = vertices_with_ghost_neighbors.find (cell->vertex_index(v)); + + if (neighbor_subdomains_of_vertex == + vertices_with_ghost_neighbors.end()) + continue; + + Assert(neighbor_subdomains_of_vertex->second.size()!=0, + ExcInternalError()); + + send_to.insert(neighbor_subdomains_of_vertex->second.begin(), + neighbor_subdomains_of_vertex->second.end()); + } + + if (send_to.size() > 0) + { + // this cell's data needs to be sent to someone + typename MeshType::active_cell_iterator + mesh_it (tria, cell->level(), cell->index(), &mesh); + + const boost::optional data = pack(mesh_it); + + if (data) + { + const CellId cellid = cell->id(); + + for (auto it : send_to) + { + const dealii::types::subdomain_id subdomain = it; + + // find the data buffer for proc "subdomain" if it exists + // or create an empty one otherwise + typename DestinationToBufferMap::iterator p + = destination_to_data_buffer_map.insert (std::make_pair(subdomain, + CellDataTransferBuffer())) + .first; + + p->second.cell_ids.emplace_back(cellid); + p->second.data.emplace_back(data.get()); + } + } + } + } + + + // 2. send our messages + std::set ghost_owners = tria->ghost_owners(); + const unsigned int n_ghost_owners = ghost_owners.size(); + std::vector > sendbuffers (n_ghost_owners); + std::vector requests (n_ghost_owners); + + unsigned int idx=0; + for (auto it = ghost_owners.begin(); + it!=ghost_owners.end(); + ++it, ++idx) + { + CellDataTransferBuffer &data = destination_to_data_buffer_map[*it]; + + // pack all the data into the buffer for this recipient and send it. + // keep data around till we can make sure that the packet has been + // received + sendbuffers[idx] = data.pack_data (); + const int ierr = MPI_Isend(sendbuffers[idx].data(), sendbuffers[idx].size(), + MPI_BYTE, *it, + 786, tria->get_communicator(), &requests[idx]); + AssertThrowMPI(ierr); + } + + // 3. receive messages + std::vector receive; + for (unsigned int idx=0; idxget_communicator(), &status); + AssertThrowMPI(ierr); + ierr = MPI_Get_count(&status, MPI_BYTE, &len); + AssertThrowMPI(ierr); + + receive.resize(len); + + char *ptr = receive.data(); + ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, + tria->get_communicator(), &status); + AssertThrowMPI(ierr); + + CellDataTransferBuffer cellinfo; + cellinfo.unpack_data(receive); + + DataType *data = cellinfo.data.data(); + for (unsigned int c=0; c::cell_iterator + tria_cell = cellinfo.cell_ids[c].to_cell(*tria); + + const typename MeshType::active_cell_iterator + cell (tria, tria_cell->level(), tria_cell->index(), &mesh); + + unpack(cell, *data); + } + } + + // make sure that all communication is finished + // when we leave this function. 
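+      // (Editor's note, added for clarity: the buffers handed to MPI_Isend
+      // above are owned by 'sendbuffers', and MPI requires them to remain
+      // valid until the matching requests complete. Waiting here, before
+      // 'sendbuffers' goes out of scope, is therefore required for
+      // correctness, not just tidiness.)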
+ if (requests.size()) + { + const int ierr = MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE); + AssertThrowMPI(ierr); + } +#endif // DEAL_II_WITH_MPI + } } #endif diff --git a/source/distributed/CMakeLists.txt b/source/distributed/CMakeLists.txt index c821f46aec..56b6974b56 100644 --- a/source/distributed/CMakeLists.txt +++ b/source/distributed/CMakeLists.txt @@ -16,7 +16,6 @@ INCLUDE_DIRECTORIES(BEFORE ${CMAKE_CURRENT_BINARY_DIR}) SET(_unity_include_src - grid_tools.cc grid_refinement.cc solution_transfer.cc tria.cc @@ -38,7 +37,6 @@ SETUP_SOURCE_LIST("${_unity_include_src}" ) SET(_inst - grid_tools.inst.in grid_refinement.inst.in solution_transfer.inst.in tria.inst.in diff --git a/source/distributed/grid_tools.cc b/source/distributed/grid_tools.cc deleted file mode 100644 index 89f092db52..0000000000 --- a/source/distributed/grid_tools.cc +++ /dev/null @@ -1,108 +0,0 @@ -// --------------------------------------------------------------------- -// -// Copyright (C) 2017 by the deal.II authors -// -// This file is part of the deal.II library. -// -// The deal.II library is free software; you can use it, redistribute -// it, and/or modify it under the terms of the GNU Lesser General -// Public License as published by the Free Software Foundation; either -// version 2.1 of the License, or (at your option) any later version. -// The full text of the license can be found in the file LICENSE at -// the top level of the deal.II distribution. -// -// --------------------------------------------------------------------- - -#include -#include -#include -#include - -DEAL_II_NAMESPACE_OPEN - -#ifdef DEAL_II_WITH_MPI - -namespace GridTools -{ - template - std::vector< std::vector< BoundingBox > > - exchange_local_bounding_boxes(const std::vector< BoundingBox > &local_bboxes, - MPI_Comm mpi_communicator) - { -#ifndef DEAL_II_WITH_MPI - (void)local_bboxes; - (void)mpi_communicator; - Assert(false, ExcMessage("parallel::GridTools::exchange_local_bounding_boxes() requires MPI.")); -#else - // Step 1: preparing data to be sent - unsigned int n_bboxes = local_bboxes.size(); - // Dimension of the array to be exchanged (number of double) - int n_local_data = 2*spacedim*n_bboxes; - // data array stores each entry of each point describing the bounding boxes - std::vector loc_data_array(n_local_data); - for (unsigned int i=0; i size_all_data(n_procs); - - // Exchanging the number of bboxes - MPI_Allgather(&n_local_data, 1, MPI_INT, - &(size_all_data[0]), 1, MPI_INT, - mpi_communicator); - - // Now computing the the displacement, relative to recvbuf, - // at which to store the incoming data - std::vector rdispls(n_procs); - rdispls[0] = 0; - for (unsigned int i=1; i < n_procs; ++i) - rdispls[i] = rdispls[i-1] + size_all_data[i-1]; - - // Step 3: exchange the data and bounding boxes: - // Allocating a vector to contain all the received data - std::vector data_array(rdispls.back() + size_all_data.back()); - - MPI_Allgatherv(&(loc_data_array[0]), n_local_data, MPI_DOUBLE, - &(data_array[0]), &(size_all_data[0]), - &(rdispls[0]), MPI_DOUBLE, mpi_communicator); - - // Step 4: create the array of bboxes for output - std::vector< std::vector< BoundingBox > > global_bboxes(n_procs); - unsigned int begin_idx = 0; - for (unsigned int i=0; i < n_procs; ++i) - { - // Number of local bounding boxes - unsigned int n_bbox_i = size_all_data[i]/(spacedim*2); - global_bboxes[i].resize(n_bbox_i); - for (unsigned int bbox=0; bbox p1,p2; // boundary points for bbox - for (unsigned int d=0; d 
loc_bbox(std::make_pair(p1,p2)); - global_bboxes[i][bbox] = loc_bbox; - } - // Shifting the first index to the start of the next vector - begin_idx += size_all_data[i]; - } - return global_bboxes; -#endif // DEAL_II_WITH_MPI - } -} - -// explicit instantiations -#include "grid_tools.inst" - -#endif // DEAL_II_WITH_MPI -DEAL_II_NAMESPACE_CLOSE diff --git a/source/distributed/grid_tools.inst.in b/source/distributed/grid_tools.inst.in deleted file mode 100644 index 9e245a2678..0000000000 --- a/source/distributed/grid_tools.inst.in +++ /dev/null @@ -1,24 +0,0 @@ -// --------------------------------------------------------------------- -// -// Copyright (C) 2017 by the deal.II authors -// -// This file is part of the deal.II library. -// -// The deal.II library is free software; you can use it, redistribute -// it, and/or modify it under the terms of the GNU Lesser General -// Public License as published by the Free Software Foundation; either -// version 2.1 of the License, or (at your option) any later version. -// The full text of the license can be found in the file LICENSE at -// the top level of the deal.II distribution. -// -// --------------------------------------------------------------------- - -for (deal_II_space_dimension : SPACE_DIMENSIONS) -{ - namespace GridTools \{ - template - std::vector< std::vector< BoundingBox > > - exchange_local_bounding_boxes(const std::vector< BoundingBox >&, - MPI_Comm); - \} -} diff --git a/source/dofs/dof_handler_policy.cc b/source/dofs/dof_handler_policy.cc index 11d9f2f37b..e31a98ba59 100644 --- a/source/dofs/dof_handler_policy.cc +++ b/source/dofs/dof_handler_policy.cc @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -28,7 +29,6 @@ #include #include #include -#include DEAL_II_DISABLE_EXTRA_DIAGNOSTICS #include diff --git a/source/grid/grid_tools.cc b/source/grid/grid_tools.cc index 6dd0b32123..710358360f 100644 --- a/source/grid/grid_tools.cc +++ b/source/grid/grid_tools.cc @@ -5136,6 +5136,83 @@ next_cell: cell_hint, marked_vertices); } + + template + std::vector< std::vector< BoundingBox > > + exchange_local_bounding_boxes(const std::vector< BoundingBox > &local_bboxes, + MPI_Comm mpi_communicator) + { +#ifndef DEAL_II_WITH_MPI + (void)local_bboxes; + (void)mpi_communicator; + Assert(false, ExcMessage("GridTools::exchange_local_bounding_boxes() requires MPI.")); +#else + // Step 1: preparing data to be sent + unsigned int n_bboxes = local_bboxes.size(); + // Dimension of the array to be exchanged (number of double) + int n_local_data = 2*spacedim*n_bboxes; + // data array stores each entry of each point describing the bounding boxes + std::vector loc_data_array(n_local_data); + for (unsigned int i=0; i size_all_data(n_procs); + + // Exchanging the number of bboxes + MPI_Allgather(&n_local_data, 1, MPI_INT, + &(size_all_data[0]), 1, MPI_INT, + mpi_communicator); + + // Now computing the the displacement, relative to recvbuf, + // at which to store the incoming data + std::vector rdispls(n_procs); + rdispls[0] = 0; + for (unsigned int i=1; i < n_procs; ++i) + rdispls[i] = rdispls[i-1] + size_all_data[i-1]; + + // Step 3: exchange the data and bounding boxes: + // Allocating a vector to contain all the received data + std::vector data_array(rdispls.back() + size_all_data.back()); + + MPI_Allgatherv(&(loc_data_array[0]), n_local_data, MPI_DOUBLE, + &(data_array[0]), &(size_all_data[0]), + &(rdispls[0]), MPI_DOUBLE, mpi_communicator); + + // Step 4: create the array of bboxes for output + std::vector< std::vector< 
BoundingBox > > global_bboxes(n_procs); + unsigned int begin_idx = 0; + for (unsigned int i=0; i < n_procs; ++i) + { + // Number of local bounding boxes + unsigned int n_bbox_i = size_all_data[i]/(spacedim*2); + global_bboxes[i].resize(n_bbox_i); + for (unsigned int bbox=0; bbox p1,p2; // boundary points for bbox + for (unsigned int d=0; d loc_bbox(std::make_pair(p1,p2)); + global_bboxes[i][bbox] = loc_bbox; + } + // Shifting the first index to the start of the next vector + begin_idx += size_all_data[i]; + } + return global_bboxes; +#endif // DEAL_II_WITH_MPI + } + } /* namespace GridTools */ diff --git a/source/grid/grid_tools.inst.in b/source/grid/grid_tools.inst.in index c4de24d473..ee1cceff89 100644 --- a/source/grid/grid_tools.inst.in +++ b/source/grid/grid_tools.inst.in @@ -168,6 +168,10 @@ for (deal_II_space_dimension : SPACE_DIMENSIONS) template unsigned int GridTools::find_closest_vertex(const std::map >& vertices, const Point& p); + + template std::vector< std::vector< BoundingBox > > + GridTools::exchange_local_bounding_boxes(const std::vector< BoundingBox >&, + MPI_Comm); } diff --git a/source/hp/dof_handler.cc b/source/hp/dof_handler.cc index fdcf1ae777..12a486eea9 100644 --- a/source/hp/dof_handler.cc +++ b/source/hp/dof_handler.cc @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -28,7 +29,6 @@ #include #include #include -#include #include diff --git a/tests/distributed_grids/grid_tools_exchange_bounding_boxes_1.cc b/tests/distributed_grids/grid_tools_exchange_bounding_boxes_1.cc index b7f233911b..07e4f99a30 100644 --- a/tests/distributed_grids/grid_tools_exchange_bounding_boxes_1.cc +++ b/tests/distributed_grids/grid_tools_exchange_bounding_boxes_1.cc @@ -20,7 +20,7 @@ #include #include #include -#include +#include template void test_exchange_bbox() diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_01.cc b/tests/distributed_grids/grid_tools_exchange_cell_data_01.cc index 717c1f7b67..f1bfa35042 100644 --- a/tests/distributed_grids/grid_tools_exchange_cell_data_01.cc +++ b/tests/distributed_grids/grid_tools_exchange_cell_data_01.cc @@ -19,7 +19,6 @@ #include "../tests.h" #include #include -#include #include #include #include diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_02.cc b/tests/distributed_grids/grid_tools_exchange_cell_data_02.cc index a7abfd3676..11ce3a6e10 100644 --- a/tests/distributed_grids/grid_tools_exchange_cell_data_02.cc +++ b/tests/distributed_grids/grid_tools_exchange_cell_data_02.cc @@ -19,7 +19,6 @@ #include "../tests.h" #include #include -#include #include #include #include diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_03.cc b/tests/distributed_grids/grid_tools_exchange_cell_data_03.cc index 0dec046d3d..48103ad4c3 100644 --- a/tests/distributed_grids/grid_tools_exchange_cell_data_03.cc +++ b/tests/distributed_grids/grid_tools_exchange_cell_data_03.cc @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_04.cc b/tests/distributed_grids/grid_tools_exchange_cell_data_04.cc index 1e106ea7af..97fda22c48 100644 --- a/tests/distributed_grids/grid_tools_exchange_cell_data_04.cc +++ b/tests/distributed_grids/grid_tools_exchange_cell_data_04.cc @@ -23,7 +23,6 @@ #include "../tests.h" #include #include -#include #include #include #include -- 2.39.5
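[Editor's note, not part of the patch] After this change, both functions live in deal.II/grid/grid_tools.h rather than deal.II/distributed/grid_tools.h. The following is a minimal sketch of calling the relocated API, assuming an already-set-up parallel::distributed::Triangulation<dim> named "tria"; the helper name exchange_material_ids and everything other than the two GridTools calls are illustrative. It exchanges material ids, one of the uses the documentation above explicitly mentions:

@code
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/grid_tools.h>   // new home of both functions

template <int dim>
void exchange_material_ids
  (const dealii::parallel::distributed::Triangulation<dim> &tria)
{
  using cell_iterator = typename dealii::parallel::distributed::
    Triangulation<dim>::active_cell_iterator;

  // pack: called on each locally owned cell that is a ghost cell elsewhere;
  // returning a plain value converts implicitly to the boost::optional
  // return type, i.e., "always send"
  auto pack = [] (const cell_iterator &cell) -> dealii::types::material_id
  {
    return cell->material_id();
  };

  // unpack: called on the processes where the same cell is a ghost cell
  auto unpack = [] (const cell_iterator &cell,
                    const dealii::types::material_id &id) -> void
  {
    cell->set_material_id(id);
  };

  dealii::GridTools::exchange_cell_data_to_ghosts
    <dealii::types::material_id,
     dealii::parallel::distributed::Triangulation<dim>> (tria, pack, unpack);
}
@endcode

For the bounding-box exchange, with local_boxes assumed to come from GridTools::compute_mesh_predicate_bounding_box() as the documentation above suggests:

@code
  // every rank contributes its local boxes; entry p of the result holds
  // the boxes contributed by rank p
  const std::vector<std::vector<dealii::BoundingBox<dim>>> global_boxes
    = dealii::GridTools::exchange_local_bounding_boxes (local_boxes,
                                                        MPI_COMM_WORLD);
@endcode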