+++ /dev/null
-// ---------------------------------------------------------------------
-//
-// Copyright (C) 2017 by the deal.II authors
-//
-// This file is part of the deal.II library.
-//
-// The deal.II library is free software; you can use it, redistribute
-// it, and/or modify it under the terms of the GNU Lesser General
-// Public License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-// The full text of the license can be found in the file LICENSE at
-// the top level of the deal.II distribution.
-//
-// ---------------------------------------------------------------------
-
-#ifndef dealii_distributed_grid_tools_h
-#define dealii_distributed_grid_tools_h
-
-
-#include <deal.II/base/bounding_box.h>
-#include <deal.II/base/config.h>
-#include <deal.II/base/exceptions.h>
-#include <deal.II/distributed/tria_base.h>
-
-DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
-#include <boost/optional.hpp>
-#include <boost/archive/binary_oarchive.hpp>
-#include <boost/archive/binary_iarchive.hpp>
-#include <boost/serialization/vector.hpp>
-#include <boost/serialization/array.hpp>
-
-#ifdef DEAL_II_WITH_ZLIB
-#include <boost/iostreams/stream.hpp>
-#include <boost/iostreams/filtering_stream.hpp>
-#include <boost/iostreams/device/back_inserter.hpp>
-#include <boost/iostreams/filter/gzip.hpp>
-#endif
-
-DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
-
-#include <vector>
-
-DEAL_II_NAMESPACE_OPEN
-
-namespace GridTools
-{
- /**
- * Exchange arbitrary data of type @p DataType provided by the function
- * objects from locally owned cells to ghost cells on other processors.
- *
- * After this call, you typically will have received data from @p unpack on
- * every ghost cell as it was given by @p pack on the owning processor.
- * Whether you do or do not receive information to @p unpack on a given
- * ghost cell depends on whether the @p pack function decided that
- * something needs to be sent. It does so using the boost::optional
- * mechanism: if the boost::optional return object of the @p pack
- * function is empty, then this implies that no data has to be sent for
- * the locally owned cell it was called on. In that case, @p unpack will
- * also not be called on the ghost cell that corresponds to it on the
- * receiving side. On the other hand, if the boost::optional object is
- * not empty, then the data stored within it will be sent to the received
- * and the @p unpack function called with it.
- *
- * @tparam DataType The type of the data to be communicated. It is assumed
- * to be serializable by boost::serialization. In many cases, this
- * data type can not be deduced by the compiler, e.g., if you provide
- * lambda functions for the second and third argument
- * to this function. In this case, you have to explicitly specify
- * the @p DataType as a template argument to the function call.
- * @tparam MeshType The type of @p mesh.
- *
- * @param mesh A variable of a type that satisfies the requirements of the
- * @ref ConceptMeshType "MeshType concept".
- * @param pack The function that will be called on each locally owned cell
- * that is a ghost cell somewhere else. As mentioned above, the function
- * may return a regular data object of type @p DataType to indicate
- * that data should be sent, or an empty
- * <code>boost::optional@<DataType@></code> to indicate that nothing has
- * to be sent for this cell.
- * @param unpack The function that will be called for each ghost cell
- * for which data was sent, i.e., for which the @p pack function
- * on the sending side returned a non-empty boost::optional object.
- * The @p unpack function is then called with the data sent by the
- * processor that owns that cell.
- *
- *
- * <h4> An example </h4>
- *
- * Here is an example that shows how this function is to be used
- * in a concrete context. It is taken from the code that makes
- * sure that the @p active_fe_index (a single unsigned integer) is
- * transported from locally owned cells where one can set it in
- * hp::DoFHandler objects, to the corresponding ghost cells on
- * other processors to ensure that one can query the right value
- * also on those processors:
- * @code
- * auto pack
- * = [] (const typename dealii::hp::DoFHandler<dim,spacedim>::active_cell_iterator &cell) -> unsigned int
- * {
- * return cell->active_fe_index();
- * };
- *
- * auto unpack
- * = [] (const typename dealii::hp::DoFHandler<dim,spacedim>::active_cell_iterator &cell,
- * const unsigned int &active_fe_index) -> void
- * {
- * cell->set_active_fe_index(active_fe_index);
- * };
- *
- * GridTools::exchange_cell_data_to_ghosts<unsigned int, dealii::hp::DoFHandler<dim,spacedim>>
- * (dof_handler, pack, unpack);
- * @endcode
- *
- * You will notice that the @p pack lambda function returns an `unsigned int`,
- * not a `boost::optional<unsigned int>`. The former converts automatically
- * to the latter, implying that data will always be transported to the
- * other processor.
- *
- * (In reality, the @p unpack function needs to be a bit more
- * complicated because it is not allowed to call
- * DoFAccessor::set_active_fe_index() on ghost cells. Rather, the
- * @p unpack function directly accesses internal data structures. But
- * you get the idea -- the code could, just as well, have exchanged
- * material ids, user indices, boundary indictors, or any kind of other
- * data with similar calls as the ones above.)
- */
- template <typename DataType, typename MeshType>
- void
- exchange_cell_data_to_ghosts (const MeshType &mesh,
- const std::function<boost::optional<DataType> (const typename MeshType::active_cell_iterator &)> &pack,
- const std::function<void (const typename MeshType::active_cell_iterator &, const DataType &)> &unpack);
-
- /* Exchange with all processors of the MPI communicator @p mpi_communicator the vector of bounding
- * boxes @p local_bboxes.
- *
- * This function is meant to exchange bounding boxes describing the locally owned
- * cells in a distributed triangulation obtained with the function
- * GridTools::compute_mesh_predicate_bounding_box .
- *
- * The output vector's size is the number of processes of the MPI communicator:
- * its i-th entry contains the vector @p local_bboxes of the i-th process.
- */
- template<int spacedim>
- std::vector< std::vector< BoundingBox<spacedim> > >
- exchange_local_bounding_boxes(const std::vector< BoundingBox<spacedim> > &local_bboxes,
- MPI_Comm mpi_communicator);
-
- /**
- * A structure that allows the transfer of cell data of type @p T from one processor
- * to another. It corresponds to a packed buffer that stores a vector of
- * CellId and a vector of type @p T.
- *
- * This class facilitates the transfer by providing the save/load functions
- * that are able to pack up the vector of CellId's and the associated
- * data of type @p T into a stream.
- *
- * Type @p T is assumed to be serializable by <code>boost::serialization</code> (for
- * example <code>unsigned int</code> or <code>std::vector@<double@></code>).
- */
- template <int dim, typename T>
- struct CellDataTransferBuffer
- {
- /**
- * A vector to store IDs of cells to be transfered.
- */
- std::vector<CellId> cell_ids;
-
- /**
- * A vector of cell data to be transfered.
- */
- std::vector<T> data;
-
- /**
- * Write the data of this object to a stream for the purpose of
- * serialization.
- *
- * @pre The user is responsible to keep the size of @p data
- * equal to the size as @p cell_ids .
- */
- template <class Archive>
- void save (Archive &ar,
- const unsigned int version) const;
-
- /**
- * Read the data of this object from a stream for the purpose of
- * serialization. Throw away the previous content.
- */
- template <class Archive>
- void load (Archive &ar,
- const unsigned int version);
-
- BOOST_SERIALIZATION_SPLIT_MEMBER()
-
- /**
- * Pack the data that corresponds to this object into a buffer in
- * the form of a vector of chars and return it.
- */
- std::vector<char> pack_data () const;
-
- /**
- * Given a buffer in the form of an array of chars, unpack it and
- * restore the current object to the state that it was when
- * it was packed into said buffer by the pack_data() function.
- */
- void unpack_data (const std::vector<char> &buffer);
-
- };
-
-}
-
-#ifndef DOXYGEN
-
-namespace GridTools
-{
-
- template <int dim, typename T>
- template <class Archive>
- void
- CellDataTransferBuffer<dim,T>::save (Archive &ar,
- const unsigned int /*version*/) const
- {
- Assert(cell_ids.size() == data.size(),
- ExcDimensionMismatch(cell_ids.size(), data.size()));
- // archive the cellids in an efficient binary format
- const size_t n_cells = cell_ids.size();
- ar &n_cells;
- for (auto &it : cell_ids)
- {
- CellId::binary_type binary_cell_id = it.template to_binary<dim>();
- ar &binary_cell_id;
- }
-
- ar &data;
- }
-
-
-
- template <int dim, typename T>
- template <class Archive>
- void
- CellDataTransferBuffer<dim,T>::load (Archive &ar,
- const unsigned int /*version*/)
- {
- size_t n_cells;
- ar &n_cells;
- cell_ids.clear();
- cell_ids.reserve(n_cells);
- for (unsigned int c=0; c<n_cells; ++c)
- {
- CellId::binary_type value;
- ar &value;
- cell_ids.emplace_back(std::move(value));
- }
- ar &data;
- }
-
-
-
- template <int dim, typename T>
- std::vector<char>
- CellDataTransferBuffer<dim,T>::pack_data () const
- {
- // set up a buffer and then use it as the target of a compressing
- // stream into which we serialize the current object
- std::vector<char> buffer;
- {
-#ifdef DEAL_II_WITH_ZLIB
- boost::iostreams::filtering_ostream out;
- out.push(boost::iostreams::gzip_compressor
- (boost::iostreams::gzip_params
- (boost::iostreams::gzip::best_compression)));
- out.push(boost::iostreams::back_inserter(buffer));
-
- boost::archive::binary_oarchive archive(out);
- archive << *this;
- out.flush();
-#else
- std::ostringstream out;
- boost::archive::binary_oarchive archive(out);
- archive << *this;
- const std::string &s = out.str();
- buffer.reserve(s.size());
- buffer.assign(s.begin(), s.end());
-#endif
- }
-
- return buffer;
- }
-
-
- template <int dim, typename T>
- void
- CellDataTransferBuffer<dim,T>::unpack_data (const std::vector<char> &buffer)
- {
- std::string decompressed_buffer;
-
- // first decompress the buffer
- {
-#ifdef DEAL_II_WITH_ZLIB
- boost::iostreams::filtering_ostream decompressing_stream;
- decompressing_stream.push(boost::iostreams::gzip_decompressor());
- decompressing_stream.push(boost::iostreams::back_inserter(decompressed_buffer));
- decompressing_stream.write (buffer.data(), buffer.size());
-#else
- decompressed_buffer.assign (buffer.begin(), buffer.end());
-#endif
- }
-
- // then restore the object from the buffer
- std::istringstream in(decompressed_buffer);
- boost::archive::binary_iarchive archive(in);
-
- archive >> *this;
- }
-
-
-
- template <typename DataType, typename MeshType>
- void
- exchange_cell_data_to_ghosts (const MeshType &mesh,
- const std::function<boost::optional<DataType> (const typename MeshType::active_cell_iterator &)> &pack,
- const std::function<void (const typename MeshType::active_cell_iterator &, const DataType &)> &unpack)
- {
-#ifndef DEAL_II_WITH_MPI
- (void)mesh;
- (void)pack;
- (void)unpack;
- Assert(false, ExcMessage("GridTools::exchange_cell_data_to_ghosts() requires MPI."));
-#else
- constexpr int dim = MeshType::dimension;
- constexpr int spacedim = MeshType::space_dimension;
- auto tria =
- static_cast<const parallel::Triangulation<dim, spacedim>*>(&mesh.get_triangulation());
- Assert (tria != nullptr,
- ExcMessage("The function exchange_cell_data_to_ghosts() only works with parallel triangulations."));
-
- // map neighbor_id -> data_buffer where we accumulate the data to send
- typedef std::map<dealii::types::subdomain_id, CellDataTransferBuffer<dim, DataType> >
- DestinationToBufferMap;
- DestinationToBufferMap destination_to_data_buffer_map;
-
- std::map<unsigned int, std::set<dealii::types::subdomain_id> >
- vertices_with_ghost_neighbors = tria->compute_vertices_with_ghost_neighbors();
-
- for (auto cell : tria->active_cell_iterators())
- if (cell->is_locally_owned())
- {
- std::set<dealii::types::subdomain_id> send_to;
- for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
- {
- const std::map<unsigned int, std::set<dealii::types::subdomain_id> >::const_iterator
- neighbor_subdomains_of_vertex
- = vertices_with_ghost_neighbors.find (cell->vertex_index(v));
-
- if (neighbor_subdomains_of_vertex ==
- vertices_with_ghost_neighbors.end())
- continue;
-
- Assert(neighbor_subdomains_of_vertex->second.size()!=0,
- ExcInternalError());
-
- send_to.insert(neighbor_subdomains_of_vertex->second.begin(),
- neighbor_subdomains_of_vertex->second.end());
- }
-
- if (send_to.size() > 0)
- {
- // this cell's data needs to be sent to someone
- typename MeshType::active_cell_iterator
- mesh_it (tria, cell->level(), cell->index(), &mesh);
-
- const boost::optional<DataType> data = pack(mesh_it);
-
- if (data)
- {
- const CellId cellid = cell->id();
-
- for (auto it : send_to)
- {
- const dealii::types::subdomain_id subdomain = it;
-
- // find the data buffer for proc "subdomain" if it exists
- // or create an empty one otherwise
- typename DestinationToBufferMap::iterator p
- = destination_to_data_buffer_map.insert (std::make_pair(subdomain,
- CellDataTransferBuffer<dim, DataType>()))
- .first;
-
- p->second.cell_ids.emplace_back(cellid);
- p->second.data.emplace_back(data.get());
- }
- }
- }
- }
-
-
- // 2. send our messages
- std::set<dealii::types::subdomain_id> ghost_owners = tria->ghost_owners();
- const unsigned int n_ghost_owners = ghost_owners.size();
- std::vector<std::vector<char> > sendbuffers (n_ghost_owners);
- std::vector<MPI_Request> requests (n_ghost_owners);
-
- unsigned int idx=0;
- for (auto it = ghost_owners.begin();
- it!=ghost_owners.end();
- ++it, ++idx)
- {
- CellDataTransferBuffer<dim, DataType> &data = destination_to_data_buffer_map[*it];
-
- // pack all the data into the buffer for this recipient and send it.
- // keep data around till we can make sure that the packet has been
- // received
- sendbuffers[idx] = data.pack_data ();
- const int ierr = MPI_Isend(sendbuffers[idx].data(), sendbuffers[idx].size(),
- MPI_BYTE, *it,
- 786, tria->get_communicator(), &requests[idx]);
- AssertThrowMPI(ierr);
- }
-
- // 3. receive messages
- std::vector<char> receive;
- for (unsigned int idx=0; idx<n_ghost_owners; ++idx)
- {
- MPI_Status status;
- int len;
- int ierr = MPI_Probe(MPI_ANY_SOURCE, 786, tria->get_communicator(), &status);
- AssertThrowMPI(ierr);
- ierr = MPI_Get_count(&status, MPI_BYTE, &len);
- AssertThrowMPI(ierr);
-
- receive.resize(len);
-
- char *ptr = receive.data();
- ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
- tria->get_communicator(), &status);
- AssertThrowMPI(ierr);
-
- CellDataTransferBuffer<dim, DataType> cellinfo;
- cellinfo.unpack_data(receive);
-
- DataType *data = cellinfo.data.data();
- for (unsigned int c=0; c<cellinfo.cell_ids.size(); ++c, ++data)
- {
- const typename Triangulation<dim,spacedim>::cell_iterator
- tria_cell = cellinfo.cell_ids[c].to_cell(*tria);
-
- const typename MeshType::active_cell_iterator
- cell (tria, tria_cell->level(), tria_cell->index(), &mesh);
-
- unpack(cell, *data);
- }
- }
-
- // make sure that all communication is finished
- // when we leave this function.
- if (requests.size())
- {
- const int ierr = MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
- AssertThrowMPI(ierr);
- }
-#endif // DEAL_II_WITH_MPI
- }
-
-}
-
-#endif // DOXYGEN
-
-DEAL_II_NAMESPACE_CLOSE
-
-#endif
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/hp/dof_handler.h>
+DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
+#include <boost/optional.hpp>
+#include <boost/archive/binary_oarchive.hpp>
+#include <boost/archive/binary_iarchive.hpp>
+#include <boost/serialization/vector.hpp>
+#include <boost/serialization/array.hpp>
+
+#ifdef DEAL_II_WITH_ZLIB
+# include <boost/iostreams/stream.hpp>
+# include <boost/iostreams/filtering_stream.hpp>
+# include <boost/iostreams/device/back_inserter.hpp>
+# include <boost/iostreams/filter/gzip.hpp>
+#endif
+
+DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
+
#include <bitset>
#include <list>
#include <set>
/*@}*/
+
+ /**
+ * Exchange arbitrary data of type @p DataType provided by the function
+ * objects from locally owned cells to ghost cells on other processors.
+ *
+   * After this call, you will typically have received, through @p unpack, on
+   * every ghost cell the data that @p pack provided on the owning processor.
+   * Whether @p unpack is called at all for a given ghost cell depends on
+   * whether the @p pack function decided that something needs to be sent
+   * for the corresponding locally owned cell. It does so using the
+   * boost::optional mechanism: if the boost::optional object returned by
+   * @p pack is empty, then no data is sent for that cell, and @p unpack
+   * will not be called on the ghost cell that corresponds to it on the
+   * receiving side. On the other hand, if the boost::optional object is
+   * not empty, then the data stored within it is sent to the receiving
+   * processor and the @p unpack function is called with it.
+ *
+ * @tparam DataType The type of the data to be communicated. It is assumed
+ * to be serializable by boost::serialization. In many cases, this
+ * data type can not be deduced by the compiler, e.g., if you provide
+ * lambda functions for the second and third argument
+ * to this function. In this case, you have to explicitly specify
+ * the @p DataType as a template argument to the function call.
+ * @tparam MeshType The type of @p mesh.
+ *
+ * @param mesh A variable of a type that satisfies the requirements of the
+ * @ref ConceptMeshType "MeshType concept".
+ * @param pack The function that will be called on each locally owned cell
+ * that is a ghost cell somewhere else. As mentioned above, the function
+ * may return a regular data object of type @p DataType to indicate
+ * that data should be sent, or an empty
+ * <code>boost::optional@<DataType@></code> to indicate that nothing has
+ * to be sent for this cell.
+ * @param unpack The function that will be called for each ghost cell
+ * for which data was sent, i.e., for which the @p pack function
+ * on the sending side returned a non-empty boost::optional object.
+ * The @p unpack function is then called with the data sent by the
+ * processor that owns that cell.
+ *
+ *
+ * <h4> An example </h4>
+ *
+ * Here is an example that shows how this function is to be used
+ * in a concrete context. It is taken from the code that makes
+ * sure that the @p active_fe_index (a single unsigned integer) is
+ * transported from locally owned cells where one can set it in
+ * hp::DoFHandler objects, to the corresponding ghost cells on
+ * other processors to ensure that one can query the right value
+ * also on those processors:
+ * @code
+ * auto pack
+ * = [] (const typename dealii::hp::DoFHandler<dim,spacedim>::active_cell_iterator &cell) -> unsigned int
+ * {
+ * return cell->active_fe_index();
+ * };
+ *
+ * auto unpack
+ * = [] (const typename dealii::hp::DoFHandler<dim,spacedim>::active_cell_iterator &cell,
+ * const unsigned int &active_fe_index) -> void
+ * {
+ * cell->set_active_fe_index(active_fe_index);
+ * };
+ *
+ * GridTools::exchange_cell_data_to_ghosts<unsigned int, dealii::hp::DoFHandler<dim,spacedim>>
+ * (dof_handler, pack, unpack);
+ * @endcode
+ *
+ * You will notice that the @p pack lambda function returns an `unsigned int`,
+ * not a `boost::optional<unsigned int>`. The former converts automatically
+ * to the latter, implying that data will always be transported to the
+ * other processor.
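+   *
+   * If data should only be sent for some of the locally owned cells, the
+   * @p pack function can instead return an empty object. As a minimal
+   * sketch of such a variant (the condition shown here, querying the user
+   * flag, is only an illustrative assumption), consider:
+   * @code
+   * auto pack
+   * = [] (const typename dealii::hp::DoFHandler<dim,spacedim>::active_cell_iterator &cell)
+   *   -> boost::optional<unsigned int>
+   * {
+   *   if (cell->user_flag_set())
+   *     return cell->active_fe_index();
+   *   else
+   *     return boost::optional<unsigned int>();
+   * };
+   * @endcode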
+ *
+ * (In reality, the @p unpack function needs to be a bit more
+ * complicated because it is not allowed to call
+ * DoFAccessor::set_active_fe_index() on ghost cells. Rather, the
+ * @p unpack function directly accesses internal data structures. But
+ * you get the idea -- the code could, just as well, have exchanged
+   * material ids, user indices, boundary indicators, or any kind of other
+ * data with similar calls as the ones above.)
+ */
+ template <typename DataType, typename MeshType>
+ void
+ exchange_cell_data_to_ghosts (const MeshType &mesh,
+ const std::function<boost::optional<DataType> (const typename MeshType::active_cell_iterator &)> &pack,
+ const std::function<void (const typename MeshType::active_cell_iterator &, const DataType &)> &unpack);
+
+  /**
+   * Exchange with all processors of the MPI communicator @p mpi_communicator
+   * the vector of bounding boxes @p local_bboxes.
+   *
+   * This function is meant to exchange bounding boxes describing the locally owned
+   * cells in a distributed triangulation obtained with the function
+   * GridTools::compute_mesh_predicate_bounding_box().
+ *
+ * The output vector's size is the number of processes of the MPI communicator:
+ * its i-th entry contains the vector @p local_bboxes of the i-th process.
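+   *
+   * A minimal usage sketch (assuming that @p local_bboxes has already been
+   * computed for the locally owned cells and that <code>mpi_communicator</code>
+   * is the communicator of the triangulation):
+   * @code
+   * const std::vector< std::vector< BoundingBox<spacedim> > > global_bboxes
+   *   = GridTools::exchange_local_bounding_boxes (local_bboxes,
+   *                                               mpi_communicator);
+   *
+   * // global_bboxes[p] now contains the bounding boxes sent by process p
+   * @endcode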
+ */
+ template<int spacedim>
+ std::vector< std::vector< BoundingBox<spacedim> > >
+ exchange_local_bounding_boxes(const std::vector< BoundingBox<spacedim> > &local_bboxes,
+ MPI_Comm mpi_communicator);
+
+ /**
+ * A structure that allows the transfer of cell data of type @p T from one processor
+ * to another. It corresponds to a packed buffer that stores a vector of
+ * CellId and a vector of type @p T.
+ *
+ * This class facilitates the transfer by providing the save/load functions
+ * that are able to pack up the vector of CellId's and the associated
+ * data of type @p T into a stream.
+ *
+ * Type @p T is assumed to be serializable by <code>boost::serialization</code> (for
+ * example <code>unsigned int</code> or <code>std::vector@<double@></code>).
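+   *
+   * A minimal sketch of how such a buffer might be filled and serialized on
+   * the sending side (the cell iterator @p cell and the stored value are
+   * only illustrative):
+   * @code
+   * CellDataTransferBuffer<dim, double> buffer;
+   * buffer.cell_ids.push_back (cell->id());
+   * buffer.data.push_back (42.0);
+   *
+   * // serialize (and, with zlib, compress) into a flat buffer of chars
+   * // that can then be sent, e.g., through MPI
+   * const std::vector<char> packed = buffer.pack_data ();
+   * @endcode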
+ */
+ template <int dim, typename T>
+ struct CellDataTransferBuffer
+ {
+ /**
+     * A vector to store IDs of cells to be transferred.
+ */
+ std::vector<CellId> cell_ids;
+
+ /**
+     * A vector of cell data to be transferred.
+ */
+ std::vector<T> data;
+
+ /**
+ * Write the data of this object to a stream for the purpose of
+ * serialization.
+ *
+     * @pre The user is responsible for keeping the size of @p data
+     * equal to the size of @p cell_ids.
+ */
+ template <class Archive>
+ void save (Archive &ar,
+ const unsigned int version) const;
+
+ /**
+ * Read the data of this object from a stream for the purpose of
+ * serialization. Throw away the previous content.
+ */
+ template <class Archive>
+ void load (Archive &ar,
+ const unsigned int version);
+
+ BOOST_SERIALIZATION_SPLIT_MEMBER()
+
+ /**
+ * Pack the data that corresponds to this object into a buffer in
+ * the form of a vector of chars and return it.
+ */
+ std::vector<char> pack_data () const;
+
+ /**
+ * Given a buffer in the form of an array of chars, unpack it and
+ * restore the current object to the state that it was when
+ * it was packed into said buffer by the pack_data() function.
+ */
+ void unpack_data (const std::vector<char> &buffer);
+
+ };
+
/**
* @name Exceptions
*/
/*@{*/
-
/**
* Exception
*/
return projected_point;
}
+
+
+
+ template <int dim, typename T>
+ template <class Archive>
+ void
+ CellDataTransferBuffer<dim,T>::save (Archive &ar,
+ const unsigned int /*version*/) const
+ {
+ Assert(cell_ids.size() == data.size(),
+ ExcDimensionMismatch(cell_ids.size(), data.size()));
+ // archive the cellids in an efficient binary format
+ const size_t n_cells = cell_ids.size();
+ ar &n_cells;
+ for (auto &it : cell_ids)
+ {
+ CellId::binary_type binary_cell_id = it.template to_binary<dim>();
+ ar &binary_cell_id;
+ }
+
+ ar &data;
+ }
+
+
+
+ template <int dim, typename T>
+ template <class Archive>
+ void
+ CellDataTransferBuffer<dim,T>::load (Archive &ar,
+ const unsigned int /*version*/)
+ {
+ size_t n_cells;
+ ar &n_cells;
+ cell_ids.clear();
+ cell_ids.reserve(n_cells);
+ for (unsigned int c=0; c<n_cells; ++c)
+ {
+ CellId::binary_type value;
+ ar &value;
+ cell_ids.emplace_back(std::move(value));
+ }
+ ar &data;
+ }
+
+
+
+ template <int dim, typename T>
+ std::vector<char>
+ CellDataTransferBuffer<dim,T>::pack_data () const
+ {
+ // set up a buffer and then use it as the target of a compressing
+ // stream into which we serialize the current object
+ std::vector<char> buffer;
+ {
+#ifdef DEAL_II_WITH_ZLIB
+ boost::iostreams::filtering_ostream out;
+ out.push(boost::iostreams::gzip_compressor
+ (boost::iostreams::gzip_params
+ (boost::iostreams::gzip::best_compression)));
+ out.push(boost::iostreams::back_inserter(buffer));
+
+ boost::archive::binary_oarchive archive(out);
+ archive << *this;
+ out.flush();
+#else
+ std::ostringstream out;
+ boost::archive::binary_oarchive archive(out);
+ archive << *this;
+ const std::string &s = out.str();
+ buffer.reserve(s.size());
+ buffer.assign(s.begin(), s.end());
+#endif
+ }
+
+ return buffer;
+ }
+
+
+
+ template <int dim, typename T>
+ void
+ CellDataTransferBuffer<dim,T>::unpack_data (const std::vector<char> &buffer)
+ {
+ std::string decompressed_buffer;
+
+ // first decompress the buffer
+ {
+#ifdef DEAL_II_WITH_ZLIB
+ boost::iostreams::filtering_ostream decompressing_stream;
+ decompressing_stream.push(boost::iostreams::gzip_decompressor());
+ decompressing_stream.push(boost::iostreams::back_inserter(decompressed_buffer));
+ decompressing_stream.write (buffer.data(), buffer.size());
+#else
+ decompressed_buffer.assign (buffer.begin(), buffer.end());
+#endif
+ }
+
+ // then restore the object from the buffer
+ std::istringstream in(decompressed_buffer);
+ boost::archive::binary_iarchive archive(in);
+
+ archive >> *this;
+ }
+
+
+
+ template <typename DataType, typename MeshType>
+ void
+ exchange_cell_data_to_ghosts (const MeshType &mesh,
+ const std::function<boost::optional<DataType> (const typename MeshType::active_cell_iterator &)> &pack,
+ const std::function<void (const typename MeshType::active_cell_iterator &, const DataType &)> &unpack)
+ {
+#ifndef DEAL_II_WITH_MPI
+ (void)mesh;
+ (void)pack;
+ (void)unpack;
+ Assert(false, ExcMessage("GridTools::exchange_cell_data_to_ghosts() requires MPI."));
+#else
+ constexpr int dim = MeshType::dimension;
+ constexpr int spacedim = MeshType::space_dimension;
+    // use a dynamic_cast so that the subsequent Assert can detect the case
+    // of a triangulation that is not of parallel type
+    auto tria =
+      dynamic_cast<const parallel::Triangulation<dim, spacedim>*>(&mesh.get_triangulation());
+ Assert (tria != nullptr,
+ ExcMessage("The function exchange_cell_data_to_ghosts() only works with parallel triangulations."));
+
+    // 1. collect the data to send: map from neighbor subdomain_id to the
+    // data buffer in which we accumulate the data destined for that neighbor
+ typedef std::map<dealii::types::subdomain_id, CellDataTransferBuffer<dim, DataType> >
+ DestinationToBufferMap;
+ DestinationToBufferMap destination_to_data_buffer_map;
+
+ std::map<unsigned int, std::set<dealii::types::subdomain_id> >
+ vertices_with_ghost_neighbors = tria->compute_vertices_with_ghost_neighbors();
+
+ for (auto cell : tria->active_cell_iterators())
+ if (cell->is_locally_owned())
+ {
+ std::set<dealii::types::subdomain_id> send_to;
+ for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
+ {
+ const std::map<unsigned int, std::set<dealii::types::subdomain_id> >::const_iterator
+ neighbor_subdomains_of_vertex
+ = vertices_with_ghost_neighbors.find (cell->vertex_index(v));
+
+ if (neighbor_subdomains_of_vertex ==
+ vertices_with_ghost_neighbors.end())
+ continue;
+
+ Assert(neighbor_subdomains_of_vertex->second.size()!=0,
+ ExcInternalError());
+
+ send_to.insert(neighbor_subdomains_of_vertex->second.begin(),
+ neighbor_subdomains_of_vertex->second.end());
+ }
+
+ if (send_to.size() > 0)
+ {
+ // this cell's data needs to be sent to someone
+ typename MeshType::active_cell_iterator
+ mesh_it (tria, cell->level(), cell->index(), &mesh);
+
+ const boost::optional<DataType> data = pack(mesh_it);
+
+ if (data)
+ {
+ const CellId cellid = cell->id();
+
+ for (auto it : send_to)
+ {
+ const dealii::types::subdomain_id subdomain = it;
+
+ // find the data buffer for proc "subdomain" if it exists
+ // or create an empty one otherwise
+ typename DestinationToBufferMap::iterator p
+ = destination_to_data_buffer_map.insert (std::make_pair(subdomain,
+ CellDataTransferBuffer<dim, DataType>()))
+ .first;
+
+ p->second.cell_ids.emplace_back(cellid);
+ p->second.data.emplace_back(data.get());
+ }
+ }
+ }
+ }
+
+
+ // 2. send our messages
+ std::set<dealii::types::subdomain_id> ghost_owners = tria->ghost_owners();
+ const unsigned int n_ghost_owners = ghost_owners.size();
+ std::vector<std::vector<char> > sendbuffers (n_ghost_owners);
+ std::vector<MPI_Request> requests (n_ghost_owners);
+
+ unsigned int idx=0;
+ for (auto it = ghost_owners.begin();
+ it!=ghost_owners.end();
+ ++it, ++idx)
+ {
+ CellDataTransferBuffer<dim, DataType> &data = destination_to_data_buffer_map[*it];
+
+ // pack all the data into the buffer for this recipient and send it.
+ // keep data around till we can make sure that the packet has been
+ // received
+ sendbuffers[idx] = data.pack_data ();
+ const int ierr = MPI_Isend(sendbuffers[idx].data(), sendbuffers[idx].size(),
+ MPI_BYTE, *it,
+ 786, tria->get_communicator(), &requests[idx]);
+ AssertThrowMPI(ierr);
+ }
+
+ // 3. receive messages
+ std::vector<char> receive;
+ for (unsigned int idx=0; idx<n_ghost_owners; ++idx)
+ {
+ MPI_Status status;
+ int len;
+ int ierr = MPI_Probe(MPI_ANY_SOURCE, 786, tria->get_communicator(), &status);
+ AssertThrowMPI(ierr);
+ ierr = MPI_Get_count(&status, MPI_BYTE, &len);
+ AssertThrowMPI(ierr);
+
+ receive.resize(len);
+
+ char *ptr = receive.data();
+ ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
+ tria->get_communicator(), &status);
+ AssertThrowMPI(ierr);
+
+ CellDataTransferBuffer<dim, DataType> cellinfo;
+ cellinfo.unpack_data(receive);
+
+ DataType *data = cellinfo.data.data();
+ for (unsigned int c=0; c<cellinfo.cell_ids.size(); ++c, ++data)
+ {
+ const typename Triangulation<dim,spacedim>::cell_iterator
+ tria_cell = cellinfo.cell_ids[c].to_cell(*tria);
+
+ const typename MeshType::active_cell_iterator
+ cell (tria, tria_cell->level(), tria_cell->index(), &mesh);
+
+ unpack(cell, *data);
+ }
+ }
+
+ // make sure that all communication is finished
+ // when we leave this function.
+ if (requests.size())
+ {
+ const int ierr = MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+ }
+#endif // DEAL_II_WITH_MPI
+ }
}
#endif
INCLUDE_DIRECTORIES(BEFORE ${CMAKE_CURRENT_BINARY_DIR})
SET(_unity_include_src
- grid_tools.cc
grid_refinement.cc
solution_transfer.cc
tria.cc
)
SET(_inst
- grid_tools.inst.in
grid_refinement.inst.in
solution_transfer.inst.in
tria.inst.in
+++ /dev/null
-// ---------------------------------------------------------------------
-//
-// Copyright (C) 2017 by the deal.II authors
-//
-// This file is part of the deal.II library.
-//
-// The deal.II library is free software; you can use it, redistribute
-// it, and/or modify it under the terms of the GNU Lesser General
-// Public License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-// The full text of the license can be found in the file LICENSE at
-// the top level of the deal.II distribution.
-//
-// ---------------------------------------------------------------------
-
-#include <deal.II/base/point.h>
-#include <deal.II/base/bounding_box.h>
-#include <deal.II/base/mpi.h>
-#include <deal.II/distributed/grid_tools.h>
-
-DEAL_II_NAMESPACE_OPEN
-
-#ifdef DEAL_II_WITH_MPI
-
-namespace GridTools
-{
- template<int spacedim>
- std::vector< std::vector< BoundingBox<spacedim> > >
- exchange_local_bounding_boxes(const std::vector< BoundingBox<spacedim> > &local_bboxes,
- MPI_Comm mpi_communicator)
- {
-#ifndef DEAL_II_WITH_MPI
- (void)local_bboxes;
- (void)mpi_communicator;
- Assert(false, ExcMessage("parallel::GridTools::exchange_local_bounding_boxes() requires MPI."));
-#else
- // Step 1: preparing data to be sent
- unsigned int n_bboxes = local_bboxes.size();
- // Dimension of the array to be exchanged (number of double)
- int n_local_data = 2*spacedim*n_bboxes;
- // data array stores each entry of each point describing the bounding boxes
- std::vector<double> loc_data_array(n_local_data);
- for (unsigned int i=0; i<n_bboxes; ++i)
- for (unsigned int d=0; d < spacedim; ++d)
- {
- // Extracting the coordinates of each boundary point
- loc_data_array[2*i*spacedim + d] = local_bboxes[i].get_boundary_points().first[d];
- loc_data_array[2*i*spacedim + spacedim + d] = local_bboxes[i].get_boundary_points().second[d];
- }
-
- // Step 2: exchanging the size of local data
- unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_communicator);
-
- // Vector to store the size of loc_data_array for every process
- std::vector<int> size_all_data(n_procs);
-
- // Exchanging the number of bboxes
- MPI_Allgather(&n_local_data, 1, MPI_INT,
- &(size_all_data[0]), 1, MPI_INT,
- mpi_communicator);
-
- // Now computing the the displacement, relative to recvbuf,
- // at which to store the incoming data
- std::vector<int> rdispls(n_procs);
- rdispls[0] = 0;
- for (unsigned int i=1; i < n_procs; ++i)
- rdispls[i] = rdispls[i-1] + size_all_data[i-1];
-
- // Step 3: exchange the data and bounding boxes:
- // Allocating a vector to contain all the received data
- std::vector<double> data_array(rdispls.back() + size_all_data.back());
-
- MPI_Allgatherv(&(loc_data_array[0]), n_local_data, MPI_DOUBLE,
- &(data_array[0]), &(size_all_data[0]),
- &(rdispls[0]), MPI_DOUBLE, mpi_communicator);
-
- // Step 4: create the array of bboxes for output
- std::vector< std::vector< BoundingBox<spacedim> > > global_bboxes(n_procs);
- unsigned int begin_idx = 0;
- for (unsigned int i=0; i < n_procs; ++i)
- {
- // Number of local bounding boxes
- unsigned int n_bbox_i = size_all_data[i]/(spacedim*2);
- global_bboxes[i].resize(n_bbox_i);
- for (unsigned int bbox=0; bbox<n_bbox_i; ++bbox)
- {
- Point<spacedim> p1,p2; // boundary points for bbox
- for (unsigned int d=0; d<spacedim; ++d)
- {
- p1[d] = data_array[ begin_idx + 2*bbox*spacedim + d];
- p2[d] = data_array[ begin_idx + 2*bbox*spacedim + spacedim + d];
- }
- BoundingBox<spacedim> loc_bbox(std::make_pair(p1,p2));
- global_bboxes[i][bbox] = loc_bbox;
- }
- // Shifting the first index to the start of the next vector
- begin_idx += size_all_data[i];
- }
- return global_bboxes;
-#endif // DEAL_II_WITH_MPI
- }
-}
-
-// explicit instantiations
-#include "grid_tools.inst"
-
-#endif // DEAL_II_WITH_MPI
-DEAL_II_NAMESPACE_CLOSE
+++ /dev/null
-// ---------------------------------------------------------------------
-//
-// Copyright (C) 2017 by the deal.II authors
-//
-// This file is part of the deal.II library.
-//
-// The deal.II library is free software; you can use it, redistribute
-// it, and/or modify it under the terms of the GNU Lesser General
-// Public License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-// The full text of the license can be found in the file LICENSE at
-// the top level of the deal.II distribution.
-//
-// ---------------------------------------------------------------------
-
-for (deal_II_space_dimension : SPACE_DIMENSIONS)
-{
- namespace GridTools \{
- template
- std::vector< std::vector< BoundingBox<deal_II_space_dimension> > >
- exchange_local_bounding_boxes(const std::vector< BoundingBox<deal_II_space_dimension> >&,
- MPI_Comm);
- \}
-}
#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/thread_management.h>
#include <deal.II/base/partitioner.h>
+#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe.h>
#include <deal.II/distributed/shared_tria.h>
#include <deal.II/distributed/tria.h>
-#include <deal.II/distributed/grid_tools.h>
DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
#include <boost/archive/binary_oarchive.hpp>
cell_hint,
marked_vertices);
}
+
+ template<int spacedim>
+ std::vector< std::vector< BoundingBox<spacedim> > >
+ exchange_local_bounding_boxes(const std::vector< BoundingBox<spacedim> > &local_bboxes,
+ MPI_Comm mpi_communicator)
+ {
+#ifndef DEAL_II_WITH_MPI
+ (void)local_bboxes;
+ (void)mpi_communicator;
+ Assert(false, ExcMessage("GridTools::exchange_local_bounding_boxes() requires MPI."));
+#else
+ // Step 1: preparing data to be sent
+ unsigned int n_bboxes = local_bboxes.size();
+    // Dimension of the array to be exchanged (number of doubles)
+ int n_local_data = 2*spacedim*n_bboxes;
+ // data array stores each entry of each point describing the bounding boxes
+ std::vector<double> loc_data_array(n_local_data);
+ for (unsigned int i=0; i<n_bboxes; ++i)
+ for (unsigned int d=0; d < spacedim; ++d)
+ {
+ // Extracting the coordinates of each boundary point
+ loc_data_array[2*i*spacedim + d] = local_bboxes[i].get_boundary_points().first[d];
+ loc_data_array[2*i*spacedim + spacedim + d] = local_bboxes[i].get_boundary_points().second[d];
+ }
+
+ // Step 2: exchanging the size of local data
+ unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_communicator);
+
+ // Vector to store the size of loc_data_array for every process
+ std::vector<int> size_all_data(n_procs);
+
+ // Exchanging the number of bboxes
+ MPI_Allgather(&n_local_data, 1, MPI_INT,
+ &(size_all_data[0]), 1, MPI_INT,
+ mpi_communicator);
+
+    // Now computing the displacement, relative to recvbuf,
+ // at which to store the incoming data
+ std::vector<int> rdispls(n_procs);
+ rdispls[0] = 0;
+ for (unsigned int i=1; i < n_procs; ++i)
+ rdispls[i] = rdispls[i-1] + size_all_data[i-1];
+
+ // Step 3: exchange the data and bounding boxes:
+ // Allocating a vector to contain all the received data
+ std::vector<double> data_array(rdispls.back() + size_all_data.back());
+
+ MPI_Allgatherv(&(loc_data_array[0]), n_local_data, MPI_DOUBLE,
+ &(data_array[0]), &(size_all_data[0]),
+ &(rdispls[0]), MPI_DOUBLE, mpi_communicator);
+
+ // Step 4: create the array of bboxes for output
+ std::vector< std::vector< BoundingBox<spacedim> > > global_bboxes(n_procs);
+ unsigned int begin_idx = 0;
+ for (unsigned int i=0; i < n_procs; ++i)
+ {
+ // Number of local bounding boxes
+ unsigned int n_bbox_i = size_all_data[i]/(spacedim*2);
+ global_bboxes[i].resize(n_bbox_i);
+ for (unsigned int bbox=0; bbox<n_bbox_i; ++bbox)
+ {
+ Point<spacedim> p1,p2; // boundary points for bbox
+ for (unsigned int d=0; d<spacedim; ++d)
+ {
+ p1[d] = data_array[ begin_idx + 2*bbox*spacedim + d];
+ p2[d] = data_array[ begin_idx + 2*bbox*spacedim + spacedim + d];
+ }
+ BoundingBox<spacedim> loc_bbox(std::make_pair(p1,p2));
+ global_bboxes[i][bbox] = loc_bbox;
+ }
+ // Shifting the first index to the start of the next vector
+ begin_idx += size_all_data[i];
+ }
+ return global_bboxes;
+#endif // DEAL_II_WITH_MPI
+ }
+
} /* namespace GridTools */
template unsigned int
GridTools::find_closest_vertex(const std::map<unsigned int,Point<deal_II_space_dimension> >& vertices,
const Point<deal_II_space_dimension>& p);
+
+ template std::vector< std::vector< BoundingBox<deal_II_space_dimension> > >
+ GridTools::exchange_local_bounding_boxes(const std::vector< BoundingBox<deal_II_space_dimension> >&,
+ MPI_Comm);
}
#include <deal.II/hp/dof_faces.h>
#include <deal.II/dofs/dof_accessor.h>
#include <deal.II/dofs/dof_handler_policy.h>
+#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/grid/tria_levels.h>
#include <deal.II/fe/fe.h>
#include <deal.II/distributed/shared_tria.h>
#include <deal.II/distributed/tria.h>
-#include <deal.II/distributed/grid_tools.h>
#include <boost/serialization/array.hpp>
#include <deal.II/base/point.h>
#include <deal.II/base/bounding_box.h>
#include <deal.II/base/mpi.h>
-#include <deal.II/distributed/grid_tools.h>
+#include <deal.II/grid/grid_tools.h>
template <int spacedim>
void test_exchange_bbox()
#include "../tests.h"
#include <deal.II/base/logstream.h>
#include <deal.II/distributed/tria.h>
-#include <deal.II/distributed/grid_tools.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/grid_out.h>
#include "../tests.h"
#include <deal.II/base/logstream.h>
#include <deal.II/distributed/tria.h>
-#include <deal.II/distributed/grid_tools.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/grid_out.h>
#include <deal.II/base/logstream.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/distributed/shared_tria.h>
-#include <deal.II/distributed/grid_tools.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/grid_out.h>
#include "../tests.h"
#include <deal.II/base/logstream.h>
#include <deal.II/distributed/tria.h>
-#include <deal.II/distributed/grid_tools.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/grid_out.h>