From 1c827777d28344e1653c237abdc9841c3d747298 Mon Sep 17 00:00:00 2001
From: Timo Heister
Date: Sat, 12 Aug 2017 10:59:58 -0600
Subject: [PATCH] introduce GridTools::exchange_cell_data_to_ghosts

---
 include/deal.II/distributed/grid_tools.h      | 333 ++++++++++++++++++
 include/deal.II/distributed/tria.h            |   2 +-
 include/deal.II/distributed/tria_base.h       |   8 +
 source/distributed/tria_base.cc               |  33 ++
 .../grid_tools_exchange_cell_data_01.cc       |  86 +++++
 ...ll_data_01.with_p4est=true.mpirun=2.output |  87 +++++
 ...ll_data_01.with_p4est=true.mpirun=3.output | 159 +++++++++
 .../grid_tools_exchange_cell_data_02.cc       |  93 +++++
 ...ll_data_02.with_p4est=true.mpirun=2.output |  87 +++++
 ...ll_data_02.with_p4est=true.mpirun=3.output | 159 +++++++++
 10 files changed, 1046 insertions(+), 1 deletion(-)
 create mode 100644 include/deal.II/distributed/grid_tools.h
 create mode 100644 tests/distributed_grids/grid_tools_exchange_cell_data_01.cc
 create mode 100644 tests/distributed_grids/grid_tools_exchange_cell_data_01.with_p4est=true.mpirun=2.output
 create mode 100644 tests/distributed_grids/grid_tools_exchange_cell_data_01.with_p4est=true.mpirun=3.output
 create mode 100644 tests/distributed_grids/grid_tools_exchange_cell_data_02.cc
 create mode 100644 tests/distributed_grids/grid_tools_exchange_cell_data_02.with_p4est=true.mpirun=2.output
 create mode 100644 tests/distributed_grids/grid_tools_exchange_cell_data_02.with_p4est=true.mpirun=3.output

diff --git a/include/deal.II/distributed/grid_tools.h b/include/deal.II/distributed/grid_tools.h
new file mode 100644
index 0000000000..41950701ee
--- /dev/null
+++ b/include/deal.II/distributed/grid_tools.h
@@ -0,0 +1,333 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii__distributed_grid_tools_h
+#define dealii__distributed_grid_tools_h
+
+
+#include <deal.II/base/config.h>
+#include <deal.II/base/exceptions.h>
+#include <deal.II/distributed/tria_base.h>
+#include <deal.II/grid/tria.h>
+
+DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
+#include <boost/archive/binary_oarchive.hpp>
+#include <boost/archive/binary_iarchive.hpp>
+#include <boost/serialization/vector.hpp>
+#include <boost/serialization/array.hpp>
+#include <boost/iostreams/filtering_stream.hpp>
+#include <boost/iostreams/device/back_inserter.hpp>
+#include <boost/iostreams/filter/gzip.hpp>
+DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
+
+#include <functional>
+#include <set>
+#include <vector>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace parallel
+{
+
+  namespace GridTools
+  {
+    /**
+     * Exchange arbitrary data of type @p DataType provided by the function
+     * objects from locally owned cells to ghost cells on other processors.
+     *
+     * After this call, @p unpack will have been called on every ghost cell,
+     * with exactly the data that @p pack produced on the processor that
+     * owns that cell.
+     *
+     * @tparam MeshType The type of @p mesh.
+     * @tparam DataType The type of the data to be communicated. It is
+     *   assumed to be serializable by boost::serialization.
+     *
+     * @param mesh A variable of a type that satisfies the requirements of
+     *   the @ref ConceptMeshType "MeshType concept".
+     * @param pack The function that will be called on each locally owned
+     *   cell that needs to be sent.
+     * @param unpack The function that will be called for each ghost cell
+     *   with the data imported from the other processors.
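+     *
+     * To give an idea of the usage, the following is a sketch in which
+     * @p compute_local_value and @p store_ghost_value are placeholders for
+     * whatever the caller wants to do on each cell:
+     * @code
+     * typedef typename parallel::distributed::Triangulation<dim>::active_cell_iterator
+     *   cell_iterator;
+     *
+     * parallel::GridTools::exchange_cell_data_to_ghosts<
+     *   parallel::distributed::Triangulation<dim>, double>
+     * (tria,
+     *  [] (const cell_iterator &cell) -> double
+     *  {
+     *    // called on the owning processor for each locally owned cell
+     *    // that is a ghost cell somewhere else:
+     *    return compute_local_value(cell);
+     *  },
+     *  [] (const cell_iterator &cell, const double &value)
+     *  {
+     *    // called on the receiving processor for each of its ghost cells:
+     *    store_ghost_value(cell, value);
+     *  });
+     * @endcode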
+     */
+    template <typename MeshType, typename DataType>
+    void
+    exchange_cell_data_to_ghosts (const MeshType &mesh,
+                                  std::function<DataType (const typename MeshType::active_cell_iterator &)> pack,
+                                  std::function<void (const typename MeshType::active_cell_iterator &,
+                                                      const DataType &)> unpack);
+  }
+
+
+}
+
+#ifndef DOXYGEN
+
+namespace parallel
+{
+  namespace GridTools
+  {
+
+    namespace internal
+    {
+      /**
+       * A structure that allows the transfer of data of type @p T from one
+       * processor to another. It corresponds to a packed buffer that stores
+       * a list of cells and an array of type @p T.
+       *
+       * The vector @p data has the same size as @p cell_ids.
+       */
+      template <int dim, typename T>
+      struct CellDataTransferBuffer
+      {
+        std::vector<CellId> cell_ids;
+        std::vector<T> data;
+
+        /**
+         * Write the data of this object to a stream for the purpose of
+         * serialization.
+         */
+        template <class Archive>
+        void save (Archive &ar,
+                   const unsigned int /*version*/) const
+        {
+          Assert(cell_ids.size() == data.size(), ExcInternalError());
+          // archive the cell ids in an efficient binary format
+          const size_t n_cells = cell_ids.size();
+          ar &n_cells;
+          for (auto &it : cell_ids)
+            {
+              CellId::binary_type binary_cell_id = it.template to_binary<dim>();
+              ar &binary_cell_id;
+            }
+
+          ar &data;
+        }
+
+        /**
+         * Read the data of this object from a stream for the purpose of
+         * serialization. Throw away the previous content.
+         */
+        template <class Archive>
+        void load (Archive &ar,
+                   const unsigned int /*version*/)
+        {
+          size_t n_cells;
+          ar &n_cells;
+          cell_ids.clear();
+          cell_ids.reserve(n_cells);
+          for (unsigned int c=0; c<n_cells; ++c)
+            {
+              CellId::binary_type value;
+              ar &value;
+              cell_ids.emplace_back(value);
+            }
+          ar &data;
+        }
+
+        // save() and load() differ, so tell boost::serialization to use
+        // the split member functions above
+        BOOST_SERIALIZATION_SPLIT_MEMBER()
+
+        /**
+         * Pack the data that corresponds to this object into a buffer in
+         * the form of a vector of chars and return it.
+         */
+        std::vector<char>
+        pack_data () const
+        {
+          // set up a buffer and then use it as the target of a compressing
+          // stream into which we serialize the current object
+          std::vector<char> buffer;
+          {
+            boost::iostreams::filtering_ostream out;
+            out.push(boost::iostreams::gzip_compressor
+                     (boost::iostreams::gzip_params
+                      (boost::iostreams::gzip::best_compression)));
+            out.push(boost::iostreams::back_inserter(buffer));
+
+            boost::archive::binary_oarchive archive(out);
+
+            archive << *this;
+            out.flush();
+          }
+
+          return buffer;
+        }
+
+
+        /**
+         * Given a buffer in the form of an array of chars, unpack it and
+         * restore the current object to the state that it was in when
+         * it was packed into said buffer by the pack_data() function.
+         */
+        void unpack_data (const std::vector<char> &buffer)
+        {
+          std::string decompressed_buffer;
+
+          // first decompress the buffer
+          {
+            boost::iostreams::filtering_ostream decompressing_stream;
+            decompressing_stream.push(boost::iostreams::gzip_decompressor());
+            decompressing_stream.push(boost::iostreams::back_inserter(decompressed_buffer));
+
+            decompressing_stream.write (&buffer[0], buffer.size());
+          }
+
+          // then restore the object from the buffer
+          std::istringstream in(decompressed_buffer);
+          boost::archive::binary_iarchive archive(in);
+
+          archive >> *this;
+        }
+      };
+
+    }
+
+
+    template <typename MeshType, typename DataType>
+    void
+    exchange_cell_data_to_ghosts (const MeshType &mesh,
+                                  std::function<DataType (const typename MeshType::active_cell_iterator &)> pack,
+                                  std::function<void (const typename MeshType::active_cell_iterator &,
+                                                      const DataType &)> unpack)
+    {
+      constexpr int dim = MeshType::dimension;
+      constexpr int spacedim = MeshType::space_dimension;
+      auto tria =
+        dynamic_cast<const parallel::Triangulation<dim, spacedim>*>(&mesh.get_triangulation());
+      Assert(tria != nullptr,
+             ExcMessage("The function exchange_cell_data_to_ghosts() only works with parallel triangulations."));
+
+      // 1. build a map neighbor_id -> data_buffer in which we accumulate
+      // the data to send
+      typedef std::map<dealii::types::subdomain_id, internal::CellDataTransferBuffer<dim, DataType> >
+      cellmap_t;
+      cellmap_t destination_to_data_buffer;
+
+      std::map<unsigned int, std::set<dealii::types::subdomain_id> >
+      vertices_with_ghost_neighbors = tria->compute_vertices_with_ghost_neighbors();
+
+      for (auto cell : tria->active_cell_iterators())
+        if (cell->is_locally_owned())
+          {
+            std::set<dealii::types::subdomain_id> send_to;
+            for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
+              {
+                const std::map<unsigned int, std::set<dealii::types::subdomain_id> >::const_iterator
+                neighbor_subdomains_of_vertex
+                  = vertices_with_ghost_neighbors.find (cell->vertex_index(v));
+
+                if (neighbor_subdomains_of_vertex ==
+                    vertices_with_ghost_neighbors.end())
+                  continue;
+
+                Assert(neighbor_subdomains_of_vertex->second.size()!=0,
+                       ExcInternalError());
+
+                send_to.insert(neighbor_subdomains_of_vertex->second.begin(),
+                               neighbor_subdomains_of_vertex->second.end());
+              }
+
+            if (send_to.size() > 0)
+              {
+                // this cell's data needs to be sent to someone
+                typename MeshType::active_cell_iterator
+                mesh_it (tria, cell->level(), cell->index(), &mesh);
+
+                const DataType data = pack(mesh_it);
+                const CellId cellid = cell->id();
+
+                for (auto it : send_to)
+                  {
+                    const dealii::types::subdomain_id subdomain = it;
+
+                    // find the data buffer for proc "subdomain" if it exists
+                    // or create an empty one otherwise
+                    typename cellmap_t::iterator p
+                      = destination_to_data_buffer.insert (std::make_pair(subdomain,
+                                                                          internal::CellDataTransferBuffer<dim, DataType>()))
+                        .first;
+
+                    p->second.cell_ids.emplace_back(cellid);
+                    p->second.data.emplace_back(data);
+                  }
+              }
+          }
+
+
+      // 2. send our messages
+      std::set<dealii::types::subdomain_id> ghost_owners = tria->ghost_owners();
+      const unsigned int n_ghost_owners = ghost_owners.size();
+      std::vector<std::vector<char> > sendbuffers (n_ghost_owners);
+      std::vector<MPI_Request> requests (n_ghost_owners);
+
+      unsigned int idx = 0;
+      for (auto it = ghost_owners.begin();
+           it != ghost_owners.end();
+           ++it, ++idx)
+        {
+          internal::CellDataTransferBuffer<dim, DataType> &data = destination_to_data_buffer[*it];
+
+          // pack all the data into the buffer for this recipient and send
+          // it. keep the data around until we can be sure that the packet
+          // has been received
+          sendbuffers[idx] = data.pack_data ();
+          const int ierr = MPI_Isend(sendbuffers[idx].data(), sendbuffers[idx].size(),
+                                     MPI_BYTE, *it,
+                                     786, tria->get_communicator(), &requests[idx]);
+          AssertThrowMPI(ierr);
+        }
+
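+      // note: the message tag (786, an arbitrary value) passed to the
+      // MPI_Isend calls above is the same tag that the MPI_Probe below
+      // waits for, and the send buffers have to stay alive until all of
+      // these sends have completed.
+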
+      // 3. receive messages
+      std::vector<char> receive;
+      for (unsigned int idx=0; idx<n_ghost_owners; ++idx)
+        {
+          MPI_Status status;
+          int len;
+          int ierr = MPI_Probe(MPI_ANY_SOURCE, 786, tria->get_communicator(), &status);
+          AssertThrowMPI(ierr);
+          ierr = MPI_Get_count(&status, MPI_BYTE, &len);
+          AssertThrowMPI(ierr);
+
+          receive.resize(len);
+
+          char *ptr = &receive[0];
+          ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
+                          tria->get_communicator(), &status);
+          AssertThrowMPI(ierr);
+
+          internal::CellDataTransferBuffer<dim, DataType> cellinfo;
+          cellinfo.unpack_data(receive);
+          Assert (cellinfo.cell_ids.size() > 0, ExcInternalError());
+
+          DataType *data = cellinfo.data.data();
+          for (unsigned int c=0; c<cellinfo.cell_ids.size(); ++c, ++data)
+            {
+              const typename Triangulation<dim,spacedim>::cell_iterator
+              tria_cell = cellinfo.cell_ids[c].to_cell(*tria);
+
+              const typename MeshType::active_cell_iterator
+              cell (tria, tria_cell->level(), tria_cell->index(), &mesh);
+
+              unpack(cell, *data);
+            }
+        }
+
+      // 4. make sure all of our sends have completed before the send
+      // buffers go out of scope
+      if (requests.size() > 0)
+        {
+          const int ierr = MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+          AssertThrowMPI(ierr);
+        }
+    }
+
+  }
+}
+
+#endif // DOXYGEN
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
diff --git a/include/deal.II/distributed/tria.h b/include/deal.II/distributed/tria.h
index 4b1e00b36e..238a0c6e05 100644
--- a/include/deal.II/distributed/tria.h
+++ b/include/deal.II/distributed/tria.h
@@ -862,7 +862,7 @@ namespace parallel
        * subdomains are adjacent to that vertex. Used by
        * DoFHandler::Policy::ParallelDistributed.
        */
-      std::map<unsigned int, std::set<dealii::types::subdomain_id> >
+      virtual std::map<unsigned int, std::set<dealii::types::subdomain_id> >
       compute_vertices_with_ghost_neighbors () const;
 
       /**
diff --git a/include/deal.II/distributed/tria_base.h b/include/deal.II/distributed/tria_base.h
index d693dbbaaa..a4cd583d7e 100644
--- a/include/deal.II/distributed/tria_base.h
+++ b/include/deal.II/distributed/tria_base.h
@@ -151,6 +151,14 @@ namespace parallel
     const std::set<types::subdomain_id> &
     level_ghost_owners () const;
 
+    /**
+     * Return a map that, for each vertex, lists all the processors whose
+     * subdomains are adjacent to that vertex. Used by
+     * DoFHandler::Policy::ParallelDistributed.
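+     *
+     * As a (purely illustrative) sketch of the returned data structure: if
+     * vertex 13 of the locally owned cells is also touched by ghost cells
+     * owned by processors 1 and 2, then the returned map satisfies
+     * @code
+     *   result[13] == std::set<dealii::types::subdomain_id>({1, 2})
+     * @endcode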
+     */
+    virtual std::map<unsigned int, std::set<dealii::types::subdomain_id> >
+    compute_vertices_with_ghost_neighbors () const;
+
   protected:
     /**
      * MPI communicator to be used for the triangulation. We create a unique
diff --git a/source/distributed/tria_base.cc b/source/distributed/tria_base.cc
index 98ce2ba420..59a50da15c 100644
--- a/source/distributed/tria_base.cc
+++ b/source/distributed/tria_base.cc
@@ -232,6 +232,8 @@ namespace parallel
     return my_subdomain;
   }
 
+
+
   template <int dim, int spacedim>
   const std::set<types::subdomain_id> &
   Triangulation<dim,spacedim>::
@@ -240,6 +242,8 @@ namespace parallel
     return number_cache.ghost_owners;
   }
 
+
+
   template <int dim, int spacedim>
   const std::set<types::subdomain_id> &
   Triangulation<dim,spacedim>::
@@ -248,6 +252,35 @@ namespace parallel
     return number_cache.level_ghost_owners;
   }
 
+
+
+  template <int dim, int spacedim>
+  std::map<unsigned int, std::set<types::subdomain_id> >
+  Triangulation<dim,spacedim>::
+  compute_vertices_with_ghost_neighbors () const
+  {
+    std::vector<bool> vertex_of_own_cell(this->n_vertices(), false);
+
+    for (auto cell : this->active_cell_iterators())
+      if (cell->is_locally_owned())
+        for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
+          vertex_of_own_cell[cell->vertex_index(v)] = true;
+
+    std::map<unsigned int, std::set<types::subdomain_id> > result;
+    for (auto cell : this->active_cell_iterators())
+      if (cell->is_ghost())
+        {
+          const types::subdomain_id owner = cell->subdomain_id();
+          for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
+            {
+              if (vertex_of_own_cell[cell->vertex_index(v)])
+                result[cell->vertex_index(v)].insert(owner);
+            }
+        }
+
+    return result;
+  }
+
 } // end namespace parallel
diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_01.cc b/tests/distributed_grids/grid_tools_exchange_cell_data_01.cc
new file mode 100644
index 0000000000..3951508412
--- /dev/null
+++ b/tests/distributed_grids/grid_tools_exchange_cell_data_01.cc
@@ -0,0 +1,86 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2001 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// test GridTools::exchange_cell_data_to_ghosts
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/distributed/grid_tools.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/filtered_iterator.h>
+
+#include <algorithm>
+
+template <int dim>
+void test ()
+{
+  const MPI_Comm &mpi_communicator = MPI_COMM_WORLD;
+  deallog << "dim = " << dim << std::endl;
+
+  parallel::distributed::Triangulation<dim> tria (mpi_communicator);
+  GridGenerator::hyper_cube(tria);
+  tria.refine_global(2);
+
+  std::set<std::string> output;
+
+  typedef typename parallel::distributed::Triangulation<dim>::active_cell_iterator cell_iterator;
+  typedef double DT;
+  DT counter = 0.0;
+  parallel::GridTools::exchange_cell_data_to_ghosts<
+  parallel::distributed::Triangulation<dim>,
+  DT> (tria,
+       [&](const cell_iterator &cell)
+  {
+    DT value = ++counter;
+
+    deallog << "pack "
+            << cell->id()
+            << " "
+            << value << std::endl;
+    return value;
+  },
+  [&](const cell_iterator &cell, const DT &data)
+  {
+    std::ostringstream oss;
+    oss << "unpack "
+        << cell->id()
+        << " "
+        << data
+        << " from "
+        << cell->subdomain_id();
+
+    output.insert(oss.str());
+  });
+
+  // sort the output because it will arrive in random order
+  for (auto &it : output)
+    deallog << it << std::endl;
+}
+
+
+int main (int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
+  MPILogInitAll log;
+
+  test<2> ();
+  test<3> ();
+
+  return 0;
+}
diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_01.with_p4est=true.mpirun=2.output b/tests/distributed_grids/grid_tools_exchange_cell_data_01.with_p4est=true.mpirun=2.output
new file mode 100644
index 0000000000..a4f4232cee
--- /dev/null
+++ b/tests/distributed_grids/grid_tools_exchange_cell_data_01.with_p4est=true.mpirun=2.output
@@ -0,0 +1,87 @@
+
+DEAL:0::dim = 2
+DEAL:0::pack 0_2:02 1.00000
+DEAL:0::pack 0_2:03 2.00000
+DEAL:0::pack 0_2:12 3.00000
+DEAL:0::pack 0_2:13 4.00000
+DEAL:0::unpack 0_2:20 1.00000 from 1
+DEAL:0::unpack 0_2:21 2.00000 from 1
+DEAL:0::unpack 0_2:30 3.00000 from 1
+DEAL:0::unpack 0_2:31 4.00000 from 1
+DEAL:0::dim = 3
+DEAL:0::pack 0_2:04 1.00000
+DEAL:0::pack 0_2:05 2.00000
+DEAL:0::pack 0_2:06 3.00000
+DEAL:0::pack 0_2:07 4.00000
+DEAL:0::pack 0_2:14 5.00000
+DEAL:0::pack 0_2:15 6.00000
+DEAL:0::pack 0_2:16 7.00000
+DEAL:0::pack 0_2:17 8.00000
+DEAL:0::pack 0_2:24 9.00000
+DEAL:0::pack 0_2:25 10.0000
+DEAL:0::pack 0_2:26 11.0000
+DEAL:0::pack 0_2:27 12.0000
+DEAL:0::pack 0_2:34 13.0000
+DEAL:0::pack 0_2:35 14.0000
+DEAL:0::pack 0_2:36 15.0000
+DEAL:0::pack 0_2:37 16.0000
+DEAL:0::unpack 0_2:40 1.00000 from 1
+DEAL:0::unpack 0_2:41 2.00000 from 1
+DEAL:0::unpack 0_2:42 3.00000 from 1
+DEAL:0::unpack 0_2:43 4.00000 from 1
+DEAL:0::unpack 0_2:50 5.00000 from 1
+DEAL:0::unpack 0_2:51 6.00000 from 1
+DEAL:0::unpack 0_2:52 7.00000 from 1
+DEAL:0::unpack 0_2:53 8.00000 from 1
+DEAL:0::unpack 0_2:60 9.00000 from 1
+DEAL:0::unpack 0_2:61 10.0000 from 1
+DEAL:0::unpack 0_2:62 11.0000 from 1
+DEAL:0::unpack 0_2:63 12.0000 from 1
+DEAL:0::unpack 0_2:70 13.0000 from 1
+DEAL:0::unpack 0_2:71 14.0000 from 1
+DEAL:0::unpack 0_2:72 15.0000 from 1
+DEAL:0::unpack 0_2:73 16.0000 from 1
+
+DEAL:1::dim = 2
+DEAL:1::pack 0_2:20 1.00000
+DEAL:1::pack 0_2:21 2.00000
+DEAL:1::pack 0_2:30 3.00000
+DEAL:1::pack 0_2:31 4.00000
+DEAL:1::unpack 0_2:02 1.00000 from 0
+DEAL:1::unpack 0_2:03 2.00000 from 0
+DEAL:1::unpack 0_2:12 3.00000 from 0
+DEAL:1::unpack 0_2:13 4.00000 from 0
+DEAL:1::dim = 3
+DEAL:1::pack 0_2:40 1.00000
+DEAL:1::pack 0_2:41 2.00000
+DEAL:1::pack 0_2:42 3.00000
+DEAL:1::pack 0_2:43 4.00000
+DEAL:1::pack 0_2:50 5.00000
+DEAL:1::pack 0_2:51 6.00000
+DEAL:1::pack 0_2:52 7.00000
+DEAL:1::pack 0_2:53 8.00000
+DEAL:1::pack 0_2:60 9.00000
+DEAL:1::pack 0_2:61 10.0000
+DEAL:1::pack 0_2:62 11.0000
+DEAL:1::pack 0_2:63 12.0000
+DEAL:1::pack 0_2:70 13.0000
+DEAL:1::pack 0_2:71 14.0000
+DEAL:1::pack 0_2:72 15.0000
+DEAL:1::pack 0_2:73 16.0000
+DEAL:1::unpack 0_2:04 1.00000 from 0
+DEAL:1::unpack 0_2:05 2.00000 from 0
+DEAL:1::unpack 0_2:06 3.00000 from 0
+DEAL:1::unpack 0_2:07 4.00000 from 0
+DEAL:1::unpack 0_2:14 5.00000 from 0
+DEAL:1::unpack 0_2:15 6.00000 from 0
+DEAL:1::unpack 0_2:16 7.00000 from 0
+DEAL:1::unpack 0_2:17 8.00000 from 0
+DEAL:1::unpack 0_2:24 9.00000 from 0
+DEAL:1::unpack 0_2:25 10.0000 from 0
+DEAL:1::unpack 0_2:26 11.0000 from 0
+DEAL:1::unpack 0_2:27 12.0000 from 0
+DEAL:1::unpack 0_2:34 13.0000 from 0
+DEAL:1::unpack 0_2:35 14.0000 from 0
+DEAL:1::unpack 0_2:36 15.0000 from 0
+DEAL:1::unpack 0_2:37 16.0000 from 0
+
diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_01.with_p4est=true.mpirun=3.output b/tests/distributed_grids/grid_tools_exchange_cell_data_01.with_p4est=true.mpirun=3.output
new file mode 100644
index 0000000000..1cf882a6de
--- /dev/null
+++ b/tests/distributed_grids/grid_tools_exchange_cell_data_01.with_p4est=true.mpirun=3.output
@@ -0,0 +1,159 @@
+
+DEAL:0::dim = 2
+DEAL:0::pack 0_2:01 1.00000
+DEAL:0::pack 0_2:02 2.00000
+DEAL:0::pack 0_2:03 3.00000
+DEAL:0::unpack 0_2:10 1 from 1
+DEAL:0::unpack 0_2:12 2 from 1
+DEAL:0::unpack 0_2:20 4 from 1
+DEAL:0::unpack 0_2:21 5 from 1
+DEAL:0::unpack 0_2:30 1 from 2
+DEAL:0::dim = 3
+DEAL:0::pack 0_2:03 1.00000
+DEAL:0::pack 0_2:04 2.00000
+DEAL:0::pack 0_2:05 3.00000
+DEAL:0::pack 0_2:06 4.00000
+DEAL:0::pack 0_2:07 5.00000
+DEAL:0::pack 0_2:12 6.00000
+DEAL:0::pack 0_2:13 7.00000
+DEAL:0::pack 0_2:14 8.00000
+DEAL:0::pack 0_2:15 9.00000
+DEAL:0::pack 0_2:16 10.0000
+DEAL:0::pack 0_2:17 11.0000
+DEAL:0::pack 0_2:21 12.0000
+DEAL:0::pack 0_2:23 13.0000
+DEAL:0::pack 0_2:24 14.0000
+DEAL:0::pack 0_2:25 15.0000
+DEAL:0::pack 0_2:26 16.0000
+DEAL:0::pack 0_2:27 17.0000
+DEAL:0::unpack 0_2:30 1 from 1
+DEAL:0::unpack 0_2:31 2 from 1
+DEAL:0::unpack 0_2:32 3 from 1
+DEAL:0::unpack 0_2:34 4 from 1
+DEAL:0::unpack 0_2:35 5 from 1
+DEAL:0::unpack 0_2:36 6 from 1
+DEAL:0::unpack 0_2:40 8 from 1
+DEAL:0::unpack 0_2:41 9 from 1
+DEAL:0::unpack 0_2:42 10 from 1
+DEAL:0::unpack 0_2:43 11 from 1
+DEAL:0::unpack 0_2:50 1 from 2
+DEAL:0::unpack 0_2:51 2 from 2
+DEAL:0::unpack 0_2:52 3 from 2
+DEAL:0::unpack 0_2:53 4 from 2
+DEAL:0::unpack 0_2:60 7 from 2
+DEAL:0::unpack 0_2:61 8 from 2
+DEAL:0::unpack 0_2:62 9 from 2
+DEAL:0::unpack 0_2:63 10 from 2
+DEAL:0::unpack 0_2:70 13 from 2
+DEAL:0::unpack 0_2:71 14 from 2
+DEAL:0::unpack 0_2:72 15 from 2
+
+DEAL:1::dim = 2
+DEAL:1::pack 0_2:10 1.00000
+DEAL:1::pack 0_2:12 2.00000
+DEAL:1::pack 0_2:13 3.00000
+DEAL:1::pack 0_2:20 4.00000
+DEAL:1::pack 0_2:21 5.00000
+DEAL:1::pack 0_2:23 6.00000
+DEAL:1::unpack 0_2:01 1 from 0
+DEAL:1::unpack 0_2:02 2 from 0
+DEAL:1::unpack 0_2:03 3 from 0
+DEAL:1::unpack 0_2:30 1 from 2
+DEAL:1::unpack 0_2:31 2 from 2
+DEAL:1::unpack 0_2:32 3 from 2
+DEAL:1::dim = 3
+DEAL:1::pack 0_2:30 1.00000
+DEAL:1::pack 0_2:31 2.00000
+DEAL:1::pack 0_2:32 3.00000
+DEAL:1::pack 0_2:34 4.00000
+DEAL:1::pack 0_2:35 5.00000
+DEAL:1::pack 0_2:36 6.00000
+DEAL:1::pack 0_2:37 7.00000
+DEAL:1::pack 0_2:40 8.00000
+DEAL:1::pack 0_2:41 9.00000
+DEAL:1::pack 0_2:42 10.0000
+DEAL:1::pack 0_2:43 11.0000
+DEAL:1::pack 0_2:45 12.0000
+DEAL:1::pack 0_2:46 13.0000
+DEAL:1::pack 0_2:47 14.0000
+DEAL:1::unpack 0_2:03 1 from 0
+DEAL:1::unpack 0_2:04 2 from 0
+DEAL:1::unpack 0_2:05 3 from 0
+DEAL:1::unpack 0_2:06 4 from 0
+DEAL:1::unpack 0_2:07 5 from 0
+DEAL:1::unpack 0_2:12 6 from 0
+DEAL:1::unpack 0_2:13 7 from 0
+DEAL:1::unpack 0_2:14 8 from 0
+DEAL:1::unpack 0_2:16 10 from 0
+DEAL:1::unpack 0_2:17 11 from 0
+DEAL:1::unpack 0_2:21 12 from 0
+DEAL:1::unpack 0_2:23 13 from 0
+DEAL:1::unpack 0_2:24 14 from 0
+DEAL:1::unpack 0_2:25 15 from 0
+DEAL:1::unpack 0_2:27 17 from 0
+DEAL:1::unpack 0_2:50 1 from 2
+DEAL:1::unpack 0_2:52 3 from 2
+DEAL:1::unpack 0_2:53 4 from 2
+DEAL:1::unpack 0_2:54 5 from 2
+DEAL:1::unpack 0_2:56 6 from 2
+DEAL:1::unpack 0_2:60 7 from 2
+DEAL:1::unpack 0_2:61 8 from 2
+DEAL:1::unpack 0_2:63 10 from 2
+DEAL:1::unpack 0_2:64 11 from 2
+DEAL:1::unpack 0_2:65 12 from 2
+DEAL:1::unpack 0_2:70 13 from 2
+DEAL:1::unpack 0_2:71 14 from 2
+DEAL:1::unpack 0_2:72 15 from 2
+DEAL:1::unpack 0_2:73 16 from 2
+DEAL:1::unpack 0_2:74 17 from 2
+
+
+DEAL:2::dim = 2
+DEAL:2::pack 0_2:30 1.00000
+DEAL:2::pack 0_2:31 2.00000
+DEAL:2::pack 0_2:32 3.00000
+DEAL:2::unpack 0_2:03 3 from 0
+DEAL:2::unpack 0_2:12 2 from 1
+DEAL:2::unpack 0_2:13 3 from 1
+DEAL:2::unpack 0_2:21 5 from 1
+DEAL:2::unpack 0_2:23 6 from 1
+DEAL:2::dim = 3
+DEAL:2::pack 0_2:50 1.00000
+DEAL:2::pack 0_2:51 2.00000
+DEAL:2::pack 0_2:52 3.00000
+DEAL:2::pack 0_2:53 4.00000
+DEAL:2::pack 0_2:54 5.00000
+DEAL:2::pack 0_2:56 6.00000
+DEAL:2::pack 0_2:60 7.00000
+DEAL:2::pack 0_2:61 8.00000
+DEAL:2::pack 0_2:62 9.00000
+DEAL:2::pack 0_2:63 10.0000
+DEAL:2::pack 0_2:64 11.0000
+DEAL:2::pack 0_2:65 12.0000
+DEAL:2::pack 0_2:70 13.0000
+DEAL:2::pack 0_2:71 14.0000
+DEAL:2::pack 0_2:72 15.0000
+DEAL:2::pack 0_2:73 16.0000
+DEAL:2::pack 0_2:74 17.0000
+DEAL:2::unpack 0_2:05 3 from 0
+DEAL:2::unpack 0_2:06 4 from 0
+DEAL:2::unpack 0_2:07 5 from 0
+DEAL:2::unpack 0_2:14 8 from 0
+DEAL:2::unpack 0_2:15 9 from 0
+DEAL:2::unpack 0_2:16 10 from 0
+DEAL:2::unpack 0_2:17 11 from 0
+DEAL:2::unpack 0_2:24 14 from 0
+DEAL:2::unpack 0_2:25 15 from 0
+DEAL:2::unpack 0_2:26 16 from 0
+DEAL:2::unpack 0_2:27 17 from 0
+DEAL:2::unpack 0_2:34 4 from 1
+DEAL:2::unpack 0_2:35 5 from 1
+DEAL:2::unpack 0_2:36 6 from 1
+DEAL:2::unpack 0_2:37 7 from 1
+DEAL:2::unpack 0_2:41 9 from 1
+DEAL:2::unpack 0_2:42 10 from 1
+DEAL:2::unpack 0_2:43 11 from 1
+DEAL:2::unpack 0_2:45 12 from 1
+DEAL:2::unpack 0_2:46 13 from 1
+DEAL:2::unpack 0_2:47 14 from 1
+
diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_02.cc b/tests/distributed_grids/grid_tools_exchange_cell_data_02.cc
new file mode 100644
index 0000000000..d66e04d2d4
--- /dev/null
+++ b/tests/distributed_grids/grid_tools_exchange_cell_data_02.cc
@@ -0,0 +1,93 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2001 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// test GridTools::exchange_cell_data_to_ghosts, this time with a DoFHandler
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/distributed/grid_tools.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/filtered_iterator.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/fe/fe_q.h>
+
+#include <algorithm>
+
+template <int dim>
+void test ()
+{
+  const MPI_Comm &mpi_communicator = MPI_COMM_WORLD;
+  deallog << "dim = " << dim << std::endl;
+
+  parallel::distributed::Triangulation<dim> tria (mpi_communicator);
+  GridGenerator::hyper_cube(tria);
+  tria.refine_global(2);
+
+
+  FE_Q<dim> fe(1);
+  DoFHandler<dim> dofhandler(tria);
+  dofhandler.distribute_dofs (fe);
+
+  std::set<std::string> output;
+
+  typedef typename DoFHandler<dim>::active_cell_iterator cell_iterator;
+  typedef short DT;
+  short counter = 0;
+  parallel::GridTools::exchange_cell_data_to_ghosts<
+  DoFHandler<dim>,
+  DT> (dofhandler,
+       [&](const cell_iterator &cell)
+  {
+    DT value = ++counter;
+
+    deallog << "pack "
+            << cell->id()
+            << " "
+            << value << std::endl;
+    return value;
+  },
+  [&](const cell_iterator &cell, const DT &data)
+  {
+    std::ostringstream oss;
+    oss << "unpack "
+        << cell->id()
+        << " "
+        << data
+        << " from "
+        << cell->subdomain_id();
+
+    output.insert(oss.str());
+  });
+
+  // sort the output because it will arrive in random order
+  for (auto &it : output)
+    deallog << it << std::endl;
+}
+
+
+int main (int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
+  MPILogInitAll log;
+
+  test<2> ();
+  test<3> ();
+
+  return 0;
+}
diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_02.with_p4est=true.mpirun=2.output b/tests/distributed_grids/grid_tools_exchange_cell_data_02.with_p4est=true.mpirun=2.output
new file mode 100644
index 0000000000..6e65bdc051
--- /dev/null
+++ b/tests/distributed_grids/grid_tools_exchange_cell_data_02.with_p4est=true.mpirun=2.output
@@ -0,0 +1,87 @@
+
+DEAL:0::dim = 2
+DEAL:0::pack 0_2:02 1
+DEAL:0::pack 0_2:03 2
+DEAL:0::pack 0_2:12 3
+DEAL:0::pack 0_2:13 4
+DEAL:0::unpack 0_2:20 1 from 1
+DEAL:0::unpack 0_2:21 2 from 1
+DEAL:0::unpack 0_2:30 3 from 1
+DEAL:0::unpack 0_2:31 4 from 1
+DEAL:0::dim = 3
+DEAL:0::pack 0_2:04 1
+DEAL:0::pack 0_2:05 2
+DEAL:0::pack 0_2:06 3
+DEAL:0::pack 0_2:07 4
+DEAL:0::pack 0_2:14 5
+DEAL:0::pack 0_2:15 6
+DEAL:0::pack 0_2:16 7
+DEAL:0::pack 0_2:17 8
+DEAL:0::pack 0_2:24 9
+DEAL:0::pack 0_2:25 10
+DEAL:0::pack 0_2:26 11
+DEAL:0::pack 0_2:27 12
+DEAL:0::pack 0_2:34 13
+DEAL:0::pack 0_2:35 14
+DEAL:0::pack 0_2:36 15
+DEAL:0::pack 0_2:37 16
+DEAL:0::unpack 0_2:40 1 from 1
+DEAL:0::unpack 0_2:41 2 from 1
+DEAL:0::unpack 0_2:42 3 from 1
+DEAL:0::unpack 0_2:43 4 from 1
+DEAL:0::unpack 0_2:50 5 from 1
+DEAL:0::unpack 0_2:51 6 from 1
+DEAL:0::unpack 0_2:52 7 from 1
+DEAL:0::unpack 0_2:53 8 from 1
+DEAL:0::unpack 0_2:60 9 from 1
+DEAL:0::unpack 0_2:61 10 from 1
+DEAL:0::unpack 0_2:62 11 from 1
+DEAL:0::unpack 0_2:63 12 from 1
+DEAL:0::unpack 0_2:70 13 from 1
+DEAL:0::unpack 0_2:71 14 from 1
+DEAL:0::unpack 0_2:72 15 from 1
+DEAL:0::unpack 0_2:73 16 from 1
+
+DEAL:1::dim = 2
+DEAL:1::pack 0_2:20 1
+DEAL:1::pack 0_2:21 2
+DEAL:1::pack 0_2:30 3
+DEAL:1::pack 0_2:31 4
+DEAL:1::unpack 0_2:02 1 from 0
+DEAL:1::unpack 0_2:03 2 from 0
+DEAL:1::unpack 0_2:12 3 from 0
+DEAL:1::unpack 0_2:13 4 from 0
+DEAL:1::dim = 3
+DEAL:1::pack 0_2:40 1
+DEAL:1::pack 0_2:41 2
+DEAL:1::pack 0_2:42 3
+DEAL:1::pack 0_2:43 4
+DEAL:1::pack 0_2:50 5
+DEAL:1::pack 0_2:51 6
+DEAL:1::pack 0_2:52 7
+DEAL:1::pack 0_2:53 8
+DEAL:1::pack 0_2:60 9
+DEAL:1::pack 0_2:61 10
+DEAL:1::pack 0_2:62 11
+DEAL:1::pack 0_2:63 12
+DEAL:1::pack 0_2:70 13
+DEAL:1::pack 0_2:71 14
+DEAL:1::pack 0_2:72 15
+DEAL:1::pack 0_2:73 16
+DEAL:1::unpack 0_2:04 1 from 0
+DEAL:1::unpack 0_2:05 2 from 0
+DEAL:1::unpack 0_2:06 3 from 0
+DEAL:1::unpack 0_2:07 4 from 0
+DEAL:1::unpack 0_2:14 5 from 0
+DEAL:1::unpack 0_2:15 6 from 0
+DEAL:1::unpack 0_2:16 7 from 0
+DEAL:1::unpack 0_2:17 8 from 0
+DEAL:1::unpack 0_2:24 9 from 0
+DEAL:1::unpack 0_2:25 10 from 0
+DEAL:1::unpack 0_2:26 11 from 0
+DEAL:1::unpack 0_2:27 12 from 0
+DEAL:1::unpack 0_2:34 13 from 0
+DEAL:1::unpack 0_2:35 14 from 0
+DEAL:1::unpack 0_2:36 15 from 0
+DEAL:1::unpack 0_2:37 16 from 0
+
diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_02.with_p4est=true.mpirun=3.output b/tests/distributed_grids/grid_tools_exchange_cell_data_02.with_p4est=true.mpirun=3.output
new file mode 100644
index 0000000000..8419aa2378
--- /dev/null
+++ b/tests/distributed_grids/grid_tools_exchange_cell_data_02.with_p4est=true.mpirun=3.output
@@ -0,0 +1,159 @@
+
+DEAL:0::dim = 2
+DEAL:0::pack 0_2:01 1
+DEAL:0::pack 0_2:02 2
+DEAL:0::pack 0_2:03 3
+DEAL:0::unpack 0_2:10 1 from 1
+DEAL:0::unpack 0_2:12 2 from 1
+DEAL:0::unpack 0_2:20 4 from 1
+DEAL:0::unpack 0_2:21 5 from 1
+DEAL:0::unpack 0_2:30 1 from 2
+DEAL:0::dim = 3
+DEAL:0::pack 0_2:03 1
+DEAL:0::pack 0_2:04 2
+DEAL:0::pack 0_2:05 3
+DEAL:0::pack 0_2:06 4
+DEAL:0::pack 0_2:07 5
+DEAL:0::pack 0_2:12 6
+DEAL:0::pack 0_2:13 7
+DEAL:0::pack 0_2:14 8
+DEAL:0::pack 0_2:15 9
+DEAL:0::pack 0_2:16 10
+DEAL:0::pack 0_2:17 11
+DEAL:0::pack 0_2:21 12
+DEAL:0::pack 0_2:23 13
+DEAL:0::pack 0_2:24 14
+DEAL:0::pack 0_2:25 15
+DEAL:0::pack 0_2:26 16
+DEAL:0::pack 0_2:27 17
+DEAL:0::unpack 0_2:30 1 from 1
+DEAL:0::unpack 0_2:31 2 from 1
+DEAL:0::unpack 0_2:32 3 from 1
+DEAL:0::unpack 0_2:34 4 from 1
+DEAL:0::unpack 0_2:35 5 from 1
+DEAL:0::unpack 0_2:36 6 from 1
+DEAL:0::unpack 0_2:40 8 from 1
+DEAL:0::unpack 0_2:41 9 from 1
+DEAL:0::unpack 0_2:42 10 from 1
+DEAL:0::unpack 0_2:43 11 from 1
+DEAL:0::unpack 0_2:50 1 from 2
+DEAL:0::unpack 0_2:51 2 from 2
+DEAL:0::unpack 0_2:52 3 from 2
+DEAL:0::unpack 0_2:53 4 from 2
+DEAL:0::unpack 0_2:60 7 from 2
+DEAL:0::unpack 0_2:61 8 from 2
+DEAL:0::unpack 0_2:62 9 from 2
+DEAL:0::unpack 0_2:63 10 from 2
+DEAL:0::unpack 0_2:70 13 from 2
+DEAL:0::unpack 0_2:71 14 from 2
+DEAL:0::unpack 0_2:72 15 from 2
+
+DEAL:1::dim = 2
+DEAL:1::pack 0_2:10 1
+DEAL:1::pack 0_2:12 2
+DEAL:1::pack 0_2:13 3
+DEAL:1::pack 0_2:20 4
+DEAL:1::pack 0_2:21 5
+DEAL:1::pack 0_2:23 6
+DEAL:1::unpack 0_2:01 1 from 0
+DEAL:1::unpack 0_2:02 2 from 0
+DEAL:1::unpack 0_2:03 3 from 0
+DEAL:1::unpack 0_2:30 1 from 2
+DEAL:1::unpack 0_2:31 2 from 2
+DEAL:1::unpack 0_2:32 3 from 2
+DEAL:1::dim = 3
+DEAL:1::pack 0_2:30 1
+DEAL:1::pack 0_2:31 2
+DEAL:1::pack 0_2:32 3
+DEAL:1::pack 0_2:34 4
+DEAL:1::pack 0_2:35 5
+DEAL:1::pack 0_2:36 6
+DEAL:1::pack 0_2:37 7
+DEAL:1::pack 0_2:40 8
+DEAL:1::pack 0_2:41 9
+DEAL:1::pack 0_2:42 10
+DEAL:1::pack 0_2:43 11
+DEAL:1::pack 0_2:45 12
+DEAL:1::pack 0_2:46 13
+DEAL:1::pack 0_2:47 14
+DEAL:1::unpack 0_2:03 1 from 0
+DEAL:1::unpack 0_2:04 2 from 0
+DEAL:1::unpack 0_2:05 3 from 0
+DEAL:1::unpack 0_2:06 4 from 0
+DEAL:1::unpack 0_2:07 5 from 0
+DEAL:1::unpack 0_2:12 6 from 0
+DEAL:1::unpack 0_2:13 7 from 0
+DEAL:1::unpack 0_2:14 8 from 0
+DEAL:1::unpack 0_2:16 10 from 0
+DEAL:1::unpack 0_2:17 11 from 0
+DEAL:1::unpack 0_2:21 12 from 0
+DEAL:1::unpack 0_2:23 13 from 0
+DEAL:1::unpack 0_2:24 14 from 0
+DEAL:1::unpack 0_2:25 15 from 0
+DEAL:1::unpack 0_2:27 17 from 0
+DEAL:1::unpack 0_2:50 1 from 2
+DEAL:1::unpack 0_2:52 3 from 2
+DEAL:1::unpack 0_2:53 4 from 2
+DEAL:1::unpack 0_2:54 5 from 2
+DEAL:1::unpack 0_2:56 6 from 2
+DEAL:1::unpack 0_2:60 7 from 2
+DEAL:1::unpack 0_2:61 8 from 2
+DEAL:1::unpack 0_2:63 10 from 2
+DEAL:1::unpack 0_2:64 11 from 2
+DEAL:1::unpack 0_2:65 12 from 2
+DEAL:1::unpack 0_2:70 13 from 2
+DEAL:1::unpack 0_2:71 14 from 2
+DEAL:1::unpack 0_2:72 15 from 2
+DEAL:1::unpack 0_2:73 16 from 2
+DEAL:1::unpack 0_2:74 17 from 2
+
+
+DEAL:2::dim = 2
+DEAL:2::pack 0_2:30 1
+DEAL:2::pack 0_2:31 2
+DEAL:2::pack 0_2:32 3
+DEAL:2::unpack 0_2:03 3 from 0
+DEAL:2::unpack 0_2:12 2 from 1
+DEAL:2::unpack 0_2:13 3 from 1
+DEAL:2::unpack 0_2:21 5 from 1
+DEAL:2::unpack 0_2:23 6 from 1
+DEAL:2::dim = 3
+DEAL:2::pack 0_2:50 1
+DEAL:2::pack 0_2:51 2
+DEAL:2::pack 0_2:52 3
+DEAL:2::pack 0_2:53 4
+DEAL:2::pack 0_2:54 5
+DEAL:2::pack 0_2:56 6
+DEAL:2::pack 0_2:60 7
+DEAL:2::pack 0_2:61 8
+DEAL:2::pack 0_2:62 9
+DEAL:2::pack 0_2:63 10
+DEAL:2::pack 0_2:64 11
+DEAL:2::pack 0_2:65 12
+DEAL:2::pack 0_2:70 13
+DEAL:2::pack 0_2:71 14
+DEAL:2::pack 0_2:72 15
+DEAL:2::pack 0_2:73 16
+DEAL:2::pack 0_2:74 17
+DEAL:2::unpack 0_2:05 3 from 0
+DEAL:2::unpack 0_2:06 4 from 0
+DEAL:2::unpack 0_2:07 5 from 0
+DEAL:2::unpack 0_2:14 8 from 0
+DEAL:2::unpack 0_2:15 9 from 0
+DEAL:2::unpack 0_2:16 10 from 0
+DEAL:2::unpack 0_2:17 11 from 0
+DEAL:2::unpack 0_2:24 14 from 0
+DEAL:2::unpack 0_2:25 15 from 0
+DEAL:2::unpack 0_2:26 16 from 0
+DEAL:2::unpack 0_2:27 17 from 0
+DEAL:2::unpack 0_2:34 4 from 1
+DEAL:2::unpack 0_2:35 5 from 1
+DEAL:2::unpack 0_2:36 6 from 1
+DEAL:2::unpack 0_2:37 7 from 1
+DEAL:2::unpack 0_2:41 9 from 1
+DEAL:2::unpack 0_2:42 10 from 1
+DEAL:2::unpack 0_2:43 11 from 1
+DEAL:2::unpack 0_2:45 12 from 1
+DEAL:2::unpack 0_2:46 13 from 1
+DEAL:2::unpack 0_2:47 14 from 1
+
-- 
2.39.5