https://gitweb.dealii.org/ - dealii.git/commitdiff
introduce GridTools::exchange_cell_data_to_ghosts
author Timo Heister <timo.heister@gmail.com>
Sat, 12 Aug 2017 16:59:58 +0000 (10:59 -0600)
committer Timo Heister <timo.heister@gmail.com>
Sat, 12 Aug 2017 21:32:14 +0000 (15:32 -0600)
include/deal.II/distributed/grid_tools.h [new file with mode: 0644]
include/deal.II/distributed/tria.h
include/deal.II/distributed/tria_base.h
source/distributed/tria_base.cc
tests/distributed_grids/grid_tools_exchange_cell_data_01.cc [new file with mode: 0644]
tests/distributed_grids/grid_tools_exchange_cell_data_01.with_p4est=true.mpirun=2.output [new file with mode: 0644]
tests/distributed_grids/grid_tools_exchange_cell_data_01.with_p4est=true.mpirun=3.output [new file with mode: 0644]
tests/distributed_grids/grid_tools_exchange_cell_data_02.cc [new file with mode: 0644]
tests/distributed_grids/grid_tools_exchange_cell_data_02.with_p4est=true.mpirun=2.output [new file with mode: 0644]
tests/distributed_grids/grid_tools_exchange_cell_data_02.with_p4est=true.mpirun=3.output [new file with mode: 0644]

diff --git a/include/deal.II/distributed/grid_tools.h b/include/deal.II/distributed/grid_tools.h
new file mode 100644 (file)
index 0000000..4195070
--- /dev/null
@@ -0,0 +1,333 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii__distributed_grid_tools_h
+#define dealii__distributed_grid_tools_h
+
+
+#include <deal.II/base/config.h>
+#include <deal.II/base/exceptions.h>
+#include <deal.II/distributed/tria_base.h>
+
+DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
+#include <boost/archive/binary_oarchive.hpp>
+#include <boost/archive/binary_iarchive.hpp>
+#include <boost/iostreams/stream.hpp>
+#include <boost/iostreams/filtering_stream.hpp>
+#include <boost/iostreams/device/back_inserter.hpp>
+#include <boost/iostreams/filter/gzip.hpp>
+DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
+
+#include <vector>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace parallel
+{
+
+  namespace GridTools
+  {
+    /**
+     * Exchange arbitrary data of type @p DataType, provided by the given
+     * function objects, from locally owned cells to the corresponding
+     * ghost cells on other processors.
+     *
+     * After this call, @p unpack will have been called on every ghost
+     * cell with exactly the data that @p pack produced on the processor
+     * owning that cell.
+     *
+     * @tparam DataType The type of the data to be communicated. It is assumed
+     * to be serializable by boost::serialization.
+     * @tparam MeshType The type of @p mesh.
+     *
+     * @param mesh A variable of a type that satisfies the requirements of the
+     * @ref ConceptMeshType "MeshType concept".
+     * @param pack The function that will be called on each locally owned cell
+     * that needs to be sent.
+     * @param unpack The function that will be called for each ghost cell
+     * with the data imported from other processors.
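+     *
+     * A minimal usage sketch, modeled on the tests added in this commit
+     * (the quantity returned by the pack lambda is just a placeholder;
+     * any serializable value works):
+     * @code
+     * typedef parallel::distributed::Triangulation<dim> mesh_type;
+     *
+     * parallel::GridTools::exchange_cell_data_to_ghosts<mesh_type, double>
+     *   (tria,
+     *    [] (const mesh_type::active_cell_iterator &cell)
+     *    {
+     *      return cell->diameter();   // runs on the process owning 'cell'
+     *    },
+     *    [] (const mesh_type::active_cell_iterator &cell,
+     *        const double &value)
+     *    {
+     *      // runs on every process on which 'cell' is a ghost cell
+     *    });
+     * @endcode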
+     */
+    template <typename MeshType, typename DataType>
+    void
+    exchange_cell_data_to_ghosts (const MeshType &mesh,
+                                  std::function<DataType (const typename MeshType::active_cell_iterator &)> pack,
+                                  std::function<void (const typename MeshType::active_cell_iterator &, const DataType &)> unpack);
+  }
+
+
+}
+
+#ifndef DOXYGEN
+
+namespace parallel
+{
+  namespace GridTools
+  {
+
+    namespace internal
+    {
+      /**
+       * A structure that allows the transfer of data of type T from one processor
+       * to another. It corresponds to a packed buffer that stores a list of
+       * cells and an array of type T.
+       *
+       * The vector @p data is the same size as @p cell_ids.
+       */
+      template <int dim, typename T>
+      struct CellDataTransferBuffer
+      {
+        std::vector<CellId> cell_ids;
+        std::vector<T> data;
+
+        /**
+         * Write the data of this object to a stream for the purpose of
+         * serialization.
+         */
+        template <class Archive>
+        void save (Archive &ar,
+                   const unsigned int /*version*/) const
+        {
+          Assert(cell_ids.size() == data.size(), ExcInternalError());
+          // archive the cell ids in an efficient binary format
+          const size_t n_cells = cell_ids.size();
+          ar &n_cells;
+          for (auto &it : cell_ids)
+            {
+              CellId::binary_type binary_cell_id = it.template to_binary<dim>();
+              ar &binary_cell_id;
+            }
+
+          ar &data;
+        }
+
+        /**
+         * Read the data of this object from a stream for the purpose of
+         * serialization. Throw away the previous content.
+         */
+        template <class Archive>
+        void load (Archive &ar,
+                   const unsigned int /*version*/)
+        {
+          size_t n_cells;
+          ar &n_cells;
+          cell_ids.clear();
+          cell_ids.reserve(n_cells);
+          for (unsigned int c=0; c<n_cells; ++c)
+            {
+              CellId::binary_type value;
+              ar &value;
+              cell_ids.emplace_back(std::move(value));
+            }
+          ar &data;
+        }
+
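+        /**
+         * Tell boost::serialization to use the separate save() and load()
+         * member functions above rather than a single, combined
+         * serialize() function.
+         */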
+        BOOST_SERIALIZATION_SPLIT_MEMBER()
+
+
+        /**
+         * Pack the data that corresponds to this object into a buffer in
+         * the form of a vector of chars and return it.
+         */
+        std::vector<char>
+        pack_data () const
+        {
+          // set up a buffer and then use it as the target of a compressing
+          // stream into which we serialize the current object
+          std::vector<char> buffer;
+          {
+            boost::iostreams::filtering_ostream out;
+            out.push(boost::iostreams::gzip_compressor
+                     (boost::iostreams::gzip_params
+                      (boost::iostreams::gzip::best_compression)));
+            out.push(boost::iostreams::back_inserter(buffer));
+
+            boost::archive::binary_oarchive archive(out);
+
+            archive << *this;
+            out.flush();
+          }
+
+          return buffer;
+        }
+
+
+        /**
+         * Given a buffer in the form of an array of chars, unpack it and
+         * restore the current object to the state that it was in when
+         * it was packed into said buffer by the pack_data() function.
+         */
+        void unpack_data (const std::vector<char> &buffer)
+        {
+          std::string decompressed_buffer;
+
+          // first decompress the buffer
+          {
+            boost::iostreams::filtering_ostream decompressing_stream;
+            decompressing_stream.push(boost::iostreams::gzip_decompressor());
+            decompressing_stream.push(boost::iostreams::back_inserter(decompressed_buffer));
+
+            decompressing_stream.write (&buffer[0], buffer.size());
+          }
+
+          // then restore the object from the buffer
+          std::istringstream in(decompressed_buffer);
+          boost::archive::binary_iarchive archive(in);
+
+          archive >> *this;
+        }
+      };
+
+    }
+
+
+    template <typename MeshType, typename DataType>
+    void
+    exchange_cell_data_to_ghosts (const MeshType &mesh,
+                                  std::function<DataType (const typename MeshType::active_cell_iterator &)> pack,
+                                  std::function<void (const typename MeshType::active_cell_iterator &, const DataType &)> unpack)
+    {
+      constexpr int dim = MeshType::dimension;
+      constexpr int spacedim = MeshType::space_dimension;
+      // use a dynamic_cast here so that the assertion below can actually
+      // detect the case of a triangulation that is not parallel
+      auto tria =
+        dynamic_cast<const parallel::Triangulation<dim, spacedim>*>(&mesh.get_triangulation());
+      Assert(tria != NULL, ExcMessage("The function exchange_cell_data_to_ghosts only works with parallel Triangulations."));
+
+      // 1. accumulate the data to send in a map:
+      //    destination subdomain_id -> data buffer
+      typedef std::map<dealii::types::subdomain_id, internal::CellDataTransferBuffer<dim, DataType> >
+      cellmap_t;
+      cellmap_t destination_to_data_buffer;
+
+      std::map<unsigned int, std::set<dealii::types::subdomain_id> >
+      vertices_with_ghost_neighbors = tria->compute_vertices_with_ghost_neighbors();
+
+      for (auto cell : tria->active_cell_iterators())
+        if (cell->is_locally_owned())
+          {
+            std::set<dealii::types::subdomain_id> send_to;
+            for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
+              {
+                const std::map<unsigned int, std::set<dealii::types::subdomain_id> >::const_iterator
+                neighbor_subdomains_of_vertex
+                  = vertices_with_ghost_neighbors.find (cell->vertex_index(v));
+
+                if (neighbor_subdomains_of_vertex ==
+                    vertices_with_ghost_neighbors.end())
+                  continue;
+
+                Assert(neighbor_subdomains_of_vertex->second.size()!=0,
+                       ExcInternalError());
+
+                send_to.insert(neighbor_subdomains_of_vertex->second.begin(),
+                               neighbor_subdomains_of_vertex->second.end());
+              }
+
+            if (send_to.size() > 0)
+              {
+                // this cell's data needs to be sent to someone
+                typename MeshType::active_cell_iterator
+                mesh_it (tria, cell->level(), cell->index(), &mesh);
+
+                const DataType data = pack(mesh_it);
+                const CellId cellid = cell->id();
+
+                for (auto it : send_to)
+                  {
+                    const dealii::types::subdomain_id subdomain = it;
+
+                    // find the data buffer for proc "subdomain" if it exists
+                    // or create an empty one otherwise
+                    typename cellmap_t::iterator p
+                      = destination_to_data_buffer.insert (std::make_pair(subdomain,
+                                                                          internal::CellDataTransferBuffer<dim, DataType>()))
+                        .first;
+
+                    p->second.cell_ids.emplace_back(cellid);
+                    p->second.data.emplace_back(data);
+                  }
+              }
+          }
+
+
+      // 2. send our messages
+      const std::set<dealii::types::subdomain_id> &ghost_owners = tria->ghost_owners();
+      const unsigned int n_ghost_owners = ghost_owners.size();
+      std::vector<std::vector<char> > sendbuffers (n_ghost_owners);
+      std::vector<MPI_Request> requests (n_ghost_owners);
+
+      unsigned int idx=0;
+      for (auto it = ghost_owners.begin();
+           it!=ghost_owners.end();
+           ++it, ++idx)
+        {
+          internal::CellDataTransferBuffer<dim, DataType> &data = destination_to_data_buffer[*it];
+
+          // pack all the data into the buffer for this recipient and send
+          // it. keep the data around until we can be sure that the message
+          // has been received
+          sendbuffers[idx] = data.pack_data ();
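+          // the message tag (786) is arbitrary; it merely has to match the
+          // tag used by the probe/receive below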
+          const int ierr = MPI_Isend(sendbuffers[idx].data(), sendbuffers[idx].size(),
+                                     MPI_BYTE, *it,
+                                     786, tria->get_communicator(), &requests[idx]);
+          AssertThrowMPI(ierr);
+        }
+
+      // 3. receive messages
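+      // we do not know the message sizes in advance, so first probe for
+      // the next message, query its length, and only then post a matching
+      // receive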
+      std::vector<char> receive;
+      for (unsigned int idx=0; idx<n_ghost_owners; ++idx)
+        {
+          MPI_Status status;
+          int len;
+          int ierr = MPI_Probe(MPI_ANY_SOURCE, 786, tria->get_communicator(), &status);
+          AssertThrowMPI(ierr);
+          ierr = MPI_Get_count(&status, MPI_BYTE, &len);
+          AssertThrowMPI(ierr);
+
+          receive.resize(len);
+
+          char *ptr = &receive[0];
+          ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
+                          tria->get_communicator(), &status);
+          AssertThrowMPI(ierr);
+
+          internal::CellDataTransferBuffer<dim, DataType> cellinfo;
+          cellinfo.unpack_data(receive);
+          Assert (cellinfo.cell_ids.size()>0, ExcInternalError());
+
+          DataType *data = cellinfo.data.data();
+          for (unsigned int c=0; c<cellinfo.cell_ids.size(); ++c, ++data)
+            {
+              const typename Triangulation<dim,spacedim>::cell_iterator
+              tria_cell = cellinfo.cell_ids[c].to_cell(*tria);
+
+              const typename MeshType::active_cell_iterator
+              cell (tria, tria_cell->level(), tria_cell->index(), &mesh);
+
+              unpack(cell, *data);
+            }
+        }
+
+      // wait for all sends to complete before the send buffers go out of
+      // scope at the end of this function
+      if (requests.size() > 0)
+        {
+          const int ierr = MPI_Waitall(requests.size(), requests.data(),
+                                       MPI_STATUSES_IGNORE);
+          AssertThrowMPI(ierr);
+        }
+    }
+
+  }
+}
+
+#endif // DOXYGEN
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
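
The pack_data()/unpack_data() pair above uses a Boost idiom that is easy to
get subtly wrong: the binary archive and the filtering stream have to be
flushed (here: destroyed at the end of their scope) before the compressed
buffer is complete. The following self-contained sketch shows the same
round trip for a plain std::vector<double>; it only illustrates the idiom
and is not part of the patch:

#include <boost/archive/binary_iarchive.hpp>
#include <boost/archive/binary_oarchive.hpp>
#include <boost/iostreams/device/back_inserter.hpp>
#include <boost/iostreams/filter/gzip.hpp>
#include <boost/iostreams/filtering_stream.hpp>
#include <boost/serialization/vector.hpp>

#include <cassert>
#include <sstream>
#include <string>
#include <vector>

int main ()
{
  const std::vector<double> payload = {1., 2., 3.};

  // serialize into a gzip-compressed char buffer, as in pack_data()
  std::vector<char> buffer;
  {
    boost::iostreams::filtering_ostream out;
    out.push(boost::iostreams::gzip_compressor());
    out.push(boost::iostreams::back_inserter(buffer));
    boost::archive::binary_oarchive archive(out);
    archive << payload;
    // leaving this scope destroys 'archive' and 'out', flushing the
    // remaining compressed bytes into 'buffer'
  }

  // decompress into a string and deserialize again, as in unpack_data()
  std::string decompressed;
  {
    boost::iostreams::filtering_ostream decompressing_stream;
    decompressing_stream.push(boost::iostreams::gzip_decompressor());
    decompressing_stream.push(boost::iostreams::back_inserter(decompressed));
    decompressing_stream.write(buffer.data(), buffer.size());
  }
  std::istringstream in(decompressed);
  boost::archive::binary_iarchive archive(in);
  std::vector<double> restored;
  archive >> restored;

  assert(restored == payload);
  return 0;
}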
diff --git a/include/deal.II/distributed/tria.h b/include/deal.II/distributed/tria.h
index 4b1e00b36e88bfd6febdc4bee35feb7e7256b75f..238a0c6e0517cfd6aa90397f92fb3352266572d0 100644 (file)
@@ -862,7 +862,7 @@ namespace parallel
        * subdomains are adjacent to that vertex. Used by
        * DoFHandler::Policy::ParallelDistributed.
        */
-      std::map<unsigned int, std::set<dealii::types::subdomain_id> >
+      virtual std::map<unsigned int, std::set<dealii::types::subdomain_id> >
       compute_vertices_with_ghost_neighbors () const;
 
       /**
diff --git a/include/deal.II/distributed/tria_base.h b/include/deal.II/distributed/tria_base.h
index d693dbbaaac8e183f63a3ab45ea7d5c056769c2e..a4cd583d7e1e9a736f9b9bc34e736018bb67fb3a 100644 (file)
@@ -151,6 +151,14 @@ namespace parallel
     const std::set<types::subdomain_id> &
     level_ghost_owners () const;
 
+    /**
+     * Return a map that, for each vertex, lists all the processors whose
+     * subdomains are adjacent to that vertex. Used by
+     * DoFHandler::Policy::ParallelDistributed.
+     */
+    virtual std::map<unsigned int, std::set<dealii::types::subdomain_id> >
+    compute_vertices_with_ghost_neighbors () const;
+
   protected:
     /**
      * MPI communicator to be used for the triangulation. We create a unique
diff --git a/source/distributed/tria_base.cc b/source/distributed/tria_base.cc
index 98ce2ba4207b3b0646e221ea547d4a9d7a65e456..59a50da15c263b6707535050540557cb9cbbafe1 100644 (file)
@@ -232,6 +232,8 @@ namespace parallel
     return my_subdomain;
   }
 
+
+
   template <int dim, int spacedim>
   const std::set<types::subdomain_id> &
   Triangulation<dim,spacedim>::
@@ -240,6 +242,8 @@ namespace parallel
     return number_cache.ghost_owners;
   }
 
+
+
   template <int dim, int spacedim>
   const std::set<types::subdomain_id> &
   Triangulation<dim,spacedim>::
@@ -248,6 +252,35 @@ namespace parallel
     return number_cache.level_ghost_owners;
   }
 
+
+
+  template <int dim, int spacedim>
+  std::map<unsigned int, std::set<dealii::types::subdomain_id> >
+  Triangulation<dim,spacedim>::
+  compute_vertices_with_ghost_neighbors () const
+  {
+    std::vector<bool> vertex_of_own_cell(this->n_vertices(), false);
+
+    for (auto cell : this->active_cell_iterators())
+      if (cell->is_locally_owned())
+        for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
+          vertex_of_own_cell[cell->vertex_index(v)] = true;
+
+    std::map<unsigned int, std::set<dealii::types::subdomain_id> > result;
+    for (auto cell : this->active_cell_iterators())
+      if (cell->is_ghost())
+        {
+          const types::subdomain_id owner = cell->subdomain_id();
+          for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
+            {
+              if (vertex_of_own_cell[cell->vertex_index(v)])
+                result[cell->vertex_index(v)].insert(owner);
+            }
+        }
+
+    return result;
+  }
+
 } // end namespace parallel
 
 
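The map returned by compute_vertices_with_ghost_neighbors() only has
entries for vertices at which locally owned and ghost cells meet. A small
illustrative fragment (assuming the usual deal.II headers plus <iostream>,
and a distributed triangulation named 'tria'; not part of the patch):

  const std::map<unsigned int, std::set<dealii::types::subdomain_id> >
  vertices_with_ghosts = tria.compute_vertices_with_ghost_neighbors();

  for (const auto &entry : vertices_with_ghosts)
    {
      std::cout << "vertex " << entry.first
                << " is adjacent to ghost cells owned by:";
      for (const dealii::types::subdomain_id owner : entry.second)
        std::cout << ' ' << owner;
      std::cout << std::endl;
    }
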
diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_01.cc b/tests/distributed_grids/grid_tools_exchange_cell_data_01.cc
new file mode 100644 (file)
index 0000000..3951508
--- /dev/null
@@ -0,0 +1,86 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// test GridTools::exchange_cell_data_to_ghosts
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/distributed/grid_tools.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/cell_id.h>
+
+#include <algorithm>
+
+template <int dim>
+void test ()
+{
+  const MPI_Comm &mpi_communicator = MPI_COMM_WORLD;
+  deallog << "dim = " << dim << std::endl;
+
+  parallel::distributed::Triangulation<dim> tria (mpi_communicator);
+  GridGenerator::hyper_cube(tria);
+  tria.refine_global(2);
+
+  std::set<std::string> output;
+
+  typedef typename parallel::distributed::Triangulation<dim>::active_cell_iterator cell_iterator;
+  typedef double DT;
+  DT counter = 0.0;
+  parallel::GridTools::exchange_cell_data_to_ghosts<
+    parallel::distributed::Triangulation<dim>, DT>
+  (tria,
+  [&](const cell_iterator& cell)
+  {
+    DT value = ++counter;
+
+    deallog << "pack "
+            << cell->id()
+            << " "
+            << value << std::endl;
+    return value;
+  },
+  [&](const cell_iterator& cell, const DT& data)
+  {
+    std::ostringstream oss;
+    oss << "unpack "
+        << cell->id()
+        << " "
+        << data
+        << " from "
+        << cell->subdomain_id();
+
+    output.insert(oss.str());
+  });
+
+  // print the output (sorted by the std::set) because it arrives in a random order
+  for (auto &it : output)
+    deallog << it << std::endl;
+}
+
+
+int main (int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
+  MPILogInitAll log;
+
+  test<2> ();
+  test<3> ();
+
+  return 0;
+}
diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_01.with_p4est=true.mpirun=2.output b/tests/distributed_grids/grid_tools_exchange_cell_data_01.with_p4est=true.mpirun=2.output
new file mode 100644 (file)
index 0000000..a4f4232
--- /dev/null
@@ -0,0 +1,87 @@
+
+DEAL:0::dim = 2
+DEAL:0::pack 0_2:02 1.00000
+DEAL:0::pack 0_2:03 2.00000
+DEAL:0::pack 0_2:12 3.00000
+DEAL:0::pack 0_2:13 4.00000
+DEAL:0::unpack 0_2:20 1.00000 from 1
+DEAL:0::unpack 0_2:21 2.00000 from 1
+DEAL:0::unpack 0_2:30 3.00000 from 1
+DEAL:0::unpack 0_2:31 4.00000 from 1
+DEAL:0::dim = 3
+DEAL:0::pack 0_2:04 1.00000
+DEAL:0::pack 0_2:05 2.00000
+DEAL:0::pack 0_2:06 3.00000
+DEAL:0::pack 0_2:07 4.00000
+DEAL:0::pack 0_2:14 5.00000
+DEAL:0::pack 0_2:15 6.00000
+DEAL:0::pack 0_2:16 7.00000
+DEAL:0::pack 0_2:17 8.00000
+DEAL:0::pack 0_2:24 9.00000
+DEAL:0::pack 0_2:25 10.0000
+DEAL:0::pack 0_2:26 11.0000
+DEAL:0::pack 0_2:27 12.0000
+DEAL:0::pack 0_2:34 13.0000
+DEAL:0::pack 0_2:35 14.0000
+DEAL:0::pack 0_2:36 15.0000
+DEAL:0::pack 0_2:37 16.0000
+DEAL:0::unpack 0_2:40 1.00000 from 1
+DEAL:0::unpack 0_2:41 2.00000 from 1
+DEAL:0::unpack 0_2:42 3.00000 from 1
+DEAL:0::unpack 0_2:43 4.00000 from 1
+DEAL:0::unpack 0_2:50 5.00000 from 1
+DEAL:0::unpack 0_2:51 6.00000 from 1
+DEAL:0::unpack 0_2:52 7.00000 from 1
+DEAL:0::unpack 0_2:53 8.00000 from 1
+DEAL:0::unpack 0_2:60 9.00000 from 1
+DEAL:0::unpack 0_2:61 10.0000 from 1
+DEAL:0::unpack 0_2:62 11.0000 from 1
+DEAL:0::unpack 0_2:63 12.0000 from 1
+DEAL:0::unpack 0_2:70 13.0000 from 1
+DEAL:0::unpack 0_2:71 14.0000 from 1
+DEAL:0::unpack 0_2:72 15.0000 from 1
+DEAL:0::unpack 0_2:73 16.0000 from 1
+
+DEAL:1::dim = 2
+DEAL:1::pack 0_2:20 1.00000
+DEAL:1::pack 0_2:21 2.00000
+DEAL:1::pack 0_2:30 3.00000
+DEAL:1::pack 0_2:31 4.00000
+DEAL:1::unpack 0_2:02 1.00000 from 0
+DEAL:1::unpack 0_2:03 2.00000 from 0
+DEAL:1::unpack 0_2:12 3.00000 from 0
+DEAL:1::unpack 0_2:13 4.00000 from 0
+DEAL:1::dim = 3
+DEAL:1::pack 0_2:40 1.00000
+DEAL:1::pack 0_2:41 2.00000
+DEAL:1::pack 0_2:42 3.00000
+DEAL:1::pack 0_2:43 4.00000
+DEAL:1::pack 0_2:50 5.00000
+DEAL:1::pack 0_2:51 6.00000
+DEAL:1::pack 0_2:52 7.00000
+DEAL:1::pack 0_2:53 8.00000
+DEAL:1::pack 0_2:60 9.00000
+DEAL:1::pack 0_2:61 10.0000
+DEAL:1::pack 0_2:62 11.0000
+DEAL:1::pack 0_2:63 12.0000
+DEAL:1::pack 0_2:70 13.0000
+DEAL:1::pack 0_2:71 14.0000
+DEAL:1::pack 0_2:72 15.0000
+DEAL:1::pack 0_2:73 16.0000
+DEAL:1::unpack 0_2:04 1.00000 from 0
+DEAL:1::unpack 0_2:05 2.00000 from 0
+DEAL:1::unpack 0_2:06 3.00000 from 0
+DEAL:1::unpack 0_2:07 4.00000 from 0
+DEAL:1::unpack 0_2:14 5.00000 from 0
+DEAL:1::unpack 0_2:15 6.00000 from 0
+DEAL:1::unpack 0_2:16 7.00000 from 0
+DEAL:1::unpack 0_2:17 8.00000 from 0
+DEAL:1::unpack 0_2:24 9.00000 from 0
+DEAL:1::unpack 0_2:25 10.0000 from 0
+DEAL:1::unpack 0_2:26 11.0000 from 0
+DEAL:1::unpack 0_2:27 12.0000 from 0
+DEAL:1::unpack 0_2:34 13.0000 from 0
+DEAL:1::unpack 0_2:35 14.0000 from 0
+DEAL:1::unpack 0_2:36 15.0000 from 0
+DEAL:1::unpack 0_2:37 16.0000 from 0
+
diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_01.with_p4est=true.mpirun=3.output b/tests/distributed_grids/grid_tools_exchange_cell_data_01.with_p4est=true.mpirun=3.output
new file mode 100644 (file)
index 0000000..1cf882a
--- /dev/null
@@ -0,0 +1,159 @@
+
+DEAL:0::dim = 2
+DEAL:0::pack 0_2:01 1.00000
+DEAL:0::pack 0_2:02 2.00000
+DEAL:0::pack 0_2:03 3.00000
+DEAL:0::unpack 0_2:10 1 from 1
+DEAL:0::unpack 0_2:12 2 from 1
+DEAL:0::unpack 0_2:20 4 from 1
+DEAL:0::unpack 0_2:21 5 from 1
+DEAL:0::unpack 0_2:30 1 from 2
+DEAL:0::dim = 3
+DEAL:0::pack 0_2:03 1.00000
+DEAL:0::pack 0_2:04 2.00000
+DEAL:0::pack 0_2:05 3.00000
+DEAL:0::pack 0_2:06 4.00000
+DEAL:0::pack 0_2:07 5.00000
+DEAL:0::pack 0_2:12 6.00000
+DEAL:0::pack 0_2:13 7.00000
+DEAL:0::pack 0_2:14 8.00000
+DEAL:0::pack 0_2:15 9.00000
+DEAL:0::pack 0_2:16 10.0000
+DEAL:0::pack 0_2:17 11.0000
+DEAL:0::pack 0_2:21 12.0000
+DEAL:0::pack 0_2:23 13.0000
+DEAL:0::pack 0_2:24 14.0000
+DEAL:0::pack 0_2:25 15.0000
+DEAL:0::pack 0_2:26 16.0000
+DEAL:0::pack 0_2:27 17.0000
+DEAL:0::unpack 0_2:30 1 from 1
+DEAL:0::unpack 0_2:31 2 from 1
+DEAL:0::unpack 0_2:32 3 from 1
+DEAL:0::unpack 0_2:34 4 from 1
+DEAL:0::unpack 0_2:35 5 from 1
+DEAL:0::unpack 0_2:36 6 from 1
+DEAL:0::unpack 0_2:40 8 from 1
+DEAL:0::unpack 0_2:41 9 from 1
+DEAL:0::unpack 0_2:42 10 from 1
+DEAL:0::unpack 0_2:43 11 from 1
+DEAL:0::unpack 0_2:50 1 from 2
+DEAL:0::unpack 0_2:51 2 from 2
+DEAL:0::unpack 0_2:52 3 from 2
+DEAL:0::unpack 0_2:53 4 from 2
+DEAL:0::unpack 0_2:60 7 from 2
+DEAL:0::unpack 0_2:61 8 from 2
+DEAL:0::unpack 0_2:62 9 from 2
+DEAL:0::unpack 0_2:63 10 from 2
+DEAL:0::unpack 0_2:70 13 from 2
+DEAL:0::unpack 0_2:71 14 from 2
+DEAL:0::unpack 0_2:72 15 from 2
+
+DEAL:1::dim = 2
+DEAL:1::pack 0_2:10 1.00000
+DEAL:1::pack 0_2:12 2.00000
+DEAL:1::pack 0_2:13 3.00000
+DEAL:1::pack 0_2:20 4.00000
+DEAL:1::pack 0_2:21 5.00000
+DEAL:1::pack 0_2:23 6.00000
+DEAL:1::unpack 0_2:01 1 from 0
+DEAL:1::unpack 0_2:02 2 from 0
+DEAL:1::unpack 0_2:03 3 from 0
+DEAL:1::unpack 0_2:30 1 from 2
+DEAL:1::unpack 0_2:31 2 from 2
+DEAL:1::unpack 0_2:32 3 from 2
+DEAL:1::dim = 3
+DEAL:1::pack 0_2:30 1.00000
+DEAL:1::pack 0_2:31 2.00000
+DEAL:1::pack 0_2:32 3.00000
+DEAL:1::pack 0_2:34 4.00000
+DEAL:1::pack 0_2:35 5.00000
+DEAL:1::pack 0_2:36 6.00000
+DEAL:1::pack 0_2:37 7.00000
+DEAL:1::pack 0_2:40 8.00000
+DEAL:1::pack 0_2:41 9.00000
+DEAL:1::pack 0_2:42 10.0000
+DEAL:1::pack 0_2:43 11.0000
+DEAL:1::pack 0_2:45 12.0000
+DEAL:1::pack 0_2:46 13.0000
+DEAL:1::pack 0_2:47 14.0000
+DEAL:1::unpack 0_2:03 1 from 0
+DEAL:1::unpack 0_2:04 2 from 0
+DEAL:1::unpack 0_2:05 3 from 0
+DEAL:1::unpack 0_2:06 4 from 0
+DEAL:1::unpack 0_2:07 5 from 0
+DEAL:1::unpack 0_2:12 6 from 0
+DEAL:1::unpack 0_2:13 7 from 0
+DEAL:1::unpack 0_2:14 8 from 0
+DEAL:1::unpack 0_2:16 10 from 0
+DEAL:1::unpack 0_2:17 11 from 0
+DEAL:1::unpack 0_2:21 12 from 0
+DEAL:1::unpack 0_2:23 13 from 0
+DEAL:1::unpack 0_2:24 14 from 0
+DEAL:1::unpack 0_2:25 15 from 0
+DEAL:1::unpack 0_2:27 17 from 0
+DEAL:1::unpack 0_2:50 1 from 2
+DEAL:1::unpack 0_2:52 3 from 2
+DEAL:1::unpack 0_2:53 4 from 2
+DEAL:1::unpack 0_2:54 5 from 2
+DEAL:1::unpack 0_2:56 6 from 2
+DEAL:1::unpack 0_2:60 7 from 2
+DEAL:1::unpack 0_2:61 8 from 2
+DEAL:1::unpack 0_2:63 10 from 2
+DEAL:1::unpack 0_2:64 11 from 2
+DEAL:1::unpack 0_2:65 12 from 2
+DEAL:1::unpack 0_2:70 13 from 2
+DEAL:1::unpack 0_2:71 14 from 2
+DEAL:1::unpack 0_2:72 15 from 2
+DEAL:1::unpack 0_2:73 16 from 2
+DEAL:1::unpack 0_2:74 17 from 2
+
+
+DEAL:2::dim = 2
+DEAL:2::pack 0_2:30 1.00000
+DEAL:2::pack 0_2:31 2.00000
+DEAL:2::pack 0_2:32 3.00000
+DEAL:2::unpack 0_2:03 3 from 0
+DEAL:2::unpack 0_2:12 2 from 1
+DEAL:2::unpack 0_2:13 3 from 1
+DEAL:2::unpack 0_2:21 5 from 1
+DEAL:2::unpack 0_2:23 6 from 1
+DEAL:2::dim = 3
+DEAL:2::pack 0_2:50 1.00000
+DEAL:2::pack 0_2:51 2.00000
+DEAL:2::pack 0_2:52 3.00000
+DEAL:2::pack 0_2:53 4.00000
+DEAL:2::pack 0_2:54 5.00000
+DEAL:2::pack 0_2:56 6.00000
+DEAL:2::pack 0_2:60 7.00000
+DEAL:2::pack 0_2:61 8.00000
+DEAL:2::pack 0_2:62 9.00000
+DEAL:2::pack 0_2:63 10.0000
+DEAL:2::pack 0_2:64 11.0000
+DEAL:2::pack 0_2:65 12.0000
+DEAL:2::pack 0_2:70 13.0000
+DEAL:2::pack 0_2:71 14.0000
+DEAL:2::pack 0_2:72 15.0000
+DEAL:2::pack 0_2:73 16.0000
+DEAL:2::pack 0_2:74 17.0000
+DEAL:2::unpack 0_2:05 3 from 0
+DEAL:2::unpack 0_2:06 4 from 0
+DEAL:2::unpack 0_2:07 5 from 0
+DEAL:2::unpack 0_2:14 8 from 0
+DEAL:2::unpack 0_2:15 9 from 0
+DEAL:2::unpack 0_2:16 10 from 0
+DEAL:2::unpack 0_2:17 11 from 0
+DEAL:2::unpack 0_2:24 14 from 0
+DEAL:2::unpack 0_2:25 15 from 0
+DEAL:2::unpack 0_2:26 16 from 0
+DEAL:2::unpack 0_2:27 17 from 0
+DEAL:2::unpack 0_2:34 4 from 1
+DEAL:2::unpack 0_2:35 5 from 1
+DEAL:2::unpack 0_2:36 6 from 1
+DEAL:2::unpack 0_2:37 7 from 1
+DEAL:2::unpack 0_2:41 9 from 1
+DEAL:2::unpack 0_2:42 10 from 1
+DEAL:2::unpack 0_2:43 11 from 1
+DEAL:2::unpack 0_2:45 12 from 1
+DEAL:2::unpack 0_2:46 13 from 1
+DEAL:2::unpack 0_2:47 14 from 1
+
diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_02.cc b/tests/distributed_grids/grid_tools_exchange_cell_data_02.cc
new file mode 100644 (file)
index 0000000..d66e04d
--- /dev/null
@@ -0,0 +1,93 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// test GridTools::exchange_cell_data_to_ghosts, this time with a DoFHandler
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/distributed/grid_tools.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/cell_id.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/fe/fe_q.h>
+
+#include <algorithm>
+
+template <int dim>
+void test ()
+{
+  const MPI_Comm &mpi_communicator = MPI_COMM_WORLD;
+  deallog << "dim = " << dim << std::endl;
+
+  parallel::distributed::Triangulation<dim> tria (mpi_communicator);
+  GridGenerator::hyper_cube(tria);
+  tria.refine_global(2);
+
+
+  FE_Q<dim> fe(1);
+  DoFHandler<dim> dofhandler(tria);
+  dofhandler.distribute_dofs (fe);
+
+  std::set<std::string> output;
+
+  typedef typename DoFHandler<dim>::active_cell_iterator cell_iterator;
+  typedef short DT;
+  short counter = 0;
+  parallel::GridTools::exchange_cell_data_to_ghosts<
+    DoFHandler<dim>, DT>
+  (dofhandler,
+  [&](const cell_iterator& cell)
+  {
+    DT value = ++counter;
+
+    deallog << "pack "
+            << cell->id()
+            << " "
+            << value << std::endl;
+    return value;
+  },
+  [&](const cell_iterator& cell, const DT& data)
+  {
+    std::ostringstream oss;
+    oss << "unpack "
+        << cell->id()
+        << " "
+        << data
+        << " from "
+        << cell->subdomain_id();
+
+    output.insert(oss.str());
+  });
+
+  // print the output (sorted by the std::set) because it arrives in a random order
+  for (auto &it : output)
+    deallog << it << std::endl;
+}
+
+
+int main (int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
+  MPILogInitAll log;
+
+  test<2> ();
+  test<3> ();
+
+  return 0;
+}
diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_02.with_p4est=true.mpirun=2.output b/tests/distributed_grids/grid_tools_exchange_cell_data_02.with_p4est=true.mpirun=2.output
new file mode 100644 (file)
index 0000000..6e65bdc
--- /dev/null
@@ -0,0 +1,87 @@
+
+DEAL:0::dim = 2
+DEAL:0::pack 0_2:02 1
+DEAL:0::pack 0_2:03 2
+DEAL:0::pack 0_2:12 3
+DEAL:0::pack 0_2:13 4
+DEAL:0::unpack 0_2:20 1 from 1
+DEAL:0::unpack 0_2:21 2 from 1
+DEAL:0::unpack 0_2:30 3 from 1
+DEAL:0::unpack 0_2:31 4 from 1
+DEAL:0::dim = 3
+DEAL:0::pack 0_2:04 1
+DEAL:0::pack 0_2:05 2
+DEAL:0::pack 0_2:06 3
+DEAL:0::pack 0_2:07 4
+DEAL:0::pack 0_2:14 5
+DEAL:0::pack 0_2:15 6
+DEAL:0::pack 0_2:16 7
+DEAL:0::pack 0_2:17 8
+DEAL:0::pack 0_2:24 9
+DEAL:0::pack 0_2:25 10
+DEAL:0::pack 0_2:26 11
+DEAL:0::pack 0_2:27 12
+DEAL:0::pack 0_2:34 13
+DEAL:0::pack 0_2:35 14
+DEAL:0::pack 0_2:36 15
+DEAL:0::pack 0_2:37 16
+DEAL:0::unpack 0_2:40 1 from 1
+DEAL:0::unpack 0_2:41 2 from 1
+DEAL:0::unpack 0_2:42 3 from 1
+DEAL:0::unpack 0_2:43 4 from 1
+DEAL:0::unpack 0_2:50 5 from 1
+DEAL:0::unpack 0_2:51 6 from 1
+DEAL:0::unpack 0_2:52 7 from 1
+DEAL:0::unpack 0_2:53 8 from 1
+DEAL:0::unpack 0_2:60 9 from 1
+DEAL:0::unpack 0_2:61 10 from 1
+DEAL:0::unpack 0_2:62 11 from 1
+DEAL:0::unpack 0_2:63 12 from 1
+DEAL:0::unpack 0_2:70 13 from 1
+DEAL:0::unpack 0_2:71 14 from 1
+DEAL:0::unpack 0_2:72 15 from 1
+DEAL:0::unpack 0_2:73 16 from 1
+
+DEAL:1::dim = 2
+DEAL:1::pack 0_2:20 1
+DEAL:1::pack 0_2:21 2
+DEAL:1::pack 0_2:30 3
+DEAL:1::pack 0_2:31 4
+DEAL:1::unpack 0_2:02 1 from 0
+DEAL:1::unpack 0_2:03 2 from 0
+DEAL:1::unpack 0_2:12 3 from 0
+DEAL:1::unpack 0_2:13 4 from 0
+DEAL:1::dim = 3
+DEAL:1::pack 0_2:40 1
+DEAL:1::pack 0_2:41 2
+DEAL:1::pack 0_2:42 3
+DEAL:1::pack 0_2:43 4
+DEAL:1::pack 0_2:50 5
+DEAL:1::pack 0_2:51 6
+DEAL:1::pack 0_2:52 7
+DEAL:1::pack 0_2:53 8
+DEAL:1::pack 0_2:60 9
+DEAL:1::pack 0_2:61 10
+DEAL:1::pack 0_2:62 11
+DEAL:1::pack 0_2:63 12
+DEAL:1::pack 0_2:70 13
+DEAL:1::pack 0_2:71 14
+DEAL:1::pack 0_2:72 15
+DEAL:1::pack 0_2:73 16
+DEAL:1::unpack 0_2:04 1 from 0
+DEAL:1::unpack 0_2:05 2 from 0
+DEAL:1::unpack 0_2:06 3 from 0
+DEAL:1::unpack 0_2:07 4 from 0
+DEAL:1::unpack 0_2:14 5 from 0
+DEAL:1::unpack 0_2:15 6 from 0
+DEAL:1::unpack 0_2:16 7 from 0
+DEAL:1::unpack 0_2:17 8 from 0
+DEAL:1::unpack 0_2:24 9 from 0
+DEAL:1::unpack 0_2:25 10 from 0
+DEAL:1::unpack 0_2:26 11 from 0
+DEAL:1::unpack 0_2:27 12 from 0
+DEAL:1::unpack 0_2:34 13 from 0
+DEAL:1::unpack 0_2:35 14 from 0
+DEAL:1::unpack 0_2:36 15 from 0
+DEAL:1::unpack 0_2:37 16 from 0
+
diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_02.with_p4est=true.mpirun=3.output b/tests/distributed_grids/grid_tools_exchange_cell_data_02.with_p4est=true.mpirun=3.output
new file mode 100644 (file)
index 0000000..8419aa2
--- /dev/null
@@ -0,0 +1,159 @@
+
+DEAL:0::dim = 2
+DEAL:0::pack 0_2:01 1
+DEAL:0::pack 0_2:02 2
+DEAL:0::pack 0_2:03 3
+DEAL:0::unpack 0_2:10 1 from 1
+DEAL:0::unpack 0_2:12 2 from 1
+DEAL:0::unpack 0_2:20 4 from 1
+DEAL:0::unpack 0_2:21 5 from 1
+DEAL:0::unpack 0_2:30 1 from 2
+DEAL:0::dim = 3
+DEAL:0::pack 0_2:03 1
+DEAL:0::pack 0_2:04 2
+DEAL:0::pack 0_2:05 3
+DEAL:0::pack 0_2:06 4
+DEAL:0::pack 0_2:07 5
+DEAL:0::pack 0_2:12 6
+DEAL:0::pack 0_2:13 7
+DEAL:0::pack 0_2:14 8
+DEAL:0::pack 0_2:15 9
+DEAL:0::pack 0_2:16 10
+DEAL:0::pack 0_2:17 11
+DEAL:0::pack 0_2:21 12
+DEAL:0::pack 0_2:23 13
+DEAL:0::pack 0_2:24 14
+DEAL:0::pack 0_2:25 15
+DEAL:0::pack 0_2:26 16
+DEAL:0::pack 0_2:27 17
+DEAL:0::unpack 0_2:30 1 from 1
+DEAL:0::unpack 0_2:31 2 from 1
+DEAL:0::unpack 0_2:32 3 from 1
+DEAL:0::unpack 0_2:34 4 from 1
+DEAL:0::unpack 0_2:35 5 from 1
+DEAL:0::unpack 0_2:36 6 from 1
+DEAL:0::unpack 0_2:40 8 from 1
+DEAL:0::unpack 0_2:41 9 from 1
+DEAL:0::unpack 0_2:42 10 from 1
+DEAL:0::unpack 0_2:43 11 from 1
+DEAL:0::unpack 0_2:50 1 from 2
+DEAL:0::unpack 0_2:51 2 from 2
+DEAL:0::unpack 0_2:52 3 from 2
+DEAL:0::unpack 0_2:53 4 from 2
+DEAL:0::unpack 0_2:60 7 from 2
+DEAL:0::unpack 0_2:61 8 from 2
+DEAL:0::unpack 0_2:62 9 from 2
+DEAL:0::unpack 0_2:63 10 from 2
+DEAL:0::unpack 0_2:70 13 from 2
+DEAL:0::unpack 0_2:71 14 from 2
+DEAL:0::unpack 0_2:72 15 from 2
+
+DEAL:1::dim = 2
+DEAL:1::pack 0_2:10 1
+DEAL:1::pack 0_2:12 2
+DEAL:1::pack 0_2:13 3
+DEAL:1::pack 0_2:20 4
+DEAL:1::pack 0_2:21 5
+DEAL:1::pack 0_2:23 6
+DEAL:1::unpack 0_2:01 1 from 0
+DEAL:1::unpack 0_2:02 2 from 0
+DEAL:1::unpack 0_2:03 3 from 0
+DEAL:1::unpack 0_2:30 1 from 2
+DEAL:1::unpack 0_2:31 2 from 2
+DEAL:1::unpack 0_2:32 3 from 2
+DEAL:1::dim = 3
+DEAL:1::pack 0_2:30 1
+DEAL:1::pack 0_2:31 2
+DEAL:1::pack 0_2:32 3
+DEAL:1::pack 0_2:34 4
+DEAL:1::pack 0_2:35 5
+DEAL:1::pack 0_2:36 6
+DEAL:1::pack 0_2:37 7
+DEAL:1::pack 0_2:40 8
+DEAL:1::pack 0_2:41 9
+DEAL:1::pack 0_2:42 10
+DEAL:1::pack 0_2:43 11
+DEAL:1::pack 0_2:45 12
+DEAL:1::pack 0_2:46 13
+DEAL:1::pack 0_2:47 14
+DEAL:1::unpack 0_2:03 1 from 0
+DEAL:1::unpack 0_2:04 2 from 0
+DEAL:1::unpack 0_2:05 3 from 0
+DEAL:1::unpack 0_2:06 4 from 0
+DEAL:1::unpack 0_2:07 5 from 0
+DEAL:1::unpack 0_2:12 6 from 0
+DEAL:1::unpack 0_2:13 7 from 0
+DEAL:1::unpack 0_2:14 8 from 0
+DEAL:1::unpack 0_2:16 10 from 0
+DEAL:1::unpack 0_2:17 11 from 0
+DEAL:1::unpack 0_2:21 12 from 0
+DEAL:1::unpack 0_2:23 13 from 0
+DEAL:1::unpack 0_2:24 14 from 0
+DEAL:1::unpack 0_2:25 15 from 0
+DEAL:1::unpack 0_2:27 17 from 0
+DEAL:1::unpack 0_2:50 1 from 2
+DEAL:1::unpack 0_2:52 3 from 2
+DEAL:1::unpack 0_2:53 4 from 2
+DEAL:1::unpack 0_2:54 5 from 2
+DEAL:1::unpack 0_2:56 6 from 2
+DEAL:1::unpack 0_2:60 7 from 2
+DEAL:1::unpack 0_2:61 8 from 2
+DEAL:1::unpack 0_2:63 10 from 2
+DEAL:1::unpack 0_2:64 11 from 2
+DEAL:1::unpack 0_2:65 12 from 2
+DEAL:1::unpack 0_2:70 13 from 2
+DEAL:1::unpack 0_2:71 14 from 2
+DEAL:1::unpack 0_2:72 15 from 2
+DEAL:1::unpack 0_2:73 16 from 2
+DEAL:1::unpack 0_2:74 17 from 2
+
+
+DEAL:2::dim = 2
+DEAL:2::pack 0_2:30 1
+DEAL:2::pack 0_2:31 2
+DEAL:2::pack 0_2:32 3
+DEAL:2::unpack 0_2:03 3 from 0
+DEAL:2::unpack 0_2:12 2 from 1
+DEAL:2::unpack 0_2:13 3 from 1
+DEAL:2::unpack 0_2:21 5 from 1
+DEAL:2::unpack 0_2:23 6 from 1
+DEAL:2::dim = 3
+DEAL:2::pack 0_2:50 1
+DEAL:2::pack 0_2:51 2
+DEAL:2::pack 0_2:52 3
+DEAL:2::pack 0_2:53 4
+DEAL:2::pack 0_2:54 5
+DEAL:2::pack 0_2:56 6
+DEAL:2::pack 0_2:60 7
+DEAL:2::pack 0_2:61 8
+DEAL:2::pack 0_2:62 9
+DEAL:2::pack 0_2:63 10
+DEAL:2::pack 0_2:64 11
+DEAL:2::pack 0_2:65 12
+DEAL:2::pack 0_2:70 13
+DEAL:2::pack 0_2:71 14
+DEAL:2::pack 0_2:72 15
+DEAL:2::pack 0_2:73 16
+DEAL:2::pack 0_2:74 17
+DEAL:2::unpack 0_2:05 3 from 0
+DEAL:2::unpack 0_2:06 4 from 0
+DEAL:2::unpack 0_2:07 5 from 0
+DEAL:2::unpack 0_2:14 8 from 0
+DEAL:2::unpack 0_2:15 9 from 0
+DEAL:2::unpack 0_2:16 10 from 0
+DEAL:2::unpack 0_2:17 11 from 0
+DEAL:2::unpack 0_2:24 14 from 0
+DEAL:2::unpack 0_2:25 15 from 0
+DEAL:2::unpack 0_2:26 16 from 0
+DEAL:2::unpack 0_2:27 17 from 0
+DEAL:2::unpack 0_2:34 4 from 1
+DEAL:2::unpack 0_2:35 5 from 1
+DEAL:2::unpack 0_2:36 6 from 1
+DEAL:2::unpack 0_2:37 7 from 1
+DEAL:2::unpack 0_2:41 9 from 1
+DEAL:2::unpack 0_2:42 10 from 1
+DEAL:2::unpack 0_2:43 11 from 1
+DEAL:2::unpack 0_2:45 12 from 1
+DEAL:2::unpack 0_2:46 13 from 1
+DEAL:2::unpack 0_2:47 14 from 1
+
