Implements variable size serialization. (6983/head)
author    Marc Fehling <marc.fehling@gmx.net>
          Fri, 6 Jul 2018 21:11:33 +0000 (15:11 -0600)
committer Marc Fehling <m.fehling@fz-juelich.de>
          Thu, 26 Jul 2018 17:22:17 +0000 (11:22 -0600)
include/deal.II/distributed/tria.h
source/distributed/tria.cc
tests/mpi/attach_data_02.cc
tests/mpi/p4est_save_05.cc [new file with mode: 0644]
tests/mpi/p4est_save_05.with_p4est=true.mpirun=5.output [new file with mode: 0644]

diff --git a/include/deal.II/distributed/tria.h b/include/deal.II/distributed/tria.h
index b093783fde8c61ad2c9fe96a3f043ba19872d45c..1f09092bf257e4966d41c0e4694a09f2fee98b0e 100644 (file)
@@ -1032,9 +1032,10 @@ namespace parallel
          *
          * The data will be written to separate files, whose names
          * consist of the stem @p filename and an attached identifier
-         * <tt>-fixed.data</tt>.
+         * <tt>-fixed.data</tt> for fixed size data and <tt>-variable.data</tt>
+         * for variable size data.
          *
-         * All processors write into that file simultaneously via MPIIO.
+         * All processors write into these files simultaneously via MPIIO.
          * Each processor's position to write to will be determined
          * from the provided @p parallel_forest.
          *
@@ -1050,10 +1051,13 @@ namespace parallel
          *
          * The data will be read from separate files, whose names
          * consist of the stem @p filename and an attached identifier
-         * <tt>-fixed.data</tt>. The @p n_attached_deserialize parameter
-         * is required to gather the memory offsets for each callback.
+         * <tt>-fixed.data</tt> for fixed size data and <tt>-variable.data</tt>
+         * for variable size data.
+         * The @p n_attached_deserialize_fixed and @p n_attached_deserialize_variable
+         * parameters are required to gather the memory offsets for each
+         * callback.
          *
-         * All processors read from that file simultaneously via MPIIO.
+         * All processors read from these files simultaneously via MPIIO.
          * Each processor's position to read from will be determined
          * from the provided @p parallel_forest.
          *
@@ -1064,7 +1068,8 @@ namespace parallel
         load(const typename dealii::internal::p4est::types<dim>::forest
                *                parallel_forest,
              const char *       filename,
-             const unsigned int n_attached_deserialize);
+             const unsigned int n_attached_deserialize_fixed,
+             const unsigned int n_attached_deserialize_variable);
 
         /**
          * Clears all containers and associated data, and resets member
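
As a point of orientation for the save()/load() interface documented above: the fixed size data file begins with the sizes_fixed_cumulative header (written once by rank 0), followed by one block of sizes_fixed_cumulative.back() bytes per p4est quadrant, so each rank can compute its own byte position locally from the quadrant range it owns. The snippet below is only an editorial sketch of that offset arithmetic, mirroring the expressions used in tria.cc further down; the function name and the example numbers are hypothetical and not part of the patch.

#include <cstddef>
#include <iostream>
#include <vector>

// Sketch only: byte offset at which a rank whose first locally owned quadrant
// has global index first_quadrant_of_rank accesses the fixed size data file.
// The file starts with the cumulative-sizes header, then one fixed-size block
// per quadrant.
std::size_t
fixed_data_offset(const std::vector<unsigned int> &sizes_fixed_cumulative,
                  const std::size_t                first_quadrant_of_rank)
{
  const std::size_t header_bytes =
    sizes_fixed_cumulative.size() * sizeof(unsigned int);
  return header_bytes +
         first_quadrant_of_rank * sizes_fixed_cumulative.back();
}

int main()
{
  // Hypothetical header: cell status plus two callbacks, 24 bytes per cell.
  const std::vector<unsigned int> sizes_fixed_cumulative = {4, 12, 24};
  std::cout << fixed_data_offset(sizes_fixed_cumulative, 100) << std::endl; // 2412
}
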
diff --git a/source/distributed/tria.cc b/source/distributed/tria.cc
index 2609b936de232fd16ed6527d1bcad682d67ff568..48988eeec5d52984df782d4ec4b930bc436210d9 100644 (file)
@@ -1788,72 +1788,160 @@ namespace parallel
         *         parallel_forest,
       const char *filename) const
     {
-      Assert(variable_size_data_stored == false, ExcNotImplemented());
+      // Large fractions of this function have been copied from
+      // DataOutInterface::write_vtu_in_parallel.
+      // TODO: Write general MPIIO interface.
 
       Assert(sizes_fixed_cumulative.size() > 0,
              ExcMessage("No data has been packed!"));
 
-      const std::string fname_fixed = std::string(filename) + "_fixed.data";
-
-      // ----- copied -----
-      // from DataOutInterface::write_vtu_in_parallel
-      // TODO: write general MPIIO interface
       const int myrank = Utilities::MPI::this_mpi_process(mpi_communicator);
 
-      MPI_Info info;
-      int      ierr = MPI_Info_create(&info);
-      AssertThrowMPI(ierr);
-      MPI_File fh;
-      ierr = MPI_File_open(mpi_communicator,
-                           const_cast<char *>(fname_fixed.c_str()),
-                           MPI_MODE_CREATE | MPI_MODE_WRONLY,
-                           info,
-                           &fh);
-      AssertThrowMPI(ierr);
-
-      ierr = MPI_File_set_size(fh, 0); // delete the file contents
-      AssertThrowMPI(ierr);
-      // this barrier is necessary, because otherwise others might already
-      // write while one core is still setting the size to zero.
-      ierr = MPI_Barrier(mpi_communicator);
-      AssertThrowMPI(ierr);
-      ierr = MPI_Info_free(&info);
-      AssertThrowMPI(ierr);
-      // ------------------
-
-      // Check if number of processors is lined up with p4est partitioning.
-      Assert(myrank < parallel_forest->mpisize, ExcInternalError());
-
-      // Write cumulative sizes to file.
-      // Since each processor owns the same information about the data sizes,
-      // it is sufficient to let only the first processor perform this task.
-      if (myrank == 0)
+      //
+      // ---------- Fixed size data ----------
+      //
+      {
+        const std::string fname_fixed = std::string(filename) + "_fixed.data";
+
+        MPI_Info info;
+        int      ierr = MPI_Info_create(&info);
+        AssertThrowMPI(ierr);
+
+        MPI_File fh;
+        ierr = MPI_File_open(mpi_communicator,
+                             fname_fixed.c_str(),
+                             MPI_MODE_CREATE | MPI_MODE_WRONLY,
+                             info,
+                             &fh);
+        AssertThrowMPI(ierr);
+
+        ierr = MPI_File_set_size(fh, 0); // delete the file contents
+        AssertThrowMPI(ierr);
+        // this barrier is necessary, because otherwise others might already
+        // write while one core is still setting the size to zero.
+        ierr = MPI_Barrier(mpi_communicator);
+        AssertThrowMPI(ierr);
+        ierr = MPI_Info_free(&info);
+        AssertThrowMPI(ierr);
+        // ------------------
+
+        // Check if number of processors is lined up with p4est partitioning.
+        Assert(myrank < parallel_forest->mpisize, ExcInternalError());
+
+        // Write cumulative sizes to file.
+        // Since each processor owns the same information about the data sizes,
+        // it is sufficient to let only the first processor perform this task.
+        if (myrank == 0)
+          {
+            ierr = MPI_File_write_at(fh,
+                                     0,
+                                     sizes_fixed_cumulative.data(),
+                                     sizes_fixed_cumulative.size(),
+                                     MPI_UNSIGNED,
+                                     MPI_STATUS_IGNORE);
+            AssertThrowMPI(ierr);
+          }
+
+        // Write packed data to file simultaneously.
+        const unsigned int offset_fixed =
+          sizes_fixed_cumulative.size() * sizeof(unsigned int);
+
+        ierr = MPI_File_write_at(
+          fh,
+          offset_fixed +
+            parallel_forest->global_first_quadrant[myrank] *
+              sizes_fixed_cumulative.back(), // global position in file
+          src_data_fixed.data(),
+          src_data_fixed.size(), // local buffer
+          MPI_CHAR,
+          MPI_STATUS_IGNORE);
+        AssertThrowMPI(ierr);
+
+        ierr = MPI_File_close(&fh);
+        AssertThrowMPI(ierr);
+      }
+
+      //
+      // ---------- Variable size data ----------
+      //
+      if (variable_size_data_stored)
         {
-          ierr = MPI_File_write_at(fh,
-                                   0,
-                                   sizes_fixed_cumulative.data(),
-                                   sizes_fixed_cumulative.size(),
-                                   MPI_UNSIGNED,
-                                   MPI_STATUS_IGNORE);
+          const std::string fname_variable =
+            std::string(filename) + "_variable.data";
+
+          const int n_procs = Utilities::MPI::n_mpi_processes(mpi_communicator);
+
+          MPI_Info info;
+          int      ierr = MPI_Info_create(&info);
           AssertThrowMPI(ierr);
-        }
 
-      // Write packed data to file simultaneously.
-      const unsigned int offset =
-        sizes_fixed_cumulative.size() * sizeof(unsigned int);
-
-      ierr = MPI_File_write_at(
-        fh,
-        offset + parallel_forest->global_first_quadrant[myrank] *
-                   sizes_fixed_cumulative.back(), // global position in file
-        src_data_fixed.data(),
-        src_data_fixed.size(), // local buffer
-        MPI_CHAR,
-        MPI_STATUS_IGNORE);
-      AssertThrowMPI(ierr);
-
-      ierr = MPI_File_close(&fh);
-      AssertThrowMPI(ierr);
+          MPI_File fh;
+          ierr = MPI_File_open(mpi_communicator,
+                               fname_variable.c_str(),
+                               MPI_MODE_CREATE | MPI_MODE_WRONLY,
+                               info,
+                               &fh);
+          AssertThrowMPI(ierr);
+
+          ierr = MPI_File_set_size(fh, 0); // delete the file contents
+          AssertThrowMPI(ierr);
+          // this barrier is necessary, because otherwise others might already
+          // write while one core is still setting the size to zero.
+          ierr = MPI_Barrier(mpi_communicator);
+          AssertThrowMPI(ierr);
+          ierr = MPI_Info_free(&info);
+          AssertThrowMPI(ierr);
+
+          // Write sizes of each cell into file simultaneously.
+          ierr =
+            MPI_File_write_at(fh,
+                              parallel_forest->global_first_quadrant[myrank] *
+                                sizeof(int), // global position in file
+                              src_sizes_variable.data(),
+                              src_sizes_variable.size(), // local buffer
+                              MPI_INT,
+                              MPI_STATUS_IGNORE);
+          AssertThrowMPI(ierr);
+
+          const unsigned int offset_variable =
+            parallel_forest->global_num_quadrants * sizeof(int);
+
+          // Gather size of data in bytes we want to store from this processor.
+          const unsigned int size_on_proc = src_data_variable.size();
+
+          // Share information among all processors.
+          std::vector<unsigned int> sizes_on_all_procs(n_procs);
+          ierr = MPI_Allgather(&size_on_proc,
+                               1,
+                               MPI_UNSIGNED,
+                               sizes_on_all_procs.data(),
+                               1,
+                               MPI_UNSIGNED,
+                               mpi_communicator);
+          AssertThrowMPI(ierr);
+
+          // Generate accumulated sum to get an offset for writing variable
+          // size data.
+          std::partial_sum(sizes_on_all_procs.begin(),
+                           sizes_on_all_procs.end(),
+                           sizes_on_all_procs.begin());
+
+          // Write data consecutively into file.
+          ierr = MPI_File_write_at(
+            fh,
+            offset_variable +
+              ((myrank == 0) ?
+                 0 :
+                 sizes_on_all_procs[myrank - 1]), // global position in file
+            src_data_variable.data(),
+            src_data_variable.size(), // local buffer
+            MPI_CHAR,
+            MPI_STATUS_IGNORE);
+          AssertThrowMPI(ierr);
+
+          ierr = MPI_File_close(&fh);
+          AssertThrowMPI(ierr);
+        }
     }
 
 
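The variable size branch above derives each rank's write position in two steps: an MPI_Allgather of the local byte counts, followed by std::partial_sum to turn them into exclusive offsets, added on top of offset_variable, which skips the per-quadrant size table at the head of the file. A minimal standalone sketch of that prefix-sum logic (no MPI, hypothetical per-rank sizes) looks like this:

#include <cstddef>
#include <iostream>
#include <numeric>
#include <vector>

int main()
{
  // Hypothetical per-rank byte counts, as they would arrive via MPI_Allgather.
  std::vector<unsigned int> sizes_on_all_procs = {40, 0, 72, 16};

  // Accumulated sum, exactly as done with std::partial_sum in the patch.
  std::partial_sum(sizes_on_all_procs.begin(),
                   sizes_on_all_procs.end(),
                   sizes_on_all_procs.begin());

  // Rank r starts writing its payload at (r == 0 ? 0 : sizes_on_all_procs[r-1])
  // bytes past offset_variable.
  for (std::size_t r = 0; r < sizes_on_all_procs.size(); ++r)
    std::cout << "rank " << r << " starts at byte "
              << (r == 0 ? 0u : sizes_on_all_procs[r - 1]) << std::endl;
}
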
@@ -1864,68 +1952,152 @@ namespace parallel
       const typename dealii::internal::p4est::types<dim>::forest
         *                parallel_forest,
       const char *       filename,
-      const unsigned int n_attached_deserialize)
+      const unsigned int n_attached_deserialize_fixed,
+      const unsigned int n_attached_deserialize_variable)
     {
+      // Large fractions of this function have been copied from
+      // DataOutInterface::write_vtu_in_parallel.
+      // TODO: Write general MPIIO interface.
+
       Assert(dest_data_fixed.size() == 0,
              ExcMessage("Previously loaded data has not been released yet!"));
 
-      const std::string fname_fixed = std::string(filename) + "_fixed.data";
+      variable_size_data_stored = (n_attached_deserialize_variable > 0);
 
-      // ----- copied -----
-      // from DataOutInterface::write_vtu_in_parallel
-      // TODO: write general MPIIO interface
       const int myrank = Utilities::MPI::this_mpi_process(mpi_communicator);
 
-      MPI_Info info;
-      int      ierr = MPI_Info_create(&info);
-      AssertThrowMPI(ierr);
-      MPI_File fh;
-      ierr = MPI_File_open(mpi_communicator,
-                           const_cast<char *>(fname_fixed.c_str()),
-                           MPI_MODE_RDONLY,
-                           info,
-                           &fh);
-      AssertThrowMPI(ierr);
-
-      ierr = MPI_Info_free(&info);
-      AssertThrowMPI(ierr);
-      // ------------------
-
-      // Check if number of processors is lined up with p4est partitioning.
-      Assert(myrank < parallel_forest->mpisize, ExcInternalError());
-
-      // Read cumulative sizes from file.
-      // Since all processors need the same information about the data sizes,
-      // let each of them gain it by reading from the same location in the file.
-      sizes_fixed_cumulative.resize(1 + n_attached_deserialize);
-      ierr = MPI_File_read_at(fh,
-                              0,
-                              sizes_fixed_cumulative.data(),
-                              sizes_fixed_cumulative.size(),
-                              MPI_UNSIGNED,
-                              MPI_STATUS_IGNORE);
-      AssertThrowMPI(ierr);
+      //
+      // ---------- Fixed size data ----------
+      //
+      {
+        const std::string fname_fixed = std::string(filename) + "_fixed.data";
+
+        MPI_Info info;
+        int      ierr = MPI_Info_create(&info);
+        AssertThrowMPI(ierr);
+
+        MPI_File fh;
+        ierr = MPI_File_open(
+          mpi_communicator, fname_fixed.c_str(), MPI_MODE_RDONLY, info, &fh);
+        AssertThrowMPI(ierr);
+
+        ierr = MPI_Info_free(&info);
+        AssertThrowMPI(ierr);
+
+        // Check if number of processors is lined up with p4est partitioning.
+        Assert(myrank < parallel_forest->mpisize, ExcInternalError());
+
+        // Read cumulative sizes from file.
+        // Since all processors need the same information about the data sizes,
+        // let each of them retrieve it by reading from the same location in
+        // the file.
+        sizes_fixed_cumulative.resize(1 + n_attached_deserialize_fixed +
+                                      (variable_size_data_stored ? 1 : 0));
+        ierr = MPI_File_read_at(fh,
+                                0,
+                                sizes_fixed_cumulative.data(),
+                                sizes_fixed_cumulative.size(),
+                                MPI_UNSIGNED,
+                                MPI_STATUS_IGNORE);
+        AssertThrowMPI(ierr);
+
+        // Allocate sufficient memory.
+        dest_data_fixed.resize(parallel_forest->local_num_quadrants *
+                               sizes_fixed_cumulative.back());
+
+        // Read packed data from file simultaneously.
+        const unsigned int offset =
+          sizes_fixed_cumulative.size() * sizeof(unsigned int);
+
+        ierr = MPI_File_read_at(
+          fh,
+          offset + parallel_forest->global_first_quadrant[myrank] *
+                     sizes_fixed_cumulative.back(), // global position in file
+          dest_data_fixed.data(),
+          dest_data_fixed.size(), // local buffer
+          MPI_CHAR,
+          MPI_STATUS_IGNORE);
+        AssertThrowMPI(ierr);
 
-      // Allocate sufficient memory.
-      dest_data_fixed.resize(parallel_forest->local_num_quadrants *
-                             sizes_fixed_cumulative.back());
+        ierr = MPI_File_close(&fh);
+        AssertThrowMPI(ierr);
+      }
+
+      //
+      // ---------- Variable size data ----------
+      //
+      if (variable_size_data_stored)
+        {
+          const std::string fname_variable =
+            std::string(filename) + "_variable.data";
+
+          const int n_procs = Utilities::MPI::n_mpi_processes(mpi_communicator);
+
+          MPI_Info info;
+          int      ierr = MPI_Info_create(&info);
+          AssertThrowMPI(ierr);
 
-      // Read packed data from file simultaneously.
-      const unsigned int offset =
-        sizes_fixed_cumulative.size() * sizeof(unsigned int);
-
-      ierr = MPI_File_read_at(
-        fh,
-        offset + parallel_forest->global_first_quadrant[myrank] *
-                   sizes_fixed_cumulative.back(), // global position in file
-        dest_data_fixed.data(),
-        dest_data_fixed.size(), // local buffer
-        MPI_CHAR,
-        MPI_STATUS_IGNORE);
-      AssertThrowMPI(ierr);
-
-      ierr = MPI_File_close(&fh);
-      AssertThrowMPI(ierr);
+          MPI_File fh;
+          ierr = MPI_File_open(mpi_communicator,
+                               fname_variable.c_str(),
+                               MPI_MODE_RDONLY,
+                               info,
+                               &fh);
+          AssertThrowMPI(ierr);
+
+          ierr = MPI_Info_free(&info);
+          AssertThrowMPI(ierr);
+
+          // Read sizes of all locally owned cells.
+          dest_sizes_variable.resize(parallel_forest->local_num_quadrants);
+          ierr =
+            MPI_File_read_at(fh,
+                             parallel_forest->global_first_quadrant[myrank] *
+                               sizeof(int),
+                             dest_sizes_variable.data(),
+                             dest_sizes_variable.size(),
+                             MPI_INT,
+                             MPI_STATUS_IGNORE);
+          AssertThrowMPI(ierr);
+
+          const unsigned int offset =
+            parallel_forest->global_num_quadrants * sizeof(int);
+
+          const unsigned int size_on_proc =
+            std::accumulate(dest_sizes_variable.begin(),
+                            dest_sizes_variable.end(),
+                            0);
+
+          // share information among all processors
+          std::vector<unsigned int> sizes_on_all_procs(n_procs);
+          ierr = MPI_Allgather(&size_on_proc,
+                               1,
+                               MPI_UNSIGNED,
+                               sizes_on_all_procs.data(),
+                               1,
+                               MPI_UNSIGNED,
+                               mpi_communicator);
+          AssertThrowMPI(ierr);
+
+          // generate accumulated sum
+          std::partial_sum(sizes_on_all_procs.begin(),
+                           sizes_on_all_procs.end(),
+                           sizes_on_all_procs.begin());
+
+          dest_data_variable.resize(size_on_proc);
+          ierr = MPI_File_read_at(fh,
+                                  offset + ((myrank == 0) ?
+                                              0 :
+                                              sizes_on_all_procs[myrank - 1]),
+                                  dest_data_variable.data(),
+                                  dest_data_variable.size(),
+                                  MPI_CHAR,
+                                  MPI_STATUS_IGNORE);
+          AssertThrowMPI(ierr);
+
+          ierr = MPI_File_close(&fh);
+          AssertThrowMPI(ierr);
+        }
     }
 
 
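After load() has filled dest_data_fixed, the per-cell chunks can be located purely from sizes_fixed_cumulative. The sketch below illustrates that slicing under the assumption, suggested by the code above, that sizes_fixed_cumulative[k] is the cumulative byte count up to and including packed item k (with the cell status counted as item 0) and that .back() is the total fixed-size payload per cell; fixed_chunk_range and the example values are hypothetical, not part of the patch.

#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

// Sketch only: half-open byte range [begin, end) of packed item item_index for
// local cell cell_index inside a buffer laid out like dest_data_fixed.
std::pair<std::size_t, std::size_t>
fixed_chunk_range(const std::vector<unsigned int> &sizes_fixed_cumulative,
                  const std::size_t                cell_index,
                  const std::size_t                item_index)
{
  const std::size_t cell_begin = cell_index * sizes_fixed_cumulative.back();
  const std::size_t begin =
    cell_begin +
    (item_index == 0 ? 0 : sizes_fixed_cumulative[item_index - 1]);
  const std::size_t end = cell_begin + sizes_fixed_cumulative[item_index];
  return {begin, end};
}

int main()
{
  // Hypothetical layout: status (4 bytes) plus two callbacks, 24 bytes per cell.
  const std::vector<unsigned int> sizes_fixed_cumulative = {4, 12, 24};
  const auto range = fixed_chunk_range(sizes_fixed_cumulative, 2, 1);
  std::cout << range.first << ".." << range.second << std::endl; // 52..60
}
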
@@ -2530,10 +2702,12 @@ namespace parallel
         {
           std::string   fname = std::string(filename) + ".info";
           std::ofstream f(fname.c_str());
-          f << "version nproc n_attached_objs n_coarse_cells" << std::endl
-            << 3 << " "
+          f << "version nproc n_attached_fixed_size_objs n_attached_variable_size_objs n_coarse_cells"
+            << std::endl
+            << 4 << " "
             << Utilities::MPI::n_mpi_processes(this->mpi_communicator) << " "
             << cell_attached_data.pack_callbacks_fixed.size() << " "
+            << cell_attached_data.pack_callbacks_variable.size() << " "
             << this->n_cells(0) << std::endl;
         }
 
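With the bump to version 4, the .info companion file now records both callback counts. As a purely hypothetical example, a save performed by 3 ranks with one fixed size and one variable size callback on a mesh of 4 coarse cells would produce:

version nproc n_attached_fixed_size_objs n_attached_variable_size_objs n_coarse_cells
4 3 1 1 4
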
@@ -2613,25 +2787,28 @@ namespace parallel
         connectivity);
       connectivity = nullptr;
 
-      unsigned int version, numcpus, attached_count, n_coarse_cells;
+      unsigned int version, numcpus, attached_count_fixed,
+        attached_count_variable, n_coarse_cells;
       {
         std::string   fname = std::string(filename) + ".info";
         std::ifstream f(fname.c_str());
         AssertThrow(f, ExcIO());
         std::string firstline;
         getline(f, firstline); // skip first line
-        f >> version >> numcpus >> attached_count >> n_coarse_cells;
+        f >> version >> numcpus >> attached_count_fixed >>
+          attached_count_variable >> n_coarse_cells;
       }
 
-      Assert(version == 3,
-             ExcMessage("Incompatible version found in .info file."));
+      AssertThrow(version == 4,
+                  ExcMessage("Incompatible version found in .info file."));
       Assert(this->n_cells(0) == n_coarse_cells,
              ExcMessage("Number of coarse cells differ!"));
 
       // clear all of the callback data, as explained in the documentation of
       // register_data_attach()
-      cell_attached_data.n_attached_data_sets   = 0;
-      cell_attached_data.n_attached_deserialize = attached_count;
+      cell_attached_data.n_attached_data_sets = 0;
+      cell_attached_data.n_attached_deserialize =
+        attached_count_fixed + attached_count_variable;
 
       parallel_forest = dealii::internal::p4est::functions<dim>::load_ext(
         filename,
@@ -2665,9 +2842,12 @@ namespace parallel
         }
 
       // load saved data, if any was stored
-      if (attached_count > 0)
+      if (cell_attached_data.n_attached_deserialize > 0)
         {
-          data_transfer.load(parallel_forest, filename, attached_count);
+          data_transfer.load(parallel_forest,
+                             filename,
+                             attached_count_fixed,
+                             attached_count_variable);
 
           data_transfer.unpack_cell_status(local_quadrant_cell_relations);
 
diff --git a/tests/mpi/attach_data_02.cc b/tests/mpi/attach_data_02.cc
index 0c6c4f4fb8789bd229a6a8dedcf65f78b3079f4d..68d02d7602888daf5250f1c46c84f710822321c8 100644 (file)
@@ -170,7 +170,7 @@ test()
 
       unsigned int handle =
         tr.register_data_attach(pack_function<dim>,
-                                /*packs_variable_size_data=*/true);
+                                /*returns_variable_size_data=*/true);
 
       deallog << "handle=" << handle << std::endl;
       tr.execute_coarsening_and_refinement();
diff --git a/tests/mpi/p4est_save_05.cc b/tests/mpi/p4est_save_05.cc
new file mode 100644 (file)
index 0000000..e45bddb
--- /dev/null
@@ -0,0 +1,187 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+// save and load a triangulation with a different number of cpus
+// with variable size data attach
+// this is a combination of tests p4est_save_04 and attach_data_02
+
+#include <deal.II/base/tensor.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+
+#include "../tests.h"
+
+
+
+template <int dim>
+std::vector<char>
+pack_function(
+  const typename parallel::distributed::Triangulation<dim, dim>::cell_iterator
+    &cell,
+  const typename parallel::distributed::Triangulation<dim, dim>::CellStatus
+    status)
+{
+  static int                some_number = 1;
+  std::vector<unsigned int> some_vector(some_number);
+  for (unsigned int i = 0; i < some_number; ++i)
+    some_vector[i] = i;
+
+  std::vector<char> buffer;
+  buffer.reserve(some_number * sizeof(unsigned int));
+  for (auto vector_it = some_vector.cbegin(); vector_it != some_vector.cend();
+       ++vector_it)
+    {
+      Utilities::pack(*vector_it, buffer, /*allow_compression=*/false);
+    }
+
+  deallog << "packing cell " << cell->id()
+          << " with data size=" << buffer.size() << " accumulated data="
+          << std::accumulate(some_vector.begin(), some_vector.end(), 0)
+          << std::endl;
+
+  Assert((status ==
+          parallel::distributed::Triangulation<dim, dim>::CELL_PERSIST),
+         ExcInternalError());
+
+  ++some_number;
+  return buffer;
+}
+
+
+
+template <int dim>
+void
+unpack_function(
+  const typename parallel::distributed::Triangulation<dim, dim>::cell_iterator
+    &cell,
+  const typename parallel::distributed::Triangulation<dim, dim>::CellStatus
+                                                                  status,
+  const boost::iterator_range<std::vector<char>::const_iterator> &data_range)
+{
+  const unsigned int data_in_bytes =
+    std::distance(data_range.begin(), data_range.end());
+
+  std::vector<unsigned int> intdatavector(data_in_bytes / sizeof(unsigned int));
+
+  auto vector_it = intdatavector.begin();
+  auto data_it   = data_range.begin();
+  for (; data_it != data_range.end();
+       ++vector_it, data_it += sizeof(unsigned int))
+    {
+      *vector_it =
+        Utilities::unpack<unsigned int>(data_it,
+                                        data_it + sizeof(unsigned int),
+                                        /*allow_compression=*/false);
+    }
+
+  deallog << "unpacking cell " << cell->id() << " with data size="
+          << std::distance(data_range.begin(), data_range.end())
+          << " accumulated data="
+          << std::accumulate(intdatavector.begin(), intdatavector.end(), 0)
+          << std::endl;
+
+  Assert((status ==
+          parallel::distributed::Triangulation<dim, dim>::CELL_PERSIST),
+         ExcInternalError());
+}
+
+
+
+template <int dim>
+void
+test()
+{
+  unsigned int myid    = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+  MPI_Comm     com_all = MPI_COMM_WORLD;
+  MPI_Comm     com_small;
+
+  // split the communicator in proc 0,1,2 and 3,4
+  MPI_Comm_split(com_all, (myid < 3) ? 0 : 1, myid, &com_small);
+
+  // write with small com
+  if (myid < 3)
+    {
+      deallog << "writing with " << Utilities::MPI::n_mpi_processes(com_small)
+              << std::endl;
+
+      parallel::distributed::Triangulation<dim> tr(com_small);
+      GridGenerator::subdivided_hyper_cube(tr, 2);
+      tr.refine_global(1);
+
+      typename Triangulation<dim, dim>::active_cell_iterator cell;
+      for (cell = tr.begin_active(); cell != tr.end(); ++cell)
+        {
+          if (cell->is_locally_owned())
+            {
+              if (cell->id().to_string() == "0_1:0")
+                cell->set_refine_flag();
+              else if (cell->parent()->id().to_string() == "3_0:")
+                cell->set_coarsen_flag();
+            }
+        }
+      tr.execute_coarsening_and_refinement();
+
+      unsigned int handle =
+        tr.register_data_attach(pack_function<dim>,
+                                /*returns_variable_size_data=*/true);
+
+      tr.save("file");
+      deallog << "#cells = " << tr.n_global_active_cells() << std::endl;
+      deallog << "Checksum: " << tr.get_checksum() << std::endl;
+    }
+
+  MPI_Barrier(MPI_COMM_WORLD);
+
+  deallog << "reading with " << Utilities::MPI::n_mpi_processes(com_all)
+          << std::endl;
+
+  {
+    parallel::distributed::Triangulation<dim> tr(com_all);
+
+    GridGenerator::subdivided_hyper_cube(tr, 2);
+    tr.load("file");
+
+    unsigned int handle =
+      tr.register_data_attach(pack_function<dim>,
+                              /*returns_variable_size_data=*/true);
+
+    tr.notify_ready_to_unpack(handle, unpack_function<dim>);
+
+    deallog << "#cells = " << tr.n_global_active_cells() << std::endl;
+    deallog << "Checksum: " << tr.get_checksum() << std::endl;
+  }
+
+  if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
+    deallog << "OK" << std::endl;
+}
+
+
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll                    log;
+
+  test<2>();
+}
diff --git a/tests/mpi/p4est_save_05.with_p4est=true.mpirun=5.output b/tests/mpi/p4est_save_05.with_p4est=true.mpirun=5.output
new file mode 100644 (file)
index 0000000..a11cc69
--- /dev/null
@@ -0,0 +1,66 @@
+
+DEAL:0::writing with 3
+DEAL:0::packing cell 0_2:00 with data size=4 accumulated data=0
+DEAL:0::packing cell 0_2:01 with data size=8 accumulated data=1
+DEAL:0::packing cell 0_2:02 with data size=12 accumulated data=3
+DEAL:0::packing cell 0_2:03 with data size=16 accumulated data=6
+DEAL:0::packing cell 0_1:1 with data size=20 accumulated data=10
+DEAL:0::#cells = 16
+DEAL:0::Checksum: 2822439380
+DEAL:0::reading with 5
+DEAL:0::unpacking cell 0_2:00 with data size=4 accumulated data=0
+DEAL:0::unpacking cell 0_2:01 with data size=8 accumulated data=1
+DEAL:0::unpacking cell 0_2:02 with data size=12 accumulated data=3
+DEAL:0::unpacking cell 0_2:03 with data size=16 accumulated data=6
+DEAL:0::#cells = 16
+DEAL:0::Checksum: 2822439380
+DEAL:0::OK
+
+DEAL:1::writing with 3
+DEAL:1::packing cell 0_1:2 with data size=4 accumulated data=0
+DEAL:1::packing cell 0_1:3 with data size=8 accumulated data=1
+DEAL:1::packing cell 1_1:0 with data size=12 accumulated data=3
+DEAL:1::packing cell 1_1:1 with data size=16 accumulated data=6
+DEAL:1::packing cell 1_1:2 with data size=20 accumulated data=10
+DEAL:1::packing cell 1_1:3 with data size=24 accumulated data=15
+DEAL:1::#cells = 16
+DEAL:1::Checksum: 0
+DEAL:1::reading with 5
+DEAL:1::unpacking cell 0_1:1 with data size=20 accumulated data=10
+DEAL:1::unpacking cell 0_1:2 with data size=4 accumulated data=0
+DEAL:1::#cells = 16
+DEAL:1::Checksum: 0
+
+
+DEAL:2::writing with 3
+DEAL:2::packing cell 2_1:0 with data size=4 accumulated data=0
+DEAL:2::packing cell 2_1:1 with data size=8 accumulated data=1
+DEAL:2::packing cell 2_1:2 with data size=12 accumulated data=3
+DEAL:2::packing cell 2_1:3 with data size=16 accumulated data=6
+DEAL:2::packing cell 3_0: with data size=20 accumulated data=10
+DEAL:2::#cells = 16
+DEAL:2::Checksum: 0
+DEAL:2::reading with 5
+DEAL:2::unpacking cell 0_1:3 with data size=8 accumulated data=1
+DEAL:2::unpacking cell 1_1:0 with data size=12 accumulated data=3
+DEAL:2::unpacking cell 1_1:1 with data size=16 accumulated data=6
+DEAL:2::unpacking cell 1_1:2 with data size=20 accumulated data=10
+DEAL:2::unpacking cell 1_1:3 with data size=24 accumulated data=15
+DEAL:2::#cells = 16
+DEAL:2::Checksum: 0
+
+
+DEAL:3::reading with 5
+DEAL:3::#cells = 16
+DEAL:3::Checksum: 0
+
+
+DEAL:4::reading with 5
+DEAL:4::unpacking cell 2_1:0 with data size=4 accumulated data=0
+DEAL:4::unpacking cell 2_1:1 with data size=8 accumulated data=1
+DEAL:4::unpacking cell 2_1:2 with data size=12 accumulated data=3
+DEAL:4::unpacking cell 2_1:3 with data size=16 accumulated data=6
+DEAL:4::unpacking cell 3_0: with data size=20 accumulated data=10
+DEAL:4::#cells = 16
+DEAL:4::Checksum: 0
+
