https://gitweb.dealii.org/ - dealii.git/commitdiff
Refactored parallel::distributed::Triangulation w.r.t. the new p4est transfer API. 6864/head
author Marc Fehling <m.fehling@fz-juelich.de>
Tue, 3 Jul 2018 21:02:53 +0000 (15:02 -0600)
committer Marc Fehling <m.fehling@fz-juelich.de>
Fri, 6 Jul 2018 17:35:28 +0000 (11:35 -0600)
Include the new p4est transfer API in the wrapper functions.

Introduced class DataTransfer in the private scope of Triangulation.

Let the API use 'std::vector<char>' instead of 'void*' for buffers.

Determine pack/unpack sizes dynamically.
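
As an illustration of the new interface (not part of the diff below): a pack callback no longer writes through a 'void*' of pre-declared size, but appends its serialized data to a per-cell 'std::vector<char>'. A minimal sketch with a hypothetical payload and helper name:

#include <deal.II/base/utilities.h>
#include <deal.II/distributed/tria.h>

#include <vector>

template <int dim>
unsigned int
attach_example_data(
  dealii::parallel::distributed::Triangulation<dim> &triangulation)
{
  using Tria = dealii::parallel::distributed::Triangulation<dim>;

  // No size argument any more: the per-cell data size is determined
  // dynamically from what the callback actually packs.
  return triangulation.register_data_attach(
    [](const typename Tria::cell_iterator &cell,
       const typename Tria::CellStatus     status,
       std::vector<char> &                 data) {
      (void)status;
      // illustrative payload: serialize the cell's material id and append
      // it to the buffer; compression stays off so that every cell
      // contributes a buffer of identical size
      const unsigned int payload = cell->material_id();
      dealii::Utilities::pack(payload, data, /*allow_compression=*/false);
    });
}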

14 files changed:
include/deal.II/base/quadrature_point_data.h
include/deal.II/base/utilities.h
include/deal.II/distributed/p4est_wrappers.h
include/deal.II/distributed/solution_transfer.h
include/deal.II/distributed/tria.h
include/deal.II/particles/particle_handler.h
source/distributed/p4est_wrappers.cc
source/distributed/solution_transfer.cc
source/distributed/tria.cc
source/particles/particle_handler.cc
tests/mpi/attach_data_01.cc
tests/mpi/repartition_02.cc
tests/particles/particle_handler_05.cc
tests/serialization/particle_handler_01.cc

index 2f1a2e2c163124fa27a02a23928899fd4ce81c1f..3e07cd955bdadc74df668560adef551f24676265 100644 (file)
@@ -410,11 +410,12 @@ namespace parallel
        * repartitioning.
        */
       void
-      pack_function(const typename parallel::distributed::
-                      Triangulation<dim, dim>::cell_iterator &cell,
-                    const typename parallel::distributed::
-                      Triangulation<dim, dim>::CellStatus status,
-                    void *                                data);
+      pack_function(
+        const typename parallel::distributed::Triangulation<dim>::cell_iterator
+          &cell,
+        const typename parallel::distributed::Triangulation<dim>::CellStatus
+                           status,
+        std::vector<char> &data);
 
       /**
        * A callback function used to unpack the data on the current mesh that
@@ -422,11 +423,13 @@ namespace parallel
        * coarsening and repartitioning.
        */
       void
-      unpack_function(const typename parallel::distributed::
-                        Triangulation<dim, dim>::cell_iterator &cell,
-                      const typename parallel::distributed::
-                        Triangulation<dim, dim>::CellStatus status,
-                      const void *                          data);
+      unpack_function(
+        const typename parallel::distributed::Triangulation<dim>::cell_iterator
+          &cell,
+        const typename parallel::distributed::Triangulation<dim>::CellStatus
+          status,
+        const boost::iterator_range<std::vector<char>::const_iterator>
+          data_range);
 
       /**
        * FiniteElement used to project data from and to quadrature points.
@@ -790,16 +793,13 @@ namespace parallel
       matrix_dofs.reinit(dofs_per_cell, number_of_values);
       matrix_dofs_child.reinit(dofs_per_cell, number_of_values);
       matrix_quadrature.reinit(n_q_points, number_of_values);
-      data_size_in_bytes = sizeof(double) * dofs_per_cell * number_of_values;
 
-      handle = triangulation->register_data_attach(
-        data_size_in_bytes,
-        std::bind(
-          &ContinuousQuadratureDataTransfer<dim, DataType>::pack_function,
-          this,
-          std::placeholders::_1,
-          std::placeholders::_2,
-          std::placeholders::_3));
+      handle = triangulation->register_data_attach(std::bind(
+        &ContinuousQuadratureDataTransfer<dim, DataType>::pack_function,
+        this,
+        std::placeholders::_1,
+        std::placeholders::_2,
+        std::placeholders::_3));
     }
 
 
@@ -827,21 +827,20 @@ namespace parallel
     template <int dim, typename DataType>
     void
     ContinuousQuadratureDataTransfer<dim, DataType>::pack_function(
-      const typename parallel::distributed::Triangulation<dim,
-                                                          dim>::cell_iterator
+      const typename parallel::distributed::Triangulation<dim>::cell_iterator
         &cell,
-      const typename parallel::distributed::Triangulation<dim, dim>::
-        CellStatus /*status*/,
-      void *data)
+      const typename parallel::distributed::Triangulation<
+        dim>::CellStatus /*status*/,
+      std::vector<char> &data)
     {
-      double *data_store = reinterpret_cast<double *>(data);
-
       pack_cell_data(cell, data_storage, matrix_quadrature);
 
       // project to FE
       project_to_fe_matrix.mmult(matrix_dofs, matrix_quadrature);
 
-      std::memcpy(data_store, &matrix_dofs(0, 0), data_size_in_bytes);
+      // to get consistent data sizes on each cell for the fixed size transfer,
+      // we won't allow compression
+      Utilities::pack(matrix_dofs, data, /*allow_compression=*/false);
     }
 
 
@@ -849,21 +848,21 @@ namespace parallel
     template <int dim, typename DataType>
     void
     ContinuousQuadratureDataTransfer<dim, DataType>::unpack_function(
-      const typename parallel::distributed::Triangulation<dim,
-                                                          dim>::cell_iterator
+      const typename parallel::distributed::Triangulation<dim>::cell_iterator
         &cell,
-      const typename parallel::distributed::Triangulation<dim, dim>::CellStatus
-                  status,
-      const void *data)
+      const typename parallel::distributed::Triangulation<dim>::CellStatus
+                                                                     status,
+      const boost::iterator_range<std::vector<char>::const_iterator> data_range)
     {
       Assert((status !=
               parallel::distributed::Triangulation<dim, dim>::CELL_COARSEN),
              ExcNotImplemented());
       (void)status;
 
-      const double *data_store = reinterpret_cast<const double *>(data);
-
-      std::memcpy(&matrix_dofs(0, 0), data_store, data_size_in_bytes);
+      matrix_dofs =
+        Utilities::unpack<FullMatrix<double>>(data_range.begin(),
+                                              data_range.end(),
+                                              /*allow_compression=*/false);
 
       if (cell->has_children())
         {
index 2a5f984b454cd7b186575dad100845a77d492fd3..ca25b4dad6576a1807e10e8efcfd9a8c9f00b59f 100644 (file)
@@ -40,6 +40,7 @@
 #include <boost/archive/binary_iarchive.hpp>
 #include <boost/archive/binary_oarchive.hpp>
 #include <boost/serialization/array.hpp>
+#include <boost/serialization/complex.hpp>
 #include <boost/serialization/vector.hpp>
 
 #ifdef DEAL_II_WITH_ZLIB
@@ -448,11 +449,15 @@ namespace Utilities
    * Creates and returns a buffer solely for the given object, using the
    * above mentioned pack function.
    *
+   * If the library has been compiled with ZLIB enabled, then the output buffer
+   * can be compressed. This can be controlled with the parameter
+   * @p allow_compression, which only has an effect if ZLIB is enabled.
+   *
    * @author Timo Heister, Wolfgang Bangerth, 2017.
    */
   template <typename T>
   std::vector<char>
-  pack(const T &object);
+  pack(const T &object, const bool allow_compression = true);
 
   /**
    * Given a vector of characters, obtained through a call to the function
@@ -462,6 +467,10 @@ namespace Utilities
    * from a vector of characters, and it is the inverse of the function
    * Utilities::pack().
    *
+   * The @p allow_compression parameter indicates whether the buffer to
+   * read from may have been compressed with ZLIB before, and only has
+   * an effect if ZLIB is enabled.
+   *
    * @note Since no arguments to this function depend on the template type
    *  @p T, you must manually specify the template argument when calling
    *  this function.
@@ -484,7 +493,7 @@ namespace Utilities
    */
   template <typename T>
   T
-  unpack(const std::vector<char> &buffer);
+  unpack(const std::vector<char> &buffer, const bool allow_compression = true);
 
   /**
    * Same unpack function as above, but takes constant iterators on
@@ -510,6 +519,10 @@ namespace Utilities
    * from a vector of characters, and it is the inverse of the function
    * Utilities::pack().
    *
+   * The @p allow_compression parameter indicates whether the buffer to
+   * read from may have been compressed with ZLIB before, and only has
+   * an effect if ZLIB is enabled.
+   *
    * @note This function exists due to a quirk of C++. Specifically,
    *  if you want to pack() or unpack() arrays of objects, then the
    *  following works:
@@ -534,7 +547,9 @@ namespace Utilities
    */
   template <typename T, int N>
   void
-  unpack(const std::vector<char> &buffer, T (&unpacked_object)[N]);
+  unpack(const std::vector<char> &buffer,
+         T (&unpacked_object)[N],
+         const bool allow_compression = true);
 
   /**
    * Same unpack function as above, but takes constant iterators on
@@ -1082,10 +1097,10 @@ namespace Utilities
 
   template <typename T>
   std::vector<char>
-  pack(const T &object)
+  pack(const T &object, const bool allow_compression)
   {
     std::vector<char> buffer;
-    pack<T>(object, buffer);
+    pack<T>(object, buffer, allow_compression);
     return buffer;
   }
 
@@ -1152,9 +1167,9 @@ namespace Utilities
 
   template <typename T>
   T
-  unpack(const std::vector<char> &buffer)
+  unpack(const std::vector<char> &buffer, const bool allow_compression)
   {
-    return unpack<T>(buffer.cbegin(), buffer.cend());
+    return unpack<T>(buffer.cbegin(), buffer.cend(), allow_compression);
   }
 
 
@@ -1216,9 +1231,14 @@ namespace Utilities
 
   template <typename T, int N>
   void
-  unpack(const std::vector<char> &buffer, T (&unpacked_object)[N])
+  unpack(const std::vector<char> &buffer,
+         T (&unpacked_object)[N],
+         const bool allow_compression)
   {
-    unpack<T, N>(buffer.cbegin(), buffer.cend(), unpacked_object);
+    unpack<T, N>(buffer.cbegin(),
+                 buffer.cend(),
+                 unpacked_object,
+                 allow_compression);
   }
 
 } // namespace Utilities
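
As a usage sketch of the extended interface documented above (the packed type is arbitrary; the important point is that the allow_compression flag has to match between pack() and unpack()):

#include <deal.II/base/utilities.h>

#include <vector>

int main()
{
  using namespace dealii;

  const std::vector<double> original = {1., 2., 3.};

  // Pack without compression, e.g. to obtain buffers of predictable,
  // fixed size for the p4est fixed-size transfer.
  const std::vector<char> buffer =
    Utilities::pack(original, /*allow_compression=*/false);

  // Unpack from the iterator pair with the same compression setting.
  const auto copy =
    Utilities::unpack<std::vector<double>>(buffer.cbegin(),
                                           buffer.cend(),
                                           /*allow_compression=*/false);

  return (copy == original) ? 0 : 1;
}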
index 58a82a5b5c851be409c726d29aa34438ec7e97ef..3b77dfeabb6b1728f7cff0e58334a99417d77db1 100644 (file)
@@ -65,29 +65,31 @@ namespace internal
     template <>
     struct types<2>
     {
-      using connectivity = p4est_connectivity_t;
-      using forest       = p4est_t;
-      using tree         = p4est_tree_t;
-      using quadrant     = p4est_quadrant_t;
-      using topidx       = p4est_topidx_t;
-      using locidx       = p4est_locidx_t;
-      using gloidx       = p4est_gloidx_t;
-      using balance_type = p4est_connect_type_t;
-      using ghost        = p4est_ghost_t;
+      using connectivity     = p4est_connectivity_t;
+      using forest           = p4est_t;
+      using tree             = p4est_tree_t;
+      using quadrant         = p4est_quadrant_t;
+      using topidx           = p4est_topidx_t;
+      using locidx           = p4est_locidx_t;
+      using gloidx           = p4est_gloidx_t;
+      using balance_type     = p4est_connect_type_t;
+      using ghost            = p4est_ghost_t;
+      using transfer_context = p4est_transfer_context_t;
     };
 
     template <>
     struct types<3>
     {
-      using connectivity = p8est_connectivity_t;
-      using forest       = p8est_t;
-      using tree         = p8est_tree_t;
-      using quadrant     = p8est_quadrant_t;
-      using topidx       = p4est_topidx_t;
-      using locidx       = p4est_locidx_t;
-      using gloidx       = p4est_gloidx_t;
-      using balance_type = p8est_connect_type_t;
-      using ghost        = p8est_ghost_t;
+      using connectivity     = p8est_connectivity_t;
+      using forest           = p8est_t;
+      using tree             = p8est_tree_t;
+      using quadrant         = p8est_quadrant_t;
+      using topidx           = p4est_topidx_t;
+      using locidx           = p4est_locidx_t;
+      using gloidx           = p4est_gloidx_t;
+      using balance_type     = p8est_connect_type_t;
+      using ghost            = p8est_ghost_t;
+      using transfer_context = p8est_transfer_context_t;
     };
 
 
@@ -228,6 +230,46 @@ namespace internal
                 void *                                     user_data);
 
       static constexpr unsigned int max_level = P4EST_MAXLEVEL;
+
+      static void (&transfer_fixed)(const types<2>::gloidx *dest_gfq,
+                                    const types<2>::gloidx *src_gfq,
+                                    MPI_Comm                mpicomm,
+                                    int                     tag,
+                                    void *                  dest_data,
+                                    const void *            src_data,
+                                    size_t                  data_size);
+
+      static types<2>::transfer_context *(&transfer_fixed_begin)(
+        const types<2>::gloidx *dest_gfq,
+        const types<2>::gloidx *src_gfq,
+        MPI_Comm                mpicomm,
+        int                     tag,
+        void *                  dest_data,
+        const void *            src_data,
+        size_t                  data_size);
+
+      static void (&transfer_fixed_end)(types<2>::transfer_context *tc);
+
+      static void (&transfer_custom)(const types<2>::gloidx *dest_gfq,
+                                     const types<2>::gloidx *src_gfq,
+                                     MPI_Comm                mpicomm,
+                                     int                     tag,
+                                     void *                  dest_data,
+                                     const int *             dest_sizes,
+                                     const void *            src_data,
+                                     const int *             src_sizes);
+
+      static types<2>::transfer_context *(&transfer_custom_begin)(
+        const types<2>::gloidx *dest_gfq,
+        const types<2>::gloidx *src_gfq,
+        MPI_Comm                mpicomm,
+        int                     tag,
+        void *                  dest_data,
+        const int *             dest_sizes,
+        const void *            src_data,
+        const int *             src_sizes);
+
+      static void (&transfer_custom_end)(types<2>::transfer_context *tc);
     };
 
 
@@ -349,9 +391,47 @@ namespace internal
 
       static size_t (&connectivity_memory_used)(types<3>::connectivity *p4est);
 
+      static constexpr unsigned int max_level = P8EST_MAXLEVEL;
 
+      static void (&transfer_fixed)(const types<3>::gloidx *dest_gfq,
+                                    const types<3>::gloidx *src_gfq,
+                                    MPI_Comm                mpicomm,
+                                    int                     tag,
+                                    void *                  dest_data,
+                                    const void *            src_data,
+                                    size_t                  data_size);
+
+      static types<3>::transfer_context *(&transfer_fixed_begin)(
+        const types<3>::gloidx *dest_gfq,
+        const types<3>::gloidx *src_gfq,
+        MPI_Comm                mpicomm,
+        int                     tag,
+        void *                  dest_data,
+        const void *            src_data,
+        size_t                  data_size);
+
+      static void (&transfer_fixed_end)(types<3>::transfer_context *tc);
+
+      static void (&transfer_custom)(const types<3>::gloidx *dest_gfq,
+                                     const types<3>::gloidx *src_gfq,
+                                     MPI_Comm                mpicomm,
+                                     int                     tag,
+                                     void *                  dest_data,
+                                     const int *             dest_sizes,
+                                     const void *            src_data,
+                                     const int *             src_sizes);
+
+      static types<3>::transfer_context *(&transfer_custom_begin)(
+        const types<3>::gloidx *dest_gfq,
+        const types<3>::gloidx *src_gfq,
+        MPI_Comm                mpicomm,
+        int                     tag,
+        void *                  dest_data,
+        const int *             dest_sizes,
+        const void *            src_data,
+        const int *             src_sizes);
 
-      static constexpr unsigned int max_level = P8EST_MAXLEVEL;
+      static void (&transfer_custom_end)(types<3>::transfer_context *tc);
     };
 
 
index 2508af919623017c27210b267fd532c959ab85dc..72891ce1cb504636eb5a751f547fdd38513ac8da 100644 (file)
@@ -197,13 +197,6 @@ namespace parallel
       interpolate(VectorType &out);
 
 
-      /**
-       * Return the size in bytes that need to be stored per cell.
-       */
-      unsigned int
-      get_data_size() const;
-
-
       /**
        * Prepare the serialization of the given vector. The serialization is
        * done by Triangulation::save(). The given vector needs all information
@@ -267,8 +260,8 @@ namespace parallel
         const typename Triangulation<dim, DoFHandlerType::space_dimension>::
           cell_iterator &cell,
         const typename Triangulation<dim, DoFHandlerType::space_dimension>::
-          CellStatus status,
-        void *       data);
+          CellStatus       status,
+        std::vector<char> &data);
 
       /**
        * A callback function used to unpack the data on the current mesh that
@@ -280,16 +273,19 @@ namespace parallel
         const typename Triangulation<dim, DoFHandlerType::space_dimension>::
           cell_iterator &cell,
         const typename Triangulation<dim, DoFHandlerType::space_dimension>::
-          CellStatus               status,
-        const void *               data,
+          CellStatus status,
+        const boost::iterator_range<std::vector<char>::const_iterator>
+                                   data_range,
         std::vector<VectorType *> &all_out);
 
 
       /**
-       *
+       * Registers the pack_callback() function with the
+       * parallel::distributed::Triangulation that has been assigned to the
+       * DoFHandler class member and stores the returned handle.
        */
       void
-      register_data_attach(const std::size_t size);
+      register_data_attach();
     };
 
 
index 0bbe14037f9ff178ff1a12751aafd222190cb3e1..e3ffca184f2136543e1686115130991c3d797692 100644 (file)
@@ -648,9 +648,7 @@ namespace parallel
        * Each of these parties registers a call-back function (the second
        * argument here, @p pack_callback) that will be called whenever the
        * triangulation's execute_coarsening_and_refinement() or save()
-       * functions are called. The first argument here specifies the number
-       * of bytes the interest party will want to store per cell. (This
-       * needs to be a constant per cell.)
+       * functions are called.
        *
        * The current function then returns an integer handle that corresponds
        * to the number of data set that the callback provided here will attach.
@@ -672,18 +670,28 @@ namespace parallel
        * will tell you if the given cell will be coarsened, refined, or will
        * persist as is. (This status may be different than the refinement
        * or coarsening flags set on that cell, to accommodate things such as
-       * the "one hanging node per edge" rule.). Specifically, the values for
-       * this argument mean the following:
+       * the "one hanging node per edge" rule.). These flags need to be
+       * read in the context of the p4est quadrant they belong to, as their
+       * relations are gathered in local_quadrant_cell_relations.
+       *
+       * Specifically, the values for this argument mean the following:
        *
        * - `CELL_PERSIST`: The cell won't be refined/coarsened, but might be
        * moved to a different processor. If this is the case, the callback
        * will want to pack up the data on this cell into an array and store
        * it at the provided address for later unpacking wherever this cell
        * may land.
-       * - `CELL_REFINE`: this cell will be refined into 4 or 8 cells (in 2d
+       * - `CELL_REFINE`: This cell will be refined into 4 or 8 cells (in 2d
        * and 3d, respectively). However, because these children don't exist
        * yet, you cannot access them at the time when the callback is
-       * called. If the callback is called with this value, the callback
+       * called. Thus, in local_quadrant_cell_relations, the corresponding
+       * p4est quadrants of the child cells are linked to the deal.II
+       * cell that is going to be refined. To be specific, only the very
+       * first child is marked with `CELL_REFINE`, whereas the others will be
+       * marked with `CELL_INVALID`, which indicates that these cells will be
+       * ignored by default during the packing or unpacking process. This
+       * ensures that data is only transferred once onto or from the parent
+       * cell. If the callback is called with `CELL_REFINE`, the callback
        * will want to pack up the data on this cell into an array and store
        * it at the provided address for later unpacking in a way so that
        * it can then be transferred to the children of the cell that will
@@ -691,7 +699,7 @@ namespace parallel
        * will want to pack up corresponds to a finite element field, then
        * the prolongation from parent to (new) children will have to happen
        * during unpacking.
-       * - `CELL_COARSEN`: the children of this cell will be coarsened into the
+       * - `CELL_COARSEN`: The children of this cell will be coarsened into the
        * given cell. These children still exist, so if this is the value
        * given to the callback as second argument, the callback will want
        * to transfer data from the children to the current parent cell and
@@ -701,15 +709,14 @@ namespace parallel
        * will want to pack up corresponds to a finite element field, then
        * it will need to do the restriction from children to parent at
        * this point.
+       * - `CELL_INVALID`: See `CELL_REFINE`.
        *
        * @note If this function is used for serialization of data
        *   using save() and load(), then the cell status argument with which
-       *   the callback is called will always `CELL_PERSIST`.
+       *   the callback is called will always be `CELL_PERSIST`.
        *
-       * The third argument given to the callback is a pointer to a memory
-       * location at which the callback can store its data. It is allowed
-       * to store exactly as many bytes as were passed to the current
-       * function via the @p size argument.
+       * The third argument given to the callback is a reference to the
+       * buffer to which the packed data will be appended.
        *
        * @note The purpose of this function is to register intent to
        *   attach data for a single, subsequent call to
@@ -721,10 +728,10 @@ namespace parallel
        *   another call to these functions.
        */
       unsigned int
-      register_data_attach(const std::size_t                  size,
-                           const std::function<void(const cell_iterator &,
-                                                    const CellStatus,
-                                                    void *)> &pack_callback);
+      register_data_attach(
+        const std::function<void(const cell_iterator &,
+                                 const CellStatus,
+                                 std::vector<char> &)> &pack_callback);
 
       /**
        * This function is the opposite of register_data_attach(). It is called
@@ -752,10 +759,10 @@ namespace parallel
        *
        * The supplied callback function is then called for each newly locally
        * owned cell. The first argument to the callback is an iterator that
-       * designated the cell; the second argument indicates the status of the
-       * cell in question; and the third argument points to a memory area that
-       * contains the data that was previously saved from the callback provided
-       * to register_data_attach().
+       * designates the cell; the second argument indicates the status of the
+       * cell in question; and the third argument is an iterator range that
+       * delimits the memory area containing the data that was previously
+       * saved by the callback provided to register_data_attach().
        *
        * The CellStatus will indicate if the cell was refined, coarsened, or
        * persisted unchanged. The @p cell_iterator argument to the callback
@@ -776,10 +783,12 @@ namespace parallel
        */
       void
       notify_ready_to_unpack(
-        const unsigned int                       handle,
-        const std::function<void(const cell_iterator &,
-                                 const CellStatus,
-                                 const void *)> &unpack_callback);
+        const unsigned int handle,
+        const std::function<
+          void(const cell_iterator &,
+               const CellStatus,
+               const boost::iterator_range<std::vector<char>::const_iterator>)>
+          &unpack_callback);
 
       /**
        * Return a permutation vector for the order the coarse cells are handed
@@ -867,19 +876,11 @@ namespace parallel
        */
       struct CellAttachedData
       {
-        /**
-         * Cumulative size in bytes of the buffers that those functions that
-         * have called register_data_attach() want to attach to each cell.
-         * This number only pertains to fixed-sized buffers where the data
-         * attached to each cell has exactly the same size.
-         */
-        unsigned int cumulative_fixed_data_size;
-
         /**
          * number of functions that get attached to the Triangulation through
          * register_data_attach() for example SolutionTransfer.
          */
-        unsigned int n_attached_datas;
+        unsigned int n_attached_data_sets;
 
         /**
          * number of functions that need to unpack their data after a call from
@@ -890,17 +891,13 @@ namespace parallel
         using pack_callback_t = std::function<
           void(typename Triangulation<dim, spacedim>::cell_iterator,
                CellStatus,
-               void *)>;
+               std::vector<char> &)>;
 
         /**
-         * List of callback functions registered by register_data_attach() that
-         * are going to be called for packing data. The callback functions
-         * objects will be stored together with their offset at which each
-         * callback is allowed to write into the per-cell buffer (counted in
-         * bytes). These pairs will be stored in the order on how they have been
-         * registered.
+         * These callback functions will be stored in the order in which they
+         * have been registered with the register_data_attach() function.
          */
-        std::vector<std::pair<unsigned int, pack_callback_t>> pack_callbacks;
+        std::vector<pack_callback_t> pack_callbacks;
       };
 
       CellAttachedData cell_attached_data;
@@ -909,7 +906,7 @@ namespace parallel
        * This auxiliary data structure stores the relation between a p4est
        * quadrant, a deal.II cell and its current CellStatus. For an extensive
        * description of the latter, see the documentation for the member
-       * function register_data_attach.
+       * function register_data_attach().
        */
       using quadrant_cell_relation_t = typename std::tuple<
         typename dealii::internal::p4est::types<dim>::quadrant *,
@@ -938,6 +935,162 @@ namespace parallel
       void
       update_quadrant_cell_relations();
 
+      /**
+       * This class in the private scope of parallel::distributed::Triangulation
+       * is dedicated to the data transfer across repartitioned meshes
+       * and to the file system.
+       *
+       * It is designed to store all data buffers intended for transfer
+       * separately from the parallel_forest and to interface with p4est
+       * where it is absolutely necessary.
+       */
+      class DataTransfer
+      {
+      public:
+        DataTransfer(MPI_Comm mpi_communicator);
+
+        /**
+         * Prepare any data transfer by calling the pack callback function of
+         * each entry of @p pack_callback_sets on each cell registered in
+         * @p quad_cell_relations.
+         */
+        void
+        pack_data(
+          const std::vector<quadrant_cell_relation_t> &quad_cell_relations,
+          const std::vector<typename CellAttachedData::pack_callback_t>
+            &pack_callbacks);
+
+        /**
+         * Transfer data across forests.
+         *
+         * Besides the actual @p parallel_forest, which has already been
+         * refined and repartitioned, this function also needs information
+         * about its previous state, i.e. the locally owned intervals in
+         * p4est's sc_array of each processor. This information needs to be
+         * copied out of the old p4est object and provided via the parameter
+         * @p previous_global_first_quadrant.
+         *
+         * Data has to be previously packed with pack_data().
+         */
+        void
+        execute_transfer(
+          const typename dealii::internal::p4est::types<dim>::forest
+            *parallel_forest,
+          const typename dealii::internal::p4est::types<dim>::gloidx
+            *previous_global_first_quadrant);
+
+        /**
+         * Unpack the CellStatus information on each entry of
+         * @p quad_cell_relations.
+         *
+         * Data has to be previously transferred with execute_transfer()
+         * or read from the file system via load().
+         */
+        void
+        unpack_cell_status(
+          std::vector<quadrant_cell_relation_t> &quad_cell_relations) const;
+
+        /**
+         * Unpack previously transferred data on each cell registered in
+         * @p quad_cell_relations with the provided @p unpack_callback function.
+         *
+         * The parameter @p handle determines the position in memory from
+         * which the @p unpack_callback function is allowed to read. Its
+         * value needs to correspond to the pack_callback function that has
+         * been registered previously.
+         *
+         * Data has to be previously transferred with execute_transfer()
+         * or read from the file system via load().
+         */
+        void
+        unpack_data(
+          const std::vector<quadrant_cell_relation_t> &quad_cell_relations,
+          const unsigned int                           handle,
+          const std::function<void(
+            const typename dealii::Triangulation<dim, spacedim>::cell_iterator
+              &,
+            const typename dealii::Triangulation<dim, spacedim>::CellStatus &,
+            const boost::iterator_range<std::vector<char>::const_iterator>)>
+            &unpack_callback) const;
+
+        /**
+         * Transfer data to file system.
+         *
+         * The data will be written to a separate file, whose name
+         * consists of the stem @p filename and an attached identifier
+         * <tt>-fixed.data</tt>.
+         *
+         * All processors write into that file simultaneously via MPIIO.
+         * The position at which each processor writes will be determined
+         * from the provided @p parallel_forest.
+         *
+         * Data has to be previously packed with pack_data().
+         */
+        void
+        save(const typename dealii::internal::p4est::types<dim>::forest
+               *         parallel_forest,
+             const char *filename) const;
+
+        /**
+         * Transfer data from file system.
+         *
+         * The data will be read from a separate file, whose name
+         * consists of the stem @p filename and an attached identifier
+         * <tt>-fixed.data</tt>. The @p n_attached_deserialize parameter
+         * is required to gather the memory offsets for each callback.
+         *
+         * All processors read from that file simultaneously via MPIIO.
+         * The position from which each processor reads will be determined
+         * from the provided @p parallel_forest.
+         *
+         * After loading, unpack_data() needs to be called to finally
+         * distribute data across the associated triangulation.
+         */
+        void
+        load(const typename dealii::internal::p4est::types<dim>::forest
+               *                parallel_forest,
+             const char *       filename,
+             const unsigned int n_attached_deserialize);
+
+        /**
+         * Clears all containers and associated data, and resets member
+         * values to their default state.
+         *
+         * Frees memory completely.
+         */
+        void
+        clear();
+
+      private:
+        MPI_Comm mpi_communicator;
+
+        /**
+         * Cumulative size in bytes that those functions that have called
+         * register_data_attach() want to attach to each cell. This number
+         * only pertains to fixed-sized buffers where the data attached to
+         * each cell has exactly the same size.
+         *
+         * The last entry of this container corresponds to the data size
+         * packed per cell in the fixed size buffer (which can be accessed
+         * by calling <tt>cumulative_sizes_fixed.back()</tt>).
+         */
+        std::vector<unsigned int> cumulative_sizes_fixed;
+
+        /**
+         * Consecutive buffers designed for the fixed size transfer
+         * functions of p4est.
+         */
+        std::vector<char> src_data_fixed;
+        std::vector<char> dest_data_fixed;
+
+        // TODO: buffers for p4est variable size transfer
+        //       std::vector<std::vector<size_t>> cumulative_sizes_var_per_cell;
+        //       std::vector<size_t> src_sizes_var, dest_sizes_var;
+        //       std::vector<char> src_data_var, dest_data_var;
+      };
+
+      DataTransfer data_transfer;
+
       /**
        * Two arrays that store which p4est tree corresponds to which coarse
        * grid cell and vice versa. We need these arrays because p4est goes
@@ -988,27 +1141,6 @@ namespace parallel
       void
       copy_local_forest_to_triangulation();
 
-      /**
-       * Internal function notifying all registered classes to attach their
-       * data before repartitioning occurs. Called from
-       * execute_coarsening_and_refinement() and save(). The function
-       * recursively visits all deal.II cells and corresponding p4est
-       * quadrants and calls the callbacks registered via
-       * register_data_attach() on the ones where data needs to be
-       * stored.
-       *
-       * This function is odd in that it is called on a p4est triangulation
-       * and a deal.II triangulation that may not be in sync when it is called
-       * from execute_coarsening_and_refinement(). Specifically, the p4est
-       * trees have already been refined and coarsened, but the deal.II
-       * triangulation has not. Consequently, when walking the two recursively,
-       * it can reason about which cells will remain after the deal.II
-       * triangulation has been brought up to date with regard to the p4est
-       * trees.
-       */
-      void
-      attach_mesh_data();
-
       /**
        * Internal function notifying all registered slots to provide their
        * weights before repartitioning occurs. Called from
@@ -1157,11 +1289,10 @@ namespace parallel
        */
       unsigned int
       register_data_attach(
-        const std::size_t size,
         const std::function<void(
           const typename dealii::Triangulation<1, spacedim>::cell_iterator &,
           const typename dealii::Triangulation<1, spacedim>::CellStatus,
-          void *)> &      pack_callback);
+          std::vector<char> &)> &pack_callback);
 
       /**
        * This function is not implemented, but needs to be present for the
@@ -1169,11 +1300,12 @@ namespace parallel
        */
       void
       notify_ready_to_unpack(
-        const unsigned int offset,
+        const unsigned int handle,
         const std::function<void(
           const typename dealii::Triangulation<1, spacedim>::cell_iterator &,
           const typename dealii::Triangulation<1, spacedim>::CellStatus,
-          const void *)> & unpack_callback);
+          const boost::iterator_range<std::vector<char>::const_iterator>)>
+          &unpack_callback);
 
       /**
        * Dummy arrays. This class isn't usable but the compiler wants to see
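
To complement the register_data_attach()/notify_ready_to_unpack() documentation in this header, a hedged sketch of the unpack side under the new interface; the handle is assumed to come from an earlier register_data_attach() call, and the packed type (an unsigned int) is purely illustrative:

#include <deal.II/base/utilities.h>
#include <deal.II/distributed/tria.h>

#include <boost/range/iterator_range.hpp>

#include <vector>

template <int dim>
void
collect_example_data(
  dealii::parallel::distributed::Triangulation<dim> &triangulation,
  const unsigned int                                  handle)
{
  using Tria = dealii::parallel::distributed::Triangulation<dim>;

  triangulation.notify_ready_to_unpack(
    handle,
    [](const typename Tria::cell_iterator &cell,
       const typename Tria::CellStatus     status,
       const boost::iterator_range<std::vector<char>::const_iterator>
         data_range) {
      (void)cell;
      (void)status;
      // The range delimits exactly the bytes that the corresponding pack
      // callback appended earlier; unpack with the same compression setting.
      const unsigned int payload =
        dealii::Utilities::unpack<unsigned int>(data_range.begin(),
                                                data_range.end(),
                                                /*allow_compression=*/false);
      (void)payload;
    });
}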
index 47df14e26d1198b3f821bb1b0c202d3140d35c7d..0b6f8963fc427f42b434b1b723975380364c9a3f 100644 (file)
@@ -342,11 +342,11 @@ namespace Particles
 
     /**
      * Callback function that should be called before every refinement
-     * and when writing checkpoints.  This function is used to
+     * and when writing checkpoints. This function is used to
      * register store_particles() with the triangulation.
      */
     void
-    register_store_callback_function(const bool serialization);
+    register_store_callback_function();
 
     /**
      * Callback function that should be called after every refinement
@@ -511,7 +511,7 @@ namespace Particles
     store_particles(
       const typename Triangulation<dim, spacedim>::cell_iterator &cell,
       const typename Triangulation<dim, spacedim>::CellStatus     status,
-      void *                                                      data) const;
+      std::vector<char> &                                         data) const;
 
     /**
      * Called by listener functions after a refinement step. The local map
@@ -521,7 +521,8 @@ namespace Particles
     load_particles(
       const typename Triangulation<dim, spacedim>::cell_iterator &cell,
       const typename Triangulation<dim, spacedim>::CellStatus     status,
-      const void *                                                data);
+      const boost::iterator_range<std::vector<char>::const_iterator>
+        data_range);
   };
 
   /* ---------------------- inline and template functions ------------------ */
index 86b721501219cca345b3a67f9bb80b94cd552307..ba7dfebc5b28be18105887006e799318dcc7d55e 100644 (file)
@@ -509,6 +509,50 @@ namespace internal
 
     constexpr unsigned int functions<2>::max_level;
 
+    void (&functions<2>::transfer_fixed)(const types<2>::gloidx *dest_gfq,
+                                         const types<2>::gloidx *src_gfq,
+                                         MPI_Comm                mpicomm,
+                                         int                     tag,
+                                         void *                  dest_data,
+                                         const void *            src_data,
+                                         size_t                  data_size) =
+      p4est_transfer_fixed;
+
+    types<2>::transfer_context *(&functions<2>::transfer_fixed_begin)(
+      const types<2>::gloidx *dest_gfq,
+      const types<2>::gloidx *src_gfq,
+      MPI_Comm                mpicomm,
+      int                     tag,
+      void *                  dest_data,
+      const void *            src_data,
+      size_t                  data_size) = p4est_transfer_fixed_begin;
+
+    void (&functions<2>::transfer_fixed_end)(types<2>::transfer_context *tc) =
+      p4est_transfer_fixed_end;
+
+    void (&functions<2>::transfer_custom)(const types<2>::gloidx *dest_gfq,
+                                          const types<2>::gloidx *src_gfq,
+                                          MPI_Comm                mpicomm,
+                                          int                     tag,
+                                          void *                  dest_data,
+                                          const int *             dest_sizes,
+                                          const void *            src_data,
+                                          const int *             src_sizes) =
+      p4est_transfer_custom;
+
+    types<2>::transfer_context *(&functions<2>::transfer_custom_begin)(
+      const types<2>::gloidx *dest_gfq,
+      const types<2>::gloidx *src_gfq,
+      MPI_Comm                mpicomm,
+      int                     tag,
+      void *                  dest_data,
+      const int *             dest_sizes,
+      const void *            src_data,
+      const int *             src_sizes) = p4est_transfer_custom_begin;
+
+    void (&functions<2>::transfer_custom_end)(types<2>::transfer_context *tc) =
+      p4est_transfer_custom_end;
+
 
 
     int (&functions<3>::quadrant_compare)(const void *v1, const void *v2) =
@@ -652,6 +696,50 @@ namespace internal
 
     constexpr unsigned int functions<3>::max_level;
 
+    void (&functions<3>::transfer_fixed)(const types<3>::gloidx *dest_gfq,
+                                         const types<3>::gloidx *src_gfq,
+                                         MPI_Comm                mpicomm,
+                                         int                     tag,
+                                         void *                  dest_data,
+                                         const void *            src_data,
+                                         size_t                  data_size) =
+      p8est_transfer_fixed;
+
+    types<3>::transfer_context *(&functions<3>::transfer_fixed_begin)(
+      const types<3>::gloidx *dest_gfq,
+      const types<3>::gloidx *src_gfq,
+      MPI_Comm                mpicomm,
+      int                     tag,
+      void *                  dest_data,
+      const void *            src_data,
+      size_t                  data_size) = p8est_transfer_fixed_begin;
+
+    void (&functions<3>::transfer_fixed_end)(types<3>::transfer_context *tc) =
+      p8est_transfer_fixed_end;
+
+    void (&functions<3>::transfer_custom)(const types<3>::gloidx *dest_gfq,
+                                          const types<3>::gloidx *src_gfq,
+                                          MPI_Comm                mpicomm,
+                                          int                     tag,
+                                          void *                  dest_data,
+                                          const int *             dest_sizes,
+                                          const void *            src_data,
+                                          const int *             src_sizes) =
+      p8est_transfer_custom;
+
+    types<3>::transfer_context *(&functions<3>::transfer_custom_begin)(
+      const types<3>::gloidx *dest_gfq,
+      const types<3>::gloidx *src_gfq,
+      MPI_Comm                mpicomm,
+      int                     tag,
+      void *                  dest_data,
+      const int *             dest_sizes,
+      const void *            src_data,
+      const int *             src_sizes) = p8est_transfer_custom_begin;
+
+    void (&functions<3>::transfer_custom_end)(types<3>::transfer_context *tc) =
+      p8est_transfer_custom_end;
+
 
 
     template <int dim>
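
For orientation, a sketch of how these wrapper references are meant to be used, mirroring DataTransfer::execute_transfer() further below; the helper name, its parameters, and the tag value are assumptions of this sketch rather than anything defined in this commit:

#include <deal.II/distributed/p4est_wrappers.h>

#include <mpi.h>

#include <cstddef>
#include <vector>

// Hypothetical helper (2d shown; 3d uses functions<3>/types<3> analogously):
// move the fixed-size per-quadrant buffer from the previous partition to the
// current one.
void
transfer_fixed_buffers(
  const dealii::internal::p4est::types<2>::forest *parallel_forest,
  const dealii::internal::p4est::types<2>::gloidx *previous_global_first_quadrant,
  const MPI_Comm           mpi_communicator,
  const std::vector<char> &src_data_fixed,
  std::vector<char> &      dest_data_fixed,
  const std::size_t        bytes_per_quadrant)
{
  // Every locally owned quadrant of the new partition receives the same
  // fixed number of bytes.
  dest_data_fixed.resize(parallel_forest->local_num_quadrants *
                         bytes_per_quadrant);

  dealii::internal::p4est::functions<2>::transfer_fixed(
    parallel_forest->global_first_quadrant, // destination partition
    previous_global_first_quadrant,         // source partition
    mpi_communicator,
    /*tag=*/0,
    dest_data_fixed.data(),
    src_data_fixed.data(),
    bytes_per_quadrant);
}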
index 05e60d74610f5b830faddfe12c4754da5fa6b297..add27f1cdd9bd5b82cf8b398a287fd59caf80c39 100644 (file)
@@ -67,18 +67,15 @@ namespace parallel
         const std::vector<const VectorType *> &all_in)
     {
       input_vectors = all_in;
-      register_data_attach(get_data_size() * input_vectors.size());
+      register_data_attach();
     }
 
 
 
     template <int dim, typename VectorType, typename DoFHandlerType>
     void
-    SolutionTransfer<dim, VectorType, DoFHandlerType>::register_data_attach(
-      const std::size_t size)
+    SolutionTransfer<dim, VectorType, DoFHandlerType>::register_data_attach()
     {
-      Assert(size > 0, ExcMessage("Please transfer at least one vector!"));
-
       // TODO: casting away constness is bad
       parallel::distributed::Triangulation<dim, DoFHandlerType::space_dimension>
         *tria = (dynamic_cast<parallel::distributed::Triangulation<
@@ -88,14 +85,12 @@ namespace parallel
                        *>(&dof_handler->get_triangulation())));
       Assert(tria != nullptr, ExcInternalError());
 
-      handle = tria->register_data_attach(
-        size,
-        std::bind(
-          &SolutionTransfer<dim, VectorType, DoFHandlerType>::pack_callback,
-          this,
-          std::placeholders::_1,
-          std::placeholders::_2,
-          std::placeholders::_3));
+      handle = tria->register_data_attach(std::bind(
+        &SolutionTransfer<dim, VectorType, DoFHandlerType>::pack_callback,
+        this,
+        std::placeholders::_1,
+        std::placeholders::_2,
+        std::placeholders::_3));
     }
 
 
@@ -148,7 +143,7 @@ namespace parallel
     SolutionTransfer<dim, VectorType, DoFHandlerType>::deserialize(
       std::vector<VectorType *> &all_in)
     {
-      register_data_attach(get_data_size() * all_in.size());
+      register_data_attach();
 
       // this makes interpolate() happy
       input_vectors.resize(all_in.size());
@@ -206,15 +201,6 @@ namespace parallel
 
 
 
-    template <int dim, typename VectorType, typename DoFHandlerType>
-    unsigned int
-    SolutionTransfer<dim, VectorType, DoFHandlerType>::get_data_size() const
-    {
-      return sizeof(typename VectorType::value_type) *
-             DoFTools::max_dofs_per_cell(*dof_handler);
-    }
-
-
     template <int dim, typename VectorType, typename DoFHandlerType>
     void
     SolutionTransfer<dim, VectorType, DoFHandlerType>::pack_callback(
@@ -222,30 +208,31 @@ namespace parallel
         cell_iterator &cell_,
       const typename Triangulation<dim, DoFHandlerType::space_dimension>::
         CellStatus /*status*/,
-      void *data)
+      std::vector<char> &data)
     {
-      typename VectorType::value_type *data_store =
-        reinterpret_cast<typename VectorType::value_type *>(data);
-
       typename DoFHandlerType::cell_iterator cell(*cell_, dof_handler);
 
       const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell;
-      ::dealii::Vector<typename VectorType::value_type> dofvalues(
-        dofs_per_cell);
-      for (typename std::vector<const VectorType *>::iterator it =
-             input_vectors.begin();
-           it != input_vectors.end();
-           ++it)
+
+      // create buffer for each individual object
+      std::vector<::dealii::Vector<typename VectorType::value_type>> dofvalues(
+        input_vectors.size());
+
+      auto cit_input = input_vectors.cbegin();
+      auto it_output = dofvalues.begin();
+      for (; cit_input != input_vectors.cend(); ++cit_input, ++it_output)
         {
-          cell->get_interpolated_dof_values(*(*it), dofvalues);
-          std::memcpy(data_store,
-                      &dofvalues(0),
-                      sizeof(typename VectorType::value_type) * dofs_per_cell);
-          data_store += dofs_per_cell;
+          it_output->reinit(dofs_per_cell);
+          cell->get_interpolated_dof_values(*(*cit_input), *it_output);
         }
+
+      // to get consistent data sizes on each cell for the fixed size transfer,
+      // we won't allow compression
+      Utilities::pack(dofvalues, data, /*allow_compression=*/false);
     }
 
 
+
     template <int dim, typename VectorType, typename DoFHandlerType>
     void
     SolutionTransfer<dim, VectorType, DoFHandlerType>::unpack_callback(
@@ -253,26 +240,44 @@ namespace parallel
         cell_iterator &cell_,
       const typename Triangulation<dim, DoFHandlerType::space_dimension>::
         CellStatus /*status*/,
-      const void *               data,
-      std::vector<VectorType *> &all_out)
+      const boost::iterator_range<std::vector<char>::const_iterator> data_range,
+      std::vector<VectorType *> &                                    all_out)
     {
       typename DoFHandlerType::cell_iterator cell(*cell_, dof_handler);
 
       const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell;
-      ::dealii::Vector<typename VectorType::value_type> dofvalues(
-        dofs_per_cell);
-      const typename VectorType::value_type *data_store =
-        reinterpret_cast<const typename VectorType::value_type *>(data);
 
-      for (typename std::vector<VectorType *>::iterator it = all_out.begin();
-           it != all_out.end();
-           ++it)
+      std::vector<::dealii::Vector<typename VectorType::value_type>> dofvalues =
+        Utilities::unpack<
+          std::vector<::dealii::Vector<typename VectorType::value_type>>>(
+          data_range.begin(), data_range.end(), /*allow_compression=*/false);
+
+      // check if sizes match
+      Assert(dofvalues.size() == all_out.size(), ExcInternalError());
+
+      // check if we have enough dofs provided by the FE object
+      // to interpolate the transferred data correctly
+      for (auto it_dofvalues = dofvalues.begin();
+           it_dofvalues != dofvalues.end();
+           ++it_dofvalues)
+        Assert(dofs_per_cell <= it_dofvalues->size(),
+               ExcMessage(
+                 "The transferred data was packed on fewer dofs than the"
+                 "currently registered FE object assigned to the DoFHandler."));
+
+      // distribute data for each registered vector on mesh
+      auto it_input  = dofvalues.begin();
+      auto it_output = all_out.begin();
+      for (; it_input != dofvalues.end(); ++it_input, ++it_output)
         {
-          std::memcpy(&dofvalues(0),
-                      data_store,
-                      sizeof(typename VectorType::value_type) * dofs_per_cell);
-          cell->set_dof_values_by_interpolation(dofvalues, *(*it));
-          data_store += dofs_per_cell;
+          // Adjust the size of the vector to the order of the current FE
+          // object associated with the registered DoFHandler.
+          // Fixes the following tests:
+          //  - p4est_2d_constraintmatrix_04.*.debug
+          //  - p4est_3d_constraintmatrix_03.*.debug
+          it_input->reinit(dofs_per_cell, /*omit_zeroing_entries=*/true);
+
+          cell->set_dof_values_by_interpolation(*it_input, *(*it_output));
         }
     }
 
index 069268e8757957234516f31145a8f489f56e0538..c4b959893be0ff5630ba35a62a492da6902d9fb4 100644 (file)
@@ -1124,10 +1124,544 @@ namespace
 } // namespace
 
 
+
 namespace parallel
 {
   namespace distributed
   {
+    /* ------------------ class DataTransfer<dim,spacedim> ----------------- */
+
+
+    template <int dim, int spacedim>
+    Triangulation<dim, spacedim>::DataTransfer::DataTransfer(
+      MPI_Comm mpi_communicator)
+      : mpi_communicator(mpi_communicator)
+    {}
+
+
+
+    template <int dim, int spacedim>
+    void
+    Triangulation<dim, spacedim>::DataTransfer::pack_data(
+      const std::vector<quadrant_cell_relation_t> &quad_cell_relations,
+      const std::vector<typename CellAttachedData::pack_callback_t>
+        &pack_callbacks)
+    {
+      Assert(src_data_fixed.size() == 0,
+             ExcMessage("Previously packed data has not been released yet!"));
+
+      // Prepare the buffer structure, in which each callback function will
+      // store its data for each active cell.
+      // The outermost shell of this container construct corresponds to the
+      // data packed per cell. The next layer represents the data that
+      // each callback function packs on the corresponding cell. These
+      // buffers are chains of chars stored in an std::vector<char>.
+      // A visualisation of the data structure:
+      /* clang-format off */
+      // |             cell_1                | |             cell_2                | ...
+      // ||  callback_1  ||  callback_2  |...| ||  callback_1  ||  callback_2  |...| ...
+      // |||char|char|...|||char|char|...|...| |||char|char|...|||char|char|...|...| ...
+      /* clang-format on */
+      std::vector<std::vector<std::vector<char>>> packed_data(
+        quad_cell_relations.size());
+
+      // Iterate over all cells, call all callback functions on each cell,
+      // and store their data in the corresponding buffer scope.
+      {
+        auto quad_cell_rel_it = quad_cell_relations.cbegin();
+        auto data_cell_it     = packed_data.begin();
+        for (; quad_cell_rel_it != quad_cell_relations.cend();
+             ++quad_cell_rel_it, ++data_cell_it)
+          {
+            const auto &cell_status = std::get<1>(*quad_cell_rel_it);
+            const auto &dealii_cell = std::get<2>(*quad_cell_rel_it);
+
+            // Assertions about the tree structure.
+            switch (cell_status)
+              {
+                case parallel::distributed::Triangulation<dim, spacedim>::
+                  CELL_PERSIST:
+                case parallel::distributed::Triangulation<dim, spacedim>::
+                  CELL_REFINE:
+                  // double check the condition that we will only ever attach
+                  // data to active cells when we get here
+                  Assert(dealii_cell->active(), ExcInternalError());
+                  break;
+
+                case parallel::distributed::Triangulation<dim, spacedim>::
+                  CELL_COARSEN:
+                  // double check the condition that we will only ever attach
+                  // data to cells with children when we get here. however, we
+                  // can only tolerate one level of coarsening at a time, so
+                  // check that the children are all active
+                  Assert(dealii_cell->active() == false, ExcInternalError());
+                  for (unsigned int c = 0;
+                       c < GeometryInfo<dim>::max_children_per_cell;
+                       ++c)
+                    Assert(dealii_cell->child(c)->active(), ExcInternalError());
+                  break;
+
+                case parallel::distributed::Triangulation<dim, spacedim>::
+                  CELL_INVALID:
+                  // do nothing on invalid cells
+                  break;
+
+                default:
+                  Assert(false, ExcInternalError());
+                  break;
+              }
+
+            // Reserve memory corresponding to the number of callback
+            // functions that will be called.
+            // On cells flagged with CELL_INVALID, only the CellStatus
+            // will be stored.
+            const unsigned int n_callbacks_on_cell =
+              1 +
+              ((cell_status ==
+                parallel::distributed::Triangulation<dim,
+                                                     spacedim>::CELL_INVALID) ?
+                 0 :
+                 pack_callbacks.size());
+            data_cell_it->resize(n_callbacks_on_cell);
+
+            // We continue with packing all data on this specific cell.
+            // First, we pack the CellStatus information.
+            auto data_it = data_cell_it->begin();
+            // to get consistent data sizes on each cell for the fixed size
+            // transfer, we won't allow compression
+            *data_it =
+              Utilities::pack(cell_status, /*allow_compression=*/false);
+            ++data_it;
+
+            // Proceed with all registered callback functions.
+            // Skip cells with the CELL_INVALID flag.
+            if (cell_status !=
+                parallel::distributed::Triangulation<dim,
+                                                     spacedim>::CELL_INVALID)
+              {
+                for (auto callback_it = pack_callbacks.cbegin();
+                     callback_it != pack_callbacks.cend();
+                     ++callback_it, ++data_it)
+                  {
+                    (*callback_it)(dealii_cell, cell_status, *data_it);
+                  }
+              }
+          } // loop over quad_cell_relations
+      }
+
+      // Generate a vector which stores the sizes of each callback function,
+      // including the packed CellStatus transfer.
+      // Find the very first cell that we wrote to with all callback
+      // functions (i.e. a cell that was not flagged with CELL_INVALID)
+      // and store the sizes of each buffer.
+      //
+      // To deal with the case that at least one of the processors does not own
+      // any cell at all, we will exchange the information about the data sizes
+      // among them later. The code in between is still well-defined, since the
+      // following loops will be skipped.
+      std::vector<unsigned int> local_sizes_fixed(1 + pack_callbacks.size());
+      for (auto data_cell_it = packed_data.cbegin();
+           data_cell_it != packed_data.cend();
+           ++data_cell_it)
+        {
+          if (data_cell_it->size() == local_sizes_fixed.size())
+            {
+              auto sizes_fixed_it = local_sizes_fixed.begin();
+              auto data_it        = data_cell_it->cbegin();
+              for (; data_it != data_cell_it->cend();
+                   ++data_it, ++sizes_fixed_it)
+                {
+                  *sizes_fixed_it = data_it->size();
+                }
+
+              break;
+            }
+        }
+
+      // Check if all cells have valid sizes.
+      for (auto data_cell_it = packed_data.cbegin();
+           data_cell_it != packed_data.cend();
+           ++data_cell_it)
+        {
+          Assert(data_cell_it->size() == 1 ||
+                   data_cell_it->size() == local_sizes_fixed.size(),
+                 ExcInternalError());
+        }
+
+      // Share information about the packed data sizes
+      // of all callback functions across all processors, in case one
+      // of them does not own any cells at all.
+      std::vector<unsigned int> global_sizes_fixed(local_sizes_fixed.size());
+      Utilities::MPI::max(local_sizes_fixed,
+                          this->mpi_communicator,
+                          global_sizes_fixed);
+
+      // Construct cumulative sizes, since this is the only information
+      // we need from now on.
+      cumulative_sizes_fixed.resize(global_sizes_fixed.size());
+      std::partial_sum(global_sizes_fixed.begin(),
+                       global_sizes_fixed.end(),
+                       cumulative_sizes_fixed.begin());
+
+      // Move every piece of packed data into one contiguous buffer.
+      src_data_fixed.reserve(quad_cell_relations.size() *
+                             cumulative_sizes_fixed.back());
+      for (auto data_cell_it = packed_data.begin();
+           data_cell_it != packed_data.end();
+           ++data_cell_it)
+        {
+          // Move every fraction of packed data into the buffer
+          // reserved for this particular cell.
+          for (auto data_it = data_cell_it->begin();
+               data_it != data_cell_it->end();
+               ++data_it)
+            std::move(data_it->begin(),
+                      data_it->end(),
+                      std::back_inserter(src_data_fixed));
+
+          // If we only packed the CellStatus information
+          // (i.e. encountered a cell flagged CELL_INVALID),
+          // fill the remaining space with invalid entries.
+          if (data_cell_it->size() == 1)
+            {
+              const std::size_t bytes_skipped =
+                cumulative_sizes_fixed.back() - cumulative_sizes_fixed.front();
+
+              src_data_fixed.insert(src_data_fixed.end(),
+                                    bytes_skipped,
+                                    static_cast<char>(-1)); // invalid_char
+            }
+        }
+
+      // Double check that we packed everything correctly.
+      Assert(src_data_fixed.size() == src_data_fixed.capacity(),
+             ExcInternalError());
+    }
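To illustrate the resulting layout of src_data_fixed (all sizes below are hypothetical): with two registered callbacks contributing 8 and 16 bytes per cell, and a 4-byte packed CellStatus, the buffer holds one 28-byte block per cell, and CELL_INVALID cells carry only the status plus padding. A minimal sketch of that arithmetic:

    #include <numeric>
    #include <vector>

    void fixed_size_layout_sketch()
    {
      // hypothetical per-entry sizes: CellStatus, callback 0, callback 1
      const std::vector<unsigned int> global_sizes_fixed = {4, 8, 16};

      std::vector<unsigned int> cumulative_sizes_fixed(
        global_sizes_fixed.size());
      std::partial_sum(global_sizes_fixed.begin(),
                       global_sizes_fixed.end(),
                       cumulative_sizes_fixed.begin()); // -> {4, 12, 28}

      // Every cell occupies cumulative_sizes_fixed.back() == 28 bytes in the
      // flat buffer; callback 0 owns bytes [4,12), callback 1 owns [12,28).
      // A CELL_INVALID cell stores only the 4 CellStatus bytes and is padded
      // with 28 - 4 == 24 bytes of the invalid marker char(-1).
    }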
+
+
+
+    template <int dim, int spacedim>
+    void
+    Triangulation<dim, spacedim>::DataTransfer::execute_transfer(
+      const typename dealii::internal::p4est::types<dim>::forest
+        *parallel_forest,
+      const typename dealii::internal::p4est::types<dim>::gloidx
+        *previous_global_first_quadrant)
+    {
+      Assert(cumulative_sizes_fixed.size() > 0,
+             ExcMessage("No data has been packed!"));
+
+      // Resize memory according to the data that we will receive.
+      dest_data_fixed.resize(parallel_forest->local_num_quadrants *
+                             cumulative_sizes_fixed.back());
+
+      // Execute fixed size transfer.
+      dealii::internal::p4est::functions<dim>::transfer_fixed(
+        parallel_forest->global_first_quadrant,
+        previous_global_first_quadrant,
+        parallel_forest->mpicomm,
+        0,
+        dest_data_fixed.data(),
+        src_data_fixed.data(),
+        cumulative_sizes_fixed.back());
+
+      // Release memory of previously packed data.
+      src_data_fixed.clear();
+      src_data_fixed.shrink_to_fit();
+
+      // TODO: for variable transfer
+      // allocate memory for transfer
+      // p4est transfer fixed_begin for transfer of var data sizes
+      // p4est transfer custom for transfer of var data
+    }
+
+
+
+    template <int dim, int spacedim>
+    void
+    Triangulation<dim, spacedim>::DataTransfer::unpack_cell_status(
+      std::vector<quadrant_cell_relation_t> &quad_cell_relations) const
+    {
+      Assert(cumulative_sizes_fixed.size() > 0,
+             ExcMessage("No data has been packed!"));
+      Assert((quad_cell_relations.size() == 0) || (dest_data_fixed.size() > 0),
+             ExcMessage("No data has been received!"));
+
+      // Size of CellStatus object that will be unpacked on each cell.
+      const std::size_t size = sizeof(
+        typename parallel::distributed::Triangulation<dim,
+                                                      spacedim>::CellStatus);
+
+      // Iterate over all cells and overwrite the CellStatus
+      // information from the transferred data.
+      // Advance the buffer iterator to the next cell after
+      // each iteration.
+      auto quad_cell_rel_it = quad_cell_relations.begin();
+      auto dest_fixed_it    = dest_data_fixed.cbegin();
+      for (; quad_cell_rel_it != quad_cell_relations.end();
+           ++quad_cell_rel_it, dest_fixed_it += cumulative_sizes_fixed.back())
+        {
+          std::get<1>(*quad_cell_rel_it) = // cell_status
+            Utilities::unpack<typename parallel::distributed::
+                                Triangulation<dim, spacedim>::CellStatus>(
+              dest_fixed_it,
+              dest_fixed_it + size,
+              /*allow_compression=*/false);
+        }
+    }
+
+
+
+    template <int dim, int spacedim>
+    void
+    Triangulation<dim, spacedim>::DataTransfer::unpack_data(
+      const std::vector<quadrant_cell_relation_t> &quad_cell_relations,
+      const unsigned int                           handle,
+      const std::function<void(
+        const typename dealii::Triangulation<dim, spacedim>::cell_iterator &,
+        const typename dealii::Triangulation<dim, spacedim>::CellStatus &,
+        const boost::iterator_range<std::vector<char>::const_iterator>)>
+        &unpack_callback) const
+    {
+      Assert(cumulative_sizes_fixed.size() > 0,
+             ExcMessage("No data has been packed!"));
+      Assert((quad_cell_relations.size() == 0) || (dest_data_fixed.size() > 0),
+             ExcMessage("No data has been received!"));
+
+      // Calculate offset and size from the cumulative sizes.
+      const unsigned int offset = cumulative_sizes_fixed[handle];
+      const unsigned int size   = cumulative_sizes_fixed[handle + 1] - offset;
+
+      // Iterate over all cells and unpack the transferred data.
+      // Adjust buffer iterator to the offset of the callback
+      // function and advance its position to the next cell
+      // after each iteration.
+      auto quad_cell_rel_it = quad_cell_relations.begin();
+      auto dest_fixed_it    = dest_data_fixed.cbegin() + offset;
+      for (; quad_cell_rel_it != quad_cell_relations.end();
+           ++quad_cell_rel_it, dest_fixed_it += cumulative_sizes_fixed.back())
+        {
+          const auto &cell_status = std::get<1>(*quad_cell_rel_it);
+          const auto &dealii_cell = std::get<2>(*quad_cell_rel_it);
+
+          switch (cell_status)
+            {
+              case parallel::distributed::Triangulation<dim,
+                                                        spacedim>::CELL_PERSIST:
+              case parallel::distributed::Triangulation<dim,
+                                                        spacedim>::CELL_COARSEN:
+                unpack_callback(dealii_cell,
+                                cell_status,
+                                boost::make_iterator_range(dest_fixed_it,
+                                                           dest_fixed_it +
+                                                             size));
+                break;
+
+              case parallel::distributed::Triangulation<dim,
+                                                        spacedim>::CELL_REFINE:
+                unpack_callback(dealii_cell->parent(),
+                                cell_status,
+                                boost::make_iterator_range(dest_fixed_it,
+                                                           dest_fixed_it +
+                                                             size));
+                break;
+
+              case parallel::distributed::Triangulation<dim,
+                                                        spacedim>::CELL_INVALID:
+                // Skip this cell.
+                break;
+
+              default:
+                Assert(false, ExcInternalError());
+                break;
+            }
+        }
+    }
+
+
+
+    template <int dim, int spacedim>
+    void
+    Triangulation<dim, spacedim>::DataTransfer::save(
+      const typename dealii::internal::p4est::types<dim>::forest
+        *         parallel_forest,
+      const char *filename) const
+    {
+      Assert(cumulative_sizes_fixed.size() > 0,
+             ExcMessage("No data has been packed!"));
+
+      const std::string fname_fixed = std::string(filename) + "_fixed.data";
+
+      // ----- copied -----
+      // from DataOutInterface::write_vtu_parallel
+      // TODO: write general MPIIO interface
+      int myrank, nproc;
+      int ierr = MPI_Comm_rank(mpi_communicator, &myrank);
+      AssertThrowMPI(ierr);
+      ierr = MPI_Comm_size(mpi_communicator, &nproc);
+      AssertThrowMPI(ierr);
+
+      MPI_Info info;
+      ierr = MPI_Info_create(&info);
+      AssertThrowMPI(ierr);
+      MPI_File fh;
+      ierr = MPI_File_open(mpi_communicator,
+                           const_cast<char *>(fname_fixed.c_str()),
+                           MPI_MODE_CREATE | MPI_MODE_WRONLY,
+                           info,
+                           &fh);
+      AssertThrowMPI(ierr);
+
+      ierr = MPI_File_set_size(fh, 0); // delete the file contents
+      AssertThrowMPI(ierr);
+      // this barrier is necessary, because otherwise others might already
+      // write while one core is still setting the size to zero.
+      ierr = MPI_Barrier(mpi_communicator);
+      AssertThrowMPI(ierr);
+      ierr = MPI_Info_free(&info);
+      AssertThrowMPI(ierr);
+      // ------------------
+
+      // Check if number of processors is lined up with p4est partitioning.
+      Assert(myrank < parallel_forest->mpisize, ExcInternalError());
+
+      // Write cumulative sizes to file.
+      // Since each processor owns the same information about the data sizes,
+      // it is sufficient to let only the first processor perform this task.
+      if (myrank == 0)
+        {
+          ierr = MPI_File_write_at(fh,
+                                   0,
+                                   cumulative_sizes_fixed.data(),
+                                   cumulative_sizes_fixed.size(),
+                                   MPI_UNSIGNED,
+                                   MPI_STATUS_IGNORE);
+          AssertThrowMPI(ierr);
+        }
+
+      // Write packed data to file simultaneously.
+      const unsigned int offset =
+        cumulative_sizes_fixed.size() * sizeof(unsigned int);
+
+      ierr = MPI_File_write_at(
+        fh,
+        offset + parallel_forest->global_first_quadrant[myrank] *
+                   cumulative_sizes_fixed.back(), // global position in file
+        src_data_fixed.data(),
+        src_data_fixed.size(), // local buffer
+        MPI_CHAR,
+        MPI_STATUS_IGNORE);
+      AssertThrowMPI(ierr);
+
+      ierr = MPI_File_close(&fh);
+      AssertThrowMPI(ierr);
+    }
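The "_fixed.data" file written above therefore has a simple structure: a header containing the cumulative sizes, followed by one fixed-size block per quadrant in global quadrant order, so every rank can write (and later read) its part independently. A small sketch of the offset computation, with arguments mirroring the members used above:

    #include <mpi.h>

    // Byte position at which a rank's buffer starts in "_fixed.data".
    // Arguments correspond to cumulative_sizes_fixed.size(),
    // cumulative_sizes_fixed.back(), and global_first_quadrant[myrank].
    MPI_Offset
    fixed_data_offset(const unsigned int n_size_entries,
                      const unsigned int bytes_per_cell,
                      const long long    first_quadrant)
    {
      const MPI_Offset header_bytes = n_size_entries * sizeof(unsigned int);
      return header_bytes +
             static_cast<MPI_Offset>(first_quadrant) * bytes_per_cell;
    }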
+
+
+
+    template <int dim, int spacedim>
+    void
+    Triangulation<dim, spacedim>::DataTransfer::load(
+      const typename dealii::internal::p4est::types<dim>::forest
+        *                parallel_forest,
+      const char *       filename,
+      const unsigned int n_attached_deserialize)
+    {
+      Assert(dest_data_fixed.size() == 0,
+             ExcMessage("Previously loaded data has not been released yet!"));
+
+      const std::string fname_fixed = std::string(filename) + "_fixed.data";
+
+      // ----- copied -----
+      // from DataOutInterface::write_vtu_parallel
+      // TODO: write general MPIIO interface
+      int myrank, nproc;
+      int ierr = MPI_Comm_rank(mpi_communicator, &myrank);
+      AssertThrowMPI(ierr);
+      ierr = MPI_Comm_size(mpi_communicator, &nproc);
+      AssertThrowMPI(ierr);
+
+      MPI_Info info;
+      ierr = MPI_Info_create(&info);
+      AssertThrowMPI(ierr);
+      MPI_File fh;
+      ierr = MPI_File_open(mpi_communicator,
+                           const_cast<char *>(fname_fixed.c_str()),
+                           MPI_MODE_RDONLY,
+                           info,
+                           &fh);
+      AssertThrowMPI(ierr);
+
+      ierr = MPI_Info_free(&info);
+      AssertThrowMPI(ierr);
+      // ------------------
+
+      // Check if number of processors is lined up with p4est partitioning.
+      Assert(myrank < parallel_forest->mpisize, ExcInternalError());
+
+      // Read cumulative sizes from file.
+      // Since all processors need the same information about the data sizes,
+      // let each of them retrieve it by reading from the same location in the file.
+      cumulative_sizes_fixed.resize(1 + n_attached_deserialize);
+      ierr = MPI_File_read_at(fh,
+                              0,
+                              cumulative_sizes_fixed.data(),
+                              cumulative_sizes_fixed.size(),
+                              MPI_UNSIGNED,
+                              MPI_STATUS_IGNORE);
+      AssertThrowMPI(ierr);
+
+      // Allocate sufficient memory.
+      dest_data_fixed.resize(parallel_forest->local_num_quadrants *
+                             cumulative_sizes_fixed.back());
+
+      // Read packed data from file simultaneously.
+      const unsigned int offset =
+        cumulative_sizes_fixed.size() * sizeof(unsigned int);
+
+      ierr = MPI_File_read_at(
+        fh,
+        offset + parallel_forest->global_first_quadrant[myrank] *
+                   cumulative_sizes_fixed.back(), // global position in file
+        dest_data_fixed.data(),
+        dest_data_fixed.size(), // local buffer
+        MPI_CHAR,
+        MPI_STATUS_IGNORE);
+      AssertThrowMPI(ierr);
+
+      ierr = MPI_File_close(&fh);
+      AssertThrowMPI(ierr);
+    }
+
+
+
+    template <int dim, int spacedim>
+    void
+    Triangulation<dim, spacedim>::DataTransfer::clear()
+    {
+      cumulative_sizes_fixed.clear();
+      cumulative_sizes_fixed.shrink_to_fit();
+
+      src_data_fixed.clear();
+      src_data_fixed.shrink_to_fit();
+
+      dest_data_fixed.clear();
+      dest_data_fixed.shrink_to_fit();
+
+      // TODO: for variable transfer
+      //      src_sizes_var.clear();
+      //      src_sizes_var.shrink_to_fit();
+      //      src_data_var.clear();
+      //      src_data_var.shrink_to_fit();
+      //
+      //      dest_sizes_var.clear();
+      //      dest_sizes_var.shrink_to_fit();
+      //      dest_data_var.clear();
+      //      dest_data_var.shrink_to_fit();
+    }
+
+
+
     /* ----------------- class Triangulation<dim,spacedim> ----------------- */
 
 
@@ -1153,7 +1687,8 @@ namespace parallel
       , triangulation_has_content(false)
       , connectivity(nullptr)
       , parallel_forest(nullptr)
-      , cell_attached_data({0, 0, 0, {}})
+      , cell_attached_data({0, 0, {}})
+      , data_transfer(mpi_communicator)
     {
       parallel_ghost = nullptr;
     }
@@ -1689,34 +2224,46 @@ namespace parallel
 
       if (this->my_subdomain == 0)
         {
-          const unsigned int buffer_size_per_cell =
-            (cell_attached_data.cumulative_fixed_data_size > 0 ?
-               cell_attached_data.cumulative_fixed_data_size +
-                 sizeof(CellStatus) :
-               0);
-
           std::string   fname = std::string(filename) + ".info";
           std::ofstream f(fname.c_str());
-          f << "version nproc attached_bytes n_attached_objs n_coarse_cells"
-            << std::endl
-            << 2 << " "
+          f << "version nproc n_attached_objs n_coarse_cells" << std::endl
+            << 3 << " "
             << Utilities::MPI::n_mpi_processes(this->mpi_communicator) << " "
-            << buffer_size_per_cell << " "
             << cell_attached_data.pack_callbacks.size() << " "
             << this->n_cells(0) << std::endl;
         }
 
-      if (cell_attached_data.cumulative_fixed_data_size > 0)
+      // each cell should have been flagged `CELL_PERSIST`
+      for (const auto &quad_cell_rel : local_quadrant_cell_relations)
         {
-          const_cast<
-            dealii::parallel::distributed::Triangulation<dim, spacedim> *>(this)
-            ->attach_mesh_data();
+          (void)quad_cell_rel;
+          Assert(
+            (std::get<1>(quad_cell_rel) == // cell_status
+             parallel::distributed::Triangulation<dim, spacedim>::CELL_PERSIST),
+            ExcInternalError());
         }
 
-      dealii::internal::p4est::functions<dim>::save(
-        filename,
-        parallel_forest,
-        cell_attached_data.cumulative_fixed_data_size > 0);
+      if (cell_attached_data.n_attached_data_sets > 0)
+        {
+          // cast away constness
+          auto tria = const_cast<
+            dealii::parallel::distributed::Triangulation<dim, spacedim> *>(
+            this);
+
+          // pack attached data first
+          tria->data_transfer.pack_data(local_quadrant_cell_relations,
+                                        cell_attached_data.pack_callbacks);
+
+          // then store buffers in file
+          tria->data_transfer.save(parallel_forest, filename);
+
+          // and release the memory afterwards
+          tria->data_transfer.clear();
+        }
+
+      dealii::internal::p4est::functions<dim>::save(filename,
+                                                    parallel_forest,
+                                                    false);
 
       // clear all of the callback data, as explained in the documentation of
       // register_data_attach()
@@ -1726,18 +2273,9 @@ namespace parallel
             dealii::parallel::distributed::Triangulation<dim, spacedim> *>(
             this);
 
-        tria->cell_attached_data.n_attached_datas           = 0;
-        tria->cell_attached_data.cumulative_fixed_data_size = 0;
+        tria->cell_attached_data.n_attached_data_sets = 0;
         tria->cell_attached_data.pack_callbacks.clear();
       }
-
-      // and release the data
-      void *userptr = parallel_forest->user_pointer;
-      dealii::internal::p4est::functions<dim>::reset_data(parallel_forest,
-                                                          0,
-                                                          nullptr,
-                                                          nullptr);
-      parallel_forest->user_pointer = userptr;
     }
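Since the per-cell byte count no longer needs to be stored, the companion ".info" file now carries only four numbers under format version 3. A hypothetical file written by a run on 4 MPI ranks with one attached data set and a 64-cell coarse mesh would read:

    version nproc n_attached_objs n_coarse_cells
    3 4 1 64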
 
 
@@ -1769,34 +2307,31 @@ namespace parallel
         connectivity);
       connectivity = nullptr;
 
-      unsigned int version, numcpus, attached_size, attached_count,
-        n_coarse_cells;
+      unsigned int version, numcpus, attached_count, n_coarse_cells;
       {
         std::string   fname = std::string(filename) + ".info";
         std::ifstream f(fname.c_str());
         AssertThrow(f, ExcIO());
         std::string firstline;
         getline(f, firstline); // skip first line
-        f >> version >> numcpus >> attached_size >> attached_count >>
-          n_coarse_cells;
+        f >> version >> numcpus >> attached_count >> n_coarse_cells;
       }
 
-      Assert(version == 2,
+      Assert(version == 3,
              ExcMessage("Incompatible version found in .info file."));
       Assert(this->n_cells(0) == n_coarse_cells,
              ExcMessage("Number of coarse cells differ!"));
 
       // clear all of the callback data, as explained in the documentation of
       // register_data_attach()
-      cell_attached_data.cumulative_fixed_data_size = 0;
-      cell_attached_data.n_attached_datas           = 0;
-      cell_attached_data.n_attached_deserialize     = attached_count;
+      cell_attached_data.n_attached_data_sets   = 0;
+      cell_attached_data.n_attached_deserialize = attached_count;
 
       parallel_forest = dealii::internal::p4est::functions<dim>::load_ext(
         filename,
         this->mpi_communicator,
-        attached_size,
-        attached_size > 0,
+        0,
+        false,
         autopartition,
         0,
         this,
@@ -1823,6 +2358,25 @@ namespace parallel
           Assert(false, ExcInternalError());
         }
 
+      // load saved data, if any was stored
+      if (attached_count > 0)
+        {
+          data_transfer.load(parallel_forest, filename, attached_count);
+
+          data_transfer.unpack_cell_status(local_quadrant_cell_relations);
+
+          // the CellStatus of all stored cells should always be CELL_PERSIST.
+          for (const auto &quad_cell_rel : local_quadrant_cell_relations)
+            {
+              (void)quad_cell_rel;
+              Assert(
+                (std::get<1>(quad_cell_rel) == // cell_status
+                 parallel::distributed::Triangulation<dim,
+                                                      spacedim>::CELL_PERSIST),
+                ExcInternalError());
+            }
+        }
+
       this->update_number_cache();
 
       // signal that de-serialization is finished
@@ -2866,9 +3420,27 @@ namespace parallel
       // has happened, we need to update the quadrant cell relations
       update_quadrant_cell_relations();
 
-      // before repartitioning the mesh let others attach mesh related info
-      // (such as SolutionTransfer data) to the p4est
-      attach_mesh_data();
+      // before repartitioning the mesh, store the current distribution
+      // of the p4est quadrants and let others attach mesh related info
+      // (such as SolutionTransfer data)
+      std::vector<typename dealii::internal::p4est::types<dim>::gloidx>
+        previous_global_first_quadrant;
+
+      // pack data only if anything has been attached
+      if (cell_attached_data.n_attached_data_sets > 0)
+        {
+          data_transfer.pack_data(local_quadrant_cell_relations,
+                                  cell_attached_data.pack_callbacks);
+
+          // before repartitioning the p4est object, save a copy of the
+          // positions of the global first quadrants for data transfer later
+          previous_global_first_quadrant.resize(parallel_forest->mpisize + 1);
+          std::memcpy(previous_global_first_quadrant.data(),
+                      parallel_forest->global_first_quadrant,
+                      sizeof(
+                        typename dealii::internal::p4est::types<dim>::gloidx) *
+                        (parallel_forest->mpisize + 1));
+        }
 
       if (!(settings & no_automatic_repartitioning))
         {
@@ -2897,6 +3469,9 @@ namespace parallel
                 /* weight_callback */
                 &PartitionWeights<dim, spacedim>::cell_weight);
 
+              // release data
+              dealii::internal::p4est::functions<dim>::reset_data(
+                parallel_forest, 0, nullptr, nullptr);
               // reset the user pointer to its previous state
               parallel_forest->user_pointer = this;
             }
@@ -2925,6 +3500,18 @@ namespace parallel
           Assert(false, ExcInternalError());
         }
 
+      // transfer data
+      // only if anything has been attached
+      if (cell_attached_data.n_attached_data_sets > 0)
+        {
+          // execute transfer after triangulation got updated
+          data_transfer.execute_transfer(parallel_forest,
+                                         previous_global_first_quadrant.data());
+
+          // also update the CellStatus information on the new mesh
+          data_transfer.unpack_cell_status(local_quadrant_cell_relations);
+        }
+
 #  ifdef DEBUG
       // Check that we know the level subdomain ids of all our neighbors. This
       // also involves coarser cells that share a vertex if they are active.
@@ -3010,7 +3597,24 @@ namespace parallel
 
       // before repartitioning the mesh let others attach mesh related info
       // (such as SolutionTransfer data) to the p4est
-      attach_mesh_data();
+      std::vector<typename dealii::internal::p4est::types<dim>::gloidx>
+        previous_global_first_quadrant;
+
+      // pack data only if anything has been attached
+      if (cell_attached_data.n_attached_data_sets > 0)
+        {
+          data_transfer.pack_data(local_quadrant_cell_relations,
+                                  cell_attached_data.pack_callbacks);
+
+          // before repartitioning the p4est object, save a copy of the
+          // positions of the global first quadrants for data transfer later
+          previous_global_first_quadrant.resize(parallel_forest->mpisize + 1);
+          std::memcpy(previous_global_first_quadrant.data(),
+                      parallel_forest->global_first_quadrant,
+                      sizeof(
+                        typename dealii::internal::p4est::types<dim>::gloidx) *
+                        (parallel_forest->mpisize + 1));
+        }
 
       if (this->signals.cell_weight.num_slots() == 0)
         {
@@ -3054,6 +3658,15 @@ namespace parallel
           Assert(false, ExcInternalError());
         }
 
+      // transfer data
+      // only if anything has been attached
+      if (cell_attached_data.n_attached_data_sets > 0)
+        {
+          // execute transfer after triangulation got updated
+          data_transfer.execute_transfer(parallel_forest,
+                                         previous_global_first_quadrant.data());
+        }
+
       // update how many cells, edges, etc, we store locally
       this->update_number_cache();
       this->update_periodic_face_map();
@@ -3235,32 +3848,13 @@ namespace parallel
     template <int dim, int spacedim>
     unsigned int
     Triangulation<dim, spacedim>::register_data_attach(
-      const std::size_t size,
-      const std::function<void(const cell_iterator &, const CellStatus, void *)>
-        &pack_callback)
+      const std::function<void(const cell_iterator &,
+                               const CellStatus,
+                               std::vector<char> &)> &pack_callback)
     {
-#  ifdef DEBUG
-      Assert(size > 0, ExcMessage("register_data_attach(), size==0"));
-
-      unsigned int remaining_callback_counter = 0;
-      for (auto it : cell_attached_data.pack_callbacks)
-        if (std::get<0>(it) != numbers::invalid_unsigned_int)
-          ++remaining_callback_counter;
-
-      Assert(
-        remaining_callback_counter == cell_attached_data.n_attached_datas,
-        ExcMessage(
-          "register_data_attach(), not all data has been unpacked last time?"));
-#  endif
-
-      // add new pair with offset and pack_callback
-      cell_attached_data.pack_callbacks.push_back(
-        {cell_attached_data.cumulative_fixed_data_size + sizeof(CellStatus),
-         pack_callback});
-
-      // increase counters
-      ++cell_attached_data.n_attached_datas;
-      cell_attached_data.cumulative_fixed_data_size += size;
+      // add the new pack_callback function to the list
+      cell_attached_data.pack_callbacks.push_back(pack_callback);
+      ++cell_attached_data.n_attached_data_sets;
 
       // return the position of the callback in the register vector as a handle
       return cell_attached_data.pack_callbacks.size() - 1;
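For callers, the new interface means a pack callback no longer writes through a pre-sized void* but appends its bytes to a per-cell std::vector<char>, and register_data_attach() needs nothing but the callback itself. A minimal usage sketch (the triangulation object and the packed payload are hypothetical; it mirrors the updated tests further down in this patch):

    #include <deal.II/base/utilities.h>
    #include <deal.II/distributed/tria.h>

    #include <vector>

    template <int dim>
    unsigned int
    attach_one_int_per_cell(
      dealii::parallel::distributed::Triangulation<dim> &tria)
    {
      using Tria = dealii::parallel::distributed::Triangulation<dim>;

      // The returned handle identifies this data set when unpacking later.
      return tria.register_data_attach(
        [](const typename Tria::cell_iterator &cell,
           const typename Tria::CellStatus /*status*/,
           std::vector<char> &data) {
          // append without compression so that all cells contribute equally
          // many bytes to the fixed-size transfer
          dealii::Utilities::pack(cell->index(),
                                  data,
                                  /*allow_compression=*/false);
        });
    }

The handle is then passed to notify_ready_to_unpack() after refinement, repartitioning, or load(); see the companion sketch below.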
@@ -3271,14 +3865,17 @@ namespace parallel
     template <int dim, int spacedim>
     void
     Triangulation<dim, spacedim>::notify_ready_to_unpack(
-      const unsigned int                       handle,
-      const std::function<void(const cell_iterator &,
-                               const CellStatus,
-                               const void *)> &unpack_callback)
+      const unsigned int handle,
+      const std::function<
+        void(const cell_iterator &,
+             const CellStatus,
+             const boost::iterator_range<std::vector<char>::const_iterator>)>
+        &unpack_callback)
     {
+#  ifdef DEBUG
       Assert(handle < cell_attached_data.pack_callbacks.size(),
              ExcMessage("Invalid handle."));
-      Assert(cell_attached_data.n_attached_datas > 0,
+      Assert(cell_attached_data.n_attached_data_sets > 0,
              ExcMessage("The notify_ready_to_unpack() has already been called "
                         "once for each registered callback."));
 
@@ -3288,82 +3885,42 @@ namespace parallel
                static_cast<unsigned int>(parallel_forest->local_num_quadrants),
              ExcInternalError());
 
-      const unsigned int offset =
-        std::get<0>(cell_attached_data.pack_callbacks[handle]);
-      // check if unpack function has been previously called
-      Assert(offset != dealii::numbers::invalid_unsigned_int,
+      // deregister the pack_callback function:
+      // first reset its entry to an invalid state so that the remaining
+      // handles stay unambiguous, then free memory once all data has been
+      // unpacked (see below)
+      // consider_cell_invalid is irrelevant in this context
+      Assert(cell_attached_data.pack_callbacks[handle] != nullptr,
              ExcInternalError());
+      cell_attached_data.pack_callbacks[handle] = nullptr;
+#  endif
 
-      for (const auto &it_rel : local_quadrant_cell_relations)
-        {
-          const auto &q       = std::get<0>(it_rel);
-          const auto &cell_it = std::get<2>(it_rel);
-
-          const auto cell_status =
-            *static_cast<typename parallel::distributed::
-                           Triangulation<dim, spacedim>::CellStatus *>(
-              q->p.user_data);
-
-          void *ptr = static_cast<char *>(q->p.user_data) + offset;
-
-          switch (cell_status)
-            {
-              case parallel::distributed::Triangulation<dim,
-                                                        spacedim>::CELL_PERSIST:
-              case parallel::distributed::Triangulation<dim,
-                                                        spacedim>::CELL_COARSEN:
-                unpack_callback(cell_it, cell_status, ptr);
-                break;
-
-              case parallel::distributed::Triangulation<dim,
-                                                        spacedim>::CELL_REFINE:
-                unpack_callback(cell_it->parent(), cell_status, ptr);
-                break;
-
-              case parallel::distributed::Triangulation<dim,
-                                                        spacedim>::CELL_INVALID:
-                // do nothing on these cells
-                break;
+      // perform unpacking
+      data_transfer.unpack_data(local_quadrant_cell_relations,
+                                handle,
+                                unpack_callback);
 
-              default:
-                Assert(false, ExcInternalError());
-                break;
-            }
-        }
-
-      --cell_attached_data.n_attached_datas;
+      // decrease counters
+      --cell_attached_data.n_attached_data_sets;
       if (cell_attached_data.n_attached_deserialize > 0)
         --cell_attached_data.n_attached_deserialize;
 
-      // deregister corresponding pack_callback function
-      // first reset with invalid entries to preserve ambiguity of handles,
-      // then free memory when all were unpacked (see below)
-      std::get<0>(cell_attached_data.pack_callbacks[handle]) // offset value
-        = dealii::numbers::invalid_unsigned_int;
-      std::get<1>(
-        cell_attached_data.pack_callbacks[handle]) // pack_callback function
-        = typename CellAttachedData::pack_callback_t();
-
       // important: only remove data if we are not in the deserialization
       // process. There, each SolutionTransfer registers and unpacks before
-      // the next one does this, so n_attached_datas is only 1 here.  This
+      // the next one does this, so n_attached_data_sets is only 1 here.  This
       // would destroy the saved data before the second SolutionTransfer can
       // get it. This created a bug that is documented in
       // tests/mpi/p4est_save_03 with more than one SolutionTransfer.
-      if (cell_attached_data.n_attached_datas == 0 &&
+      if (cell_attached_data.n_attached_data_sets == 0 &&
           cell_attached_data.n_attached_deserialize == 0)
         {
           // everybody got their data, time for cleanup!
-          cell_attached_data.cumulative_fixed_data_size = 0;
           cell_attached_data.pack_callbacks.clear();
+          data_transfer.clear();
 
-          // and release the data
-          void *userptr = parallel_forest->user_pointer;
-          dealii::internal::p4est::functions<dim>::reset_data(parallel_forest,
-                                                              0,
-                                                              nullptr,
-                                                              nullptr);
-          parallel_forest->user_pointer = userptr;
+          // reset all cell_status entries after coarsening/refinement
+          for (auto &quad_cell_rel : local_quadrant_cell_relations)
+            std::get<1>(quad_cell_rel) =
+              parallel::distributed::Triangulation<dim, spacedim>::CELL_PERSIST;
         }
     }
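The matching unpack side receives a boost::iterator_range into the cell's block of the received buffer instead of a raw pointer. A companion sketch to the one above (again hypothetical, following the pattern of the updated tests in this patch):

    #include <deal.II/base/utilities.h>
    #include <deal.II/distributed/tria.h>

    #include <boost/range/iterator_range.hpp>

    #include <vector>

    template <int dim>
    void
    unpack_one_int_per_cell(
      dealii::parallel::distributed::Triangulation<dim> &tria,
      const unsigned int                                 handle)
    {
      using Tria = dealii::parallel::distributed::Triangulation<dim>;

      tria.notify_ready_to_unpack(
        handle,
        [](const typename Tria::cell_iterator & /*cell*/,
           const typename Tria::CellStatus /*status*/,
           const boost::iterator_range<std::vector<char>::const_iterator>
             data_range) {
          const int value =
            dealii::Utilities::unpack<int>(data_range.begin(),
                                           data_range.end(),
                                           /*allow_compression=*/false);
          (void)value; // use the transferred value here
        });
    }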
 
@@ -3658,13 +4215,12 @@ namespace parallel
         MemoryConsumption::memory_consumption(connectivity) +
         MemoryConsumption::memory_consumption(parallel_forest) +
         MemoryConsumption::memory_consumption(
-          cell_attached_data.cumulative_fixed_data_size) +
+          cell_attached_data.n_attached_data_sets) +
+        // MemoryConsumption::memory_consumption(cell_attached_data.pack_callbacks)
+        // +
+        // TODO[TH]: how?
         MemoryConsumption::memory_consumption(
-          cell_attached_data.n_attached_datas)
-        //      + MemoryConsumption::memory_consumption(pack_callbacks)
-        //      //TODO[TH]: how?
-        + MemoryConsumption::memory_consumption(
-            coarse_cell_to_p4est_tree_permutation) +
+          coarse_cell_to_p4est_tree_permutation) +
         MemoryConsumption::memory_consumption(
           p4est_tree_to_coarse_cell_permutation) +
         memory_consumption_p4est();
@@ -3724,10 +4280,8 @@ namespace parallel
             other_tria_x->coarse_cell_to_p4est_tree_permutation;
           p4est_tree_to_coarse_cell_permutation =
             other_tria_x->p4est_tree_to_coarse_cell_permutation;
-          cell_attached_data.cumulative_fixed_data_size =
-            other_tria_x->cell_attached_data.cumulative_fixed_data_size;
-          cell_attached_data.n_attached_datas =
-            other_tria_x->cell_attached_data.n_attached_datas;
+          cell_attached_data = other_tria_x->cell_attached_data;
+          data_transfer      = other_tria_x->data_transfer;
 
           settings = other_tria_x->settings;
         }
@@ -3792,94 +4346,6 @@ namespace parallel
 
 
 
-    template <int dim, int spacedim>
-    void
-    Triangulation<dim, spacedim>::attach_mesh_data()
-    {
-      // check if there is anything at all to do here
-      if (cell_attached_data.cumulative_fixed_data_size == 0)
-        {
-          Assert(cell_attached_data.n_attached_datas == 0, ExcInternalError());
-          return;
-        }
-
-      // check if local_quadrant_cell_relations have been previously gathered
-      // correctly
-      Assert(local_quadrant_cell_relations.size() ==
-               static_cast<unsigned int>(parallel_forest->local_num_quadrants),
-             ExcInternalError());
-
-      // reallocate user_data in p4est
-      void *userptr = parallel_forest->user_pointer;
-      dealii::internal::p4est::functions<dim>::reset_data(
-        parallel_forest,
-        cell_attached_data.cumulative_fixed_data_size + sizeof(CellStatus),
-        nullptr,
-        nullptr);
-      parallel_forest->user_pointer = userptr;
-
-      for (const auto &it_rel : local_quadrant_cell_relations)
-        {
-          const auto &q           = std::get<0>(it_rel);
-          const auto &cell_status = std::get<1>(it_rel);
-          const auto &cell_it     = std::get<2>(it_rel);
-
-          *static_cast<typename parallel::distributed::
-                         Triangulation<dim, spacedim>::CellStatus *>(
-            q->p.user_data) = cell_status;
-
-          switch (cell_status)
-            {
-              case parallel::distributed::Triangulation<dim,
-                                                        spacedim>::CELL_PERSIST:
-              case parallel::distributed::Triangulation<dim,
-                                                        spacedim>::CELL_COARSEN:
-              case parallel::distributed::Triangulation<dim,
-                                                        spacedim>::CELL_REFINE:
-#  ifdef DEBUG
-                if (cell_status == parallel::distributed::
-                                     Triangulation<dim, spacedim>::CELL_COARSEN)
-                  {
-                    // double check the condition that we will only ever attach
-                    // data to cells with children when we get here. however, we
-                    // can only tolerate one level of coarsening at a time, so
-                    // check that the children are all active
-                    Assert(cell_it->active() == false, ExcInternalError());
-                    for (unsigned int c = 0;
-                         c < GeometryInfo<dim>::max_children_per_cell;
-                         ++c)
-                      Assert(cell_it->child(c)->active(), ExcInternalError());
-                  }
-                else
-                  {
-                    // double check the condition that we will only ever attach
-                    // data to active cells when we get here
-                    Assert(cell_it->active(), ExcInternalError());
-                  }
-#  endif
-                // call all callbacks
-                for (const auto &it_pack : cell_attached_data.pack_callbacks)
-                  {
-                    void *ptr = static_cast<char *>(q->p.user_data) +
-                                it_pack.first; // add offset
-                    (it_pack.second)(cell_it, cell_status, ptr);
-                  }
-                break;
-
-              case parallel::distributed::Triangulation<dim,
-                                                        spacedim>::CELL_INVALID:
-                // do nothing on these cells
-                break;
-
-              default:
-                Assert(false, ExcInternalError());
-                break;
-            }
-        }
-    }
-
-
-
     template <int dim, int spacedim>
     std::vector<unsigned int>
     Triangulation<dim, spacedim>::get_cell_weights() const
@@ -3904,10 +4370,10 @@ namespace parallel
       // Note that we need to follow the p4est ordering
       // instead of the deal.II ordering to get the cell_weights
       // in the same order p4est will encounter them during repartitioning.
-      for (const auto &it_rel : local_quadrant_cell_relations)
+      for (const auto &quad_cell_rel : local_quadrant_cell_relations)
         {
-          const auto &cell_status = std::get<1>(it_rel);
-          const auto &cell_it     = std::get<2>(it_rel);
+          const auto &cell_status = std::get<1>(quad_cell_rel);
+          const auto &cell_it     = std::get<2>(quad_cell_rel);
 
           switch (cell_status)
             {
@@ -3992,11 +4458,10 @@ namespace parallel
     template <int spacedim>
     unsigned int
     Triangulation<1, spacedim>::register_data_attach(
-      const std::size_t /*size*/,
       const std::function<
         void(const typename dealii::Triangulation<1, spacedim>::cell_iterator &,
              const typename dealii::Triangulation<1, spacedim>::CellStatus,
-             void *)> & /*pack_callback*/)
+             std::vector<char> &)> & /*pack_callback*/)
     {
       Assert(false, ExcNotImplemented());
       return 0;
@@ -4007,11 +4472,12 @@ namespace parallel
     template <int spacedim>
     void
     Triangulation<1, spacedim>::notify_ready_to_unpack(
-      const unsigned int /*offset*/,
+      const unsigned int /*handle*/,
       const std::function<
         void(const typename dealii::Triangulation<1, spacedim>::cell_iterator &,
              const typename dealii::Triangulation<1, spacedim>::CellStatus,
-             const void *)> & /*unpack_callback*/)
+             const boost::iterator_range<std::vector<char>::const_iterator>)>
+        & /*unpack_callback*/)
     {
       Assert(false, ExcNotImplemented());
     }
index cd6f3833312f56e014f75c6cb7651660c7c5cfce..0941499e2fccb6d3d8dd5dc340ec75be362dd167 100644 (file)
@@ -1024,8 +1024,7 @@ namespace Particles
 
   template <int dim, int spacedim>
   void
-  ParticleHandler<dim, spacedim>::register_store_callback_function(
-    const bool serialization)
+  ParticleHandler<dim, spacedim>::register_store_callback_function()
   {
     parallel::distributed::Triangulation<dim, spacedim>
       *non_const_triangulation =
@@ -1041,7 +1040,7 @@ namespace Particles
         const std::function<
           void(const typename Triangulation<dim, spacedim>::cell_iterator &,
                const typename Triangulation<dim, spacedim>::CellStatus,
-               void *)>
+               std::vector<char> &)>
           callback_function =
             std::bind(&ParticleHandler<dim, spacedim>::store_particles,
                       std::cref(*this),
@@ -1049,28 +1048,8 @@ namespace Particles
                       std::placeholders::_2,
                       std::placeholders::_3);
 
-        // Compute the size per serialized particle. This is simple if we own
-        // particles, simply ask one of them. Otherwise create a temporary
-        // particle, ask it for its size and add the size of its properties.
-        const std::size_t size_per_particle =
-          (particles.size() > 0) ?
-            begin()->serialized_size_in_bytes() :
-            Particle<dim, spacedim>().serialized_size_in_bytes() +
-              property_pool->n_properties_per_slot() * sizeof(double);
-
-        // We need to transfer the number of particles for this cell and
-        // the particle data itself. If we are in the process of refinement
-        // (i.e. not in serialization) we need to provide 2^dim times the
-        // space for the data in case a cell is coarsened and all particles
-        // of the children have to be stored in the parent cell.
-        const std::size_t transfer_size_per_cell =
-          sizeof(unsigned int) +
-          (size_per_particle * global_max_particles_per_cell) *
-            (serialization ? 1 : std::pow(2, dim));
-
         handle =
-          non_const_triangulation->register_data_attach(transfer_size_per_cell,
-                                                        callback_function);
+          non_const_triangulation->register_data_attach(callback_function);
       }
   }
 
@@ -1099,7 +1078,7 @@ namespace Particles
         const std::function<
           void(const typename Triangulation<dim, spacedim>::cell_iterator &,
                const typename Triangulation<dim, spacedim>::CellStatus,
-               void *)>
+               std::vector<char> &)>
           callback_function =
             std::bind(&ParticleHandler<dim, spacedim>::store_particles,
                       std::cref(*this),
@@ -1107,24 +1086,8 @@ namespace Particles
                       std::placeholders::_2,
                       std::placeholders::_3);
 
-        // Compute the size per serialized particle. This is simple if we own
-        // particles, simply ask one of them. Otherwise create a temporary
-        // particle, ask it for its size and add the size of its properties.
-        const std::size_t size_per_particle =
-          (particles.size() > 0) ?
-            begin()->serialized_size_in_bytes() :
-            Particle<dim, spacedim>().serialized_size_in_bytes() +
-              property_pool->n_properties_per_slot() * sizeof(double);
-
-        // We need to transfer the number of particles for this cell and
-        // the particle data itself and we need to provide 2^dim times the
-        // space for the data in case a cell is coarsened
-        const std::size_t transfer_size_per_cell =
-          sizeof(unsigned int) +
-          (size_per_particle * global_max_particles_per_cell);
         handle =
-          non_const_triangulation->register_data_attach(transfer_size_per_cell,
-                                                        callback_function);
+          non_const_triangulation->register_data_attach(callback_function);
       }
 
     // Check if something was stored and load it
@@ -1133,7 +1096,7 @@ namespace Particles
         const std::function<
           void(const typename Triangulation<dim, spacedim>::cell_iterator &,
                const typename Triangulation<dim, spacedim>::CellStatus,
-               const void *)>
+               const boost::iterator_range<std::vector<char>::const_iterator>)>
           callback_function =
             std::bind(&ParticleHandler<dim, spacedim>::load_particles,
                       std::ref(*this),
@@ -1158,8 +1121,39 @@ namespace Particles
   ParticleHandler<dim, spacedim>::store_particles(
     const typename Triangulation<dim, spacedim>::cell_iterator &cell,
     const typename Triangulation<dim, spacedim>::CellStatus     status,
-    void *                                                      data) const
+    std::vector<char> &data_vector) const
   {
+    // -------------------
+    // HOTFIX: resize the buffer and cast its data pointer to void*
+    // TODO: Adapt ParticleHandler to the variable-size transfer
+
+    // ----- Copied from the old register_store_callback_function -----
+    // Compute the size per serialized particle. This is simple if we own
+    // particles, simply ask one of them. Otherwise create a temporary
+    // particle, ask it for its size and add the size of its properties.
+    const std::size_t size_per_particle =
+      (particles.size() > 0) ?
+        begin()->serialized_size_in_bytes() :
+        Particle<dim, spacedim>().serialized_size_in_bytes() +
+          property_pool->n_properties_per_slot() * sizeof(double);
+
+    // We need to transfer the number of particles for this cell and
+    // the particle data itself. If we are in the process of refinement
+    // (i.e. not in serialization) we need to provide 2^dim times the
+    // space for the data in case a cell is coarsened and all particles
+    // of the children have to be stored in the parent cell.
+    // (Note: In this hotfix we always reserve sufficient memory)
+    const bool        serialization = false;
+    const std::size_t transfer_size_per_cell =
+      sizeof(unsigned int) +
+      (size_per_particle * global_max_particles_per_cell) *
+        (serialization ? 1 : std::pow(2, dim));
+
+    const size_t previous_size = data_vector.size();
+    data_vector.resize(previous_size + transfer_size_per_cell);
+    void *data = static_cast<void *>(data_vector.data() + previous_size);
+    // --------------------
+
     unsigned int n_particles(0);
 
     // If the cell persist or is refined store all particles of the current
@@ -1228,10 +1222,17 @@ namespace Particles
   template <int dim, int spacedim>
   void
   ParticleHandler<dim, spacedim>::load_particles(
-    const typename Triangulation<dim, spacedim>::cell_iterator &cell,
-    const typename Triangulation<dim, spacedim>::CellStatus     status,
-    const void *                                                data)
+    const typename Triangulation<dim, spacedim>::cell_iterator &   cell,
+    const typename Triangulation<dim, spacedim>::CellStatus        status,
+    const boost::iterator_range<std::vector<char>::const_iterator> data_range)
   {
+    // -------------------
+    // HOTFIX: cast the start of the received data range to const void*
+    // TODO: Adapt ParticleHandler to the variable-size transfer
+    //    const void *data = static_cast<const void *>(data_vector.data());
+    const void *data = static_cast<const void *>(&*(data_range.begin()));
+    // -------------------
+
     const unsigned int *n_particles_in_cell_ptr =
       static_cast<const unsigned int *>(data);
     const void *pdata =
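The hotfix above bridges the old fixed-size, pointer-based particle (de)serialization and the new std::vector<char> interface: on the store side the per-cell vector is grown by a conservatively sized block that is then written through a raw pointer, and on the load side the start of the received range is reinterpreted the same way. A stripped-down sketch of the store pattern (block size and particle count are hypothetical):

    #include <cstring>
    #include <vector>

    void store_block_sketch(std::vector<char> &data_vector)
    {
      // grow the per-cell buffer by one fixed-size block
      const std::size_t block_size    = sizeof(unsigned int) + 16; // hypothetical
      const std::size_t previous_size = data_vector.size();
      data_vector.resize(previous_size + block_size);

      // write the particle count at the beginning of the new block ...
      const unsigned int n_particles = 2; // hypothetical
      std::memcpy(data_vector.data() + previous_size,
                  &n_particles,
                  sizeof(unsigned int));
      // ... and the serialized particle data would follow here
    }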
index a91f3d207e2974b91fe4ad9c593fcf781b48f15c..06fdafdca42735eadf9f238d3717eb23a583c8a0 100644 (file)
@@ -38,8 +38,8 @@ pack_function(
   const typename parallel::distributed::Triangulation<dim, dim>::cell_iterator
     &cell,
   const typename parallel::distributed::Triangulation<dim, dim>::CellStatus
-        status,
-  void *data)
+                     status,
+  std::vector<char> &data)
 {
   static int some_number = 0;
   deallog << "packing cell " << cell->id() << " with data=" << some_number
@@ -63,8 +63,7 @@ pack_function(
       Assert(!cell->has_children(), ExcInternalError());
     }
 
-  int *intdata = reinterpret_cast<int *>(data);
-  *intdata     = some_number;
+  Utilities::pack(some_number, data, /*allow_compression=*/false);
 
   ++some_number;
 }
@@ -75,12 +74,14 @@ unpack_function(
   const typename parallel::distributed::Triangulation<dim, dim>::cell_iterator
     &cell,
   const typename parallel::distributed::Triangulation<dim, dim>::CellStatus
-              status,
-  const void *data)
+                                                                 status,
+  const boost::iterator_range<std::vector<char>::const_iterator> data_range)
 {
-  const int *intdata = reinterpret_cast<const int *>(data);
+  const int intdata = Utilities::unpack<int>(data_range.begin(),
+                                             data_range.end(),
+                                             /*allow_compression=*/false);
 
-  deallog << "unpacking cell " << cell->id() << " with data=" << (*intdata)
+  deallog << "unpacking cell " << cell->id() << " with data=" << intdata
           << " status=";
   if (status == parallel::distributed::Triangulation<dim, dim>::CELL_PERSIST)
     deallog << "PERSIST";
@@ -139,8 +140,7 @@ test()
             }
         }
 
-      unsigned int handle =
-        tr.register_data_attach(sizeof(int), pack_function<dim>);
+      unsigned int handle = tr.register_data_attach(pack_function<dim>);
 
       deallog << "handle=" << handle << std::endl;
       tr.execute_coarsening_and_refinement();
index 33d326c8f223ffb5c6deeae6f4b4e6c999522f3c..e01a3fd2843c9975389cd99650c1f90bd3e7f7a5 100644 (file)
@@ -38,8 +38,8 @@ pack_function(
   const typename parallel::distributed::Triangulation<dim, dim>::cell_iterator
     &cell,
   const typename parallel::distributed::Triangulation<dim, dim>::CellStatus
-        status,
-  void *data)
+                     status,
+  std::vector<char> &data)
 {
   static int some_number = cell->index();
   deallog << "packing cell " << cell->id() << " with data=" << some_number
@@ -63,8 +63,7 @@ pack_function(
       Assert(!cell->has_children(), ExcInternalError());
     }
 
-  int *intdata = reinterpret_cast<int *>(data);
-  *intdata     = some_number;
+  Utilities::pack(some_number, data, /*allow_compression=*/false);
 
   ++some_number;
 }
@@ -75,12 +74,14 @@ unpack_function(
   const typename parallel::distributed::Triangulation<dim, dim>::cell_iterator
     &cell,
   const typename parallel::distributed::Triangulation<dim, dim>::CellStatus
-              status,
-  const void *data)
+                                                                 status,
+  const boost::iterator_range<std::vector<char>::const_iterator> data_range)
 {
-  const int *intdata = reinterpret_cast<const int *>(data);
+  const int number = Utilities::unpack<int>(data_range.begin(),
+                                            data_range.end(),
+                                            /*allow_compression=*/false);
 
-  deallog << "unpacking cell " << cell->id() << " with data=" << (*intdata)
+  deallog << "unpacking cell " << cell->id() << " with data=" << number
           << " status=";
   if (status == parallel::distributed::Triangulation<dim, dim>::CELL_PERSIST)
     deallog << "PERSIST";
@@ -123,15 +124,14 @@ test()
 
       deallog << "* global refine:" << std::endl;
 
-      unsigned int offset =
-        tr.register_data_attach(sizeof(int), pack_function<dim>);
+      unsigned int handle = tr.register_data_attach(pack_function<dim>);
 
       tr.refine_global(1);
 
       deallog << "locally owned cells: " << tr.n_locally_owned_active_cells()
               << " / " << tr.n_global_active_cells() << std::endl;
 
-      tr.notify_ready_to_unpack(offset, unpack_function<dim>);
+      tr.notify_ready_to_unpack(handle, unpack_function<dim>);
 
       // tr.write_mesh_vtk("a");
 
@@ -139,7 +139,7 @@ test()
 
       deallog << "* repartition:" << std::endl;
 
-      offset = tr.register_data_attach(sizeof(int), pack_function<dim>);
+      handle = tr.register_data_attach(pack_function<dim>);
 
       tr.repartition();
 
@@ -148,7 +148,7 @@ test()
       deallog << "locally owned cells: " << tr.n_locally_owned_active_cells()
               << " / " << tr.n_global_active_cells() << std::endl;
 
-      tr.notify_ready_to_unpack(offset, unpack_function<dim>);
+      tr.notify_ready_to_unpack(handle, unpack_function<dim>);
 
       const unsigned int checksum = tr.get_checksum();
       if (myid == 0)
index ef45baf57a4bded59588c487fe2814626bae98e3..043c8b6dbfd718b50c314a8ce255aecf85f7eff3 100644 (file)
@@ -106,8 +106,7 @@ test()
     tr.signals.pre_distributed_refinement.connect(std::bind(
       &Particles::ParticleHandler<dim,
                                   spacedim>::register_store_callback_function,
-      &particle_handler,
-      false));
+      &particle_handler));
 
     tr.signals.post_distributed_refinement.connect(std::bind(
       &Particles::ParticleHandler<dim,
index fe94b3cd21290d801cf579b2ca03cf31a7c2d944..0c61a3a9815490bb2816b5fdb4adc64c53517e1b 100644 (file)
@@ -111,8 +111,7 @@ test()
   tr.signals.pre_distributed_save.connect(std::bind(
     &Particles::ParticleHandler<dim,
                                 spacedim>::register_store_callback_function,
-    &particle_handler,
-    true));
+    &particle_handler));
 
   // save data to archive
   std::ostringstream oss;
