https://gitweb.dealii.org/ - dealii.git/commitdiff
Enable 'hp::DoFHandler' for 'p::d::SolutionTransfer'. 7061/head
author Marc Fehling <marc.fehling@gmx.net>
Thu, 19 Jul 2018 20:22:40 +0000 (14:22 -0600)
committer Marc Fehling <marc.fehling@gmx.net>
Thu, 16 Aug 2018 20:31:23 +0000 (14:31 -0600)
doc/news/changes/major/20180815Fehling [new file with mode: 0644]
include/deal.II/distributed/active_fe_indices_transfer.h
include/deal.II/distributed/solution_transfer.h
include/deal.II/dofs/dof_handler.h
include/deal.II/hp/dof_handler.h
source/distributed/solution_transfer.cc
source/distributed/solution_transfer.inst.in
tests/mpi/p4est_save_06.cc [new file with mode: 0644]
tests/mpi/p4est_save_06.with_p4est=true.with_petsc=true.mpirun=5.output [new file with mode: 0644]
tests/mpi/solution_transfer_04.cc [new file with mode: 0644]
tests/mpi/solution_transfer_04.with_p4est=true.with_trilinos=true.mpirun=2.output [new file with mode: 0644]

diff --git a/doc/news/changes/major/20180815Fehling b/doc/news/changes/major/20180815Fehling
new file mode 100644 (file)
index 0000000..d932182
--- /dev/null
@@ -0,0 +1,6 @@
+New: The parallel::distributed::SolutionTransfer class is now capable
+of transferring data that has been associated with an hp::DoFHandler
+object. See the documentation of parallel::distributed::SolutionTransfer
+for how to use this new functionality.
+<br>
+(Marc Fehling, 2018/08/15)
index 3409082b7d84bef94d01eb3d3c65a4ecfd2812ba..3392015b76bad11ddb5442b8ea4cd691da4144fd 100644 (file)
@@ -38,6 +38,12 @@ namespace parallel
      * cell what parallel::distributed::SolutionTransfer does for the values
      * of degrees of freedom defined on a parallel::distributed::Triangulation.
      *
+     * If refinement is involved in the data transfer process, the children of
+     * a refined cell inherit the `active_fe_index` from their parent. If
+     * cells get coarsened into one, the resulting parent cell will be assigned
+     * the least dominating `active_fe_index` amongst its children, as
+     * determined by the function hp::FECollection::find_least_face_dominating_fe().
+     *
      * @note If you use more than one object to attach data to a
      * parallel::distributed::Triangulation at the same time (e.g. a
      * parallel::distributed::SolutionTransfer object), the calls to
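
The coarsening rule documented in this hunk amounts to only a few lines of
code. A minimal sketch, assuming `parent` is an iterator to the cell about
to absorb its children and `fe_collection` is the hp::FECollection in use
(a corresponding implementation appears in pack_callback() of
source/distributed/solution_transfer.cc later in this commit):

@code
// Collect the active_fe_indices of all children, then let the
// FECollection pick the least dominating finite element among them.
std::set<unsigned int> fe_indices_children;
for (unsigned int c = 0; c < GeometryInfo<dim>::max_children_per_cell; ++c)
  fe_indices_children.insert(parent->child(c)->active_fe_index());

const unsigned int parent_fe_index =
  fe_collection.find_least_face_dominating_fe(fe_indices_children);
@endcode
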
index d358a0182866d9b511ab58a1dd3c0a3d8a51264a..925769dcaee15df57b19478e8d9d2dd43f6a6f86 100644 (file)
@@ -52,7 +52,7 @@ namespace parallel
      *
      * <h3>Transferring a solution</h3> Here VectorType is your favorite
      * vector type, e.g. PETScWrappers::MPI::Vector,
-     * TrilinosWrappers::MPI::Vector, or corresponding blockvectors.
+     * TrilinosWrappers::MPI::Vector, or corresponding block vectors.
      * @code
      * SolutionTransfer<dim, VectorType> soltrans(dof_handler);
      * // flag some cells for refinement and coarsening, e.g.
@@ -106,8 +106,9 @@ namespace parallel
      * solution.reinit(locally_owned_dofs,
      *                 mpi_communicator);
      * ...
-     * // Transfer solution to vector that provides access to locally relevant
-     * DoFs TrilinosWrappers::MPI::Vector old_solution;
+     * // Transfer solution to vector that provides access to
+     * // locally relevant DoFs
+     * TrilinosWrappers::MPI::Vector old_solution;
      * old_solution.reinit(locally_owned_dofs,
      *                     locally_relevant_dofs,
      *                     mpi_communicator);
@@ -148,6 +149,113 @@ namespace parallel
      * @endcode
      *
      *
+     * <h3>Note on usage with hp::DoFHandler</h3>
+     *
+     * If an object of the hp::DoFHandler class is registered with an
+     * instantiation of this parallel::distributed::SolutionTransfer
+     * class, the following requirements have to be met:
+     * <ul>
+     *   <li>
+     *     The hp::DoFHandler type needs to be explicitly specified as a
+     *     template argument of parallel::distributed::SolutionTransfer, i.e.:
+     *     @code
+     *     parallel::distributed::SolutionTransfer<dim, VectorType,
+     *       hp::DoFHandler<dim, spacedim>> sol_trans(hp_dof_handler);
+     *     @endcode
+     *   </li>
+     *   <li>
+     *     The transfer of the <tt>active_fe_index</tt> of each cell
+     *     has to be scheduled as well in the parallel::distributed case,
+     *     since the ownership of cells may change during mesh repartitioning.
+     *     This can be achieved with the
+     *     parallel::distributed::ActiveFEIndicesTransfer class. The order in
+     *     which both objects append data during the packing process does not
+     *     matter. However, the unpacking process after refinement or
+     *     deserialization requires the `active_fe_index` to be distributed
+     *     before interpolating the data of the
+     *     parallel::distributed::SolutionTransfer object, so that the correct
+     *     FiniteElement object is associated with each cell. See the
+     *     documentation on parallel::distributed::ActiveFEIndicesTransfer
+     *     for further instructions.
+     *   </li>
+     * </ul>
+     *
+     * Since data on hp::DoFHandler objects is associated with many different
+     * FiniteElement objects, each cell's data has to be processed with its
+     * corresponding `active_fe_index`. Further, if refinement is involved,
+     * data will be packed on the parent cell with its `active_fe_index` and
+     * unpacked later with the same index on its children. If cells get
+     * coarsened into one, data will be packed on the parent cell with the
+     * least dominating `active_fe_index` amongst its children, as determined
+     * by the function hp::FECollection::find_least_face_dominating_fe(), and
+     * unpacked later on the same cell with the same index.
+     *
+     * Code snippets to demonstrate the usage of the
+     * parallel::distributed::SolutionTransfer class with hp::DoFHandler
+     * objects are provided in the following. Here VectorType
+     * is your favorite vector type, e.g. PETScWrappers::MPI::Vector,
+     * TrilinosWrappers::MPI::Vector, or corresponding block vectors.
+     *
+     * After refinement, the order in which to unpack the transferred data
+     * is important:
+     * @code
+     * //[prepare triangulation for refinement ...]
+     *
+     * parallel::distributed::ActiveFEIndicesTransfer<dim,spacedim>
+     *   feidx_trans(hp_dof_handler);
+     * parallel::distributed::
+     *   SolutionTransfer<dim,VectorType,hp::DoFHandler<dim,spacedim>>
+     *     sol_trans(hp_dof_handler);
+     *
+     * feidx_trans.prepare_for_transfer();
+     * sol_trans.prepare_for_coarsening_and_refinement(solution);
+     * triangulation.execute_coarsening_and_refinement();
+     *
+     * feidx_trans.unpack();
+     * hp_dof_handler.distribute_dofs(fe_collection);
+     *
+     * VectorType interpolated_solution;
+     * //[create VectorType in the right size here ...]
+     * sol_trans.interpolate(interpolated_solution);
+     * @endcode
+     *
+     * If the vector has the locally relevant DoFs, serialization works as follows:
+     * @code
+     * parallel::distributed::ActiveFEIndicesTransfer<dim,spacedim>
+     *   feidx_trans(hp_dof_handler);
+     * parallel::distributed::
+     *   SolutionTransfer<dim, VectorType, hp::DoFHandler<dim,spacedim>>
+     *     sol_trans(hp_dof_handler);
+     *
+     * feidx_trans.prepare_for_transfer();
+     * sol_trans.prepare_serialization(vector);
+     *
+     * triangulation.save(filename);
+     * @endcode
+     *
+     * For deserialization, the vector needs to be a distributed vector
+     * (without ghost elements):
+     * @code
+     * //[create coarse mesh...]
+     * triangulation.load(filename);
+     *
+     * hp::DoFHandler<dim,spacedim>   hp_dof_handler(triangulation);
+     * hp::FECollection<dim,spacedim> fe_collection;
+     * //[prepare identical fe_collection...]
+     * hp_dof_handler.distribute_dofs(fe_collection);
+     *
+     * parallel::distributed::ActiveFEIndicesTransfer<dim,spacedim>
+     *   feidx_trans(hp_dof_handler);
+     * feidx_trans.deserialize();
+     * hp_dof_handler.distribute_dofs(fe_collection);
+     *
+     * parallel::distributed::
+     *   SolutionTransfer<dim,VectorType,hp::DoFHandler<dim,spacedim>>
+     *     sol_trans(hp_dof_handler);
+     * sol_trans.deserialize(distributed_vector);
+     * @endcode
+     *
+     *
      * <h3>Interaction with hanging nodes</h3>
      *
      * In essence, this class implements the same steps as does
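
For the `//[create VectorType in the right size here ...]` placeholder in
the refinement snippet above, one possibility (an illustrative assumption,
using TrilinosWrappers::MPI::Vector and the `mpi_communicator` from the
earlier snippets) is to size the output vector by the DoFs that are
locally owned after refinement:

@code
// Query the DoFs this process owns on the refined mesh and reinit the
// output vector accordingly before interpolating.
IndexSet locally_owned_dofs = hp_dof_handler.locally_owned_dofs();

TrilinosWrappers::MPI::Vector interpolated_solution;
interpolated_solution.reinit(locally_owned_dofs, mpi_communicator);

sol_trans.interpolate(interpolated_solution);
@endcode
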
index f4a3744c12cad14562b5b1a74a12b1a54d65d781..585aa54c0109a6ef82e0418eb639146469c1f2af 100644 (file)
@@ -315,6 +315,11 @@ public:
    */
   static const unsigned int space_dimension = spacedim;
 
+  /**
+   * Make the kind of this DoFHandler available in function templates.
+   */
+  static const bool is_hp_dof_handler = false;
+
   /**
    * When the arrays holding the DoF indices are set up, but before they are
    * filled with actual values, they are set to an invalid value, in order to
index a314768fc55ac259bc4305ee791c57b7fac2975f..5aef145d0c91d99ac9f4c5c2956f74ea3c0098f6 100644 (file)
@@ -237,6 +237,11 @@ namespace hp
      */
     static const unsigned int space_dimension = spacedim;
 
+    /**
+     * Make the kind of this DoFHandler available in function templates.
+     */
+    static const bool is_hp_dof_handler = true;
+
     /**
      * When the arrays holding the DoF indices are set up, but before they are
      * filled with actual values, they are set to an invalid value, in order
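
Both of these constants serve the same purpose: they let function
templates branch on the kind of DoFHandler at compile time, which is how
solution_transfer.cc below selects between the fixed and variable size
transfer paths. A minimal sketch (the helper name
`uses_variable_size_transfer` is hypothetical):

@code
// Hypothetical helper, for illustration only: a function template can
// query the trait without knowing the concrete handler class.
template <typename DoFHandlerType>
bool uses_variable_size_transfer(const DoFHandlerType &)
{
  return DoFHandlerType::is_hp_dof_handler;
}
@endcode
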
index 1aef15b15ba461ff6b7e59aa4cc9e776a0713e24..40ebf5782365bd147f85fa06db979139f44624d1 100644 (file)
@@ -27,6 +27,8 @@
 #  include <deal.II/grid/tria_accessor.h>
 #  include <deal.II/grid/tria_iterator.h>
 
+#  include <deal.II/hp/dof_handler.h>
+
 #  include <deal.II/lac/block_vector.h>
 #  include <deal.II/lac/la_parallel_block_vector.h>
 #  include <deal.II/lac/la_parallel_vector.h>
@@ -91,7 +93,7 @@ namespace parallel
           this,
           std::placeholders::_1,
           std::placeholders::_2),
-        /*returns_variable_size_data=*/false);
+        /*returns_variable_size_data=*/DoFHandlerType::is_hp_dof_handler);
     }
 
 
@@ -207,8 +209,9 @@ namespace parallel
     SolutionTransfer<dim, VectorType, DoFHandlerType>::pack_callback(
       const typename Triangulation<dim, DoFHandlerType::space_dimension>::
         cell_iterator &cell_,
-      const typename Triangulation<dim, DoFHandlerType::space_dimension>::
-        CellStatus /*status*/)
+      const typename Triangulation<dim,
+                                   DoFHandlerType::space_dimension>::CellStatus
+        status)
     {
       typename DoFHandlerType::cell_iterator cell(*cell_, dof_handler);
 
@@ -216,17 +219,87 @@ namespace parallel
       std::vector<::dealii::Vector<typename VectorType::value_type>> dofvalues(
         input_vectors.size());
 
-      auto cit_input = input_vectors.cbegin();
-      auto it_output = dofvalues.begin();
-      for (; cit_input != input_vectors.cend(); ++cit_input, ++it_output)
+      if (DoFHandlerType::is_hp_dof_handler)
         {
-          it_output->reinit(cell->get_fe().dofs_per_cell);
-          cell->get_interpolated_dof_values(*(*cit_input), *it_output);
+          unsigned int dofs_per_cell = numbers::invalid_unsigned_int;
+          unsigned int fe_index      = numbers::invalid_unsigned_int;
+
+          switch (status)
+            {
+              case parallel::distributed::Triangulation<
+                dim,
+                DoFHandlerType::space_dimension>::CELL_PERSIST:
+              case parallel::distributed::Triangulation<
+                dim,
+                DoFHandlerType::space_dimension>::CELL_REFINE:
+                {
+                  dofs_per_cell = cell->get_fe().dofs_per_cell;
+                  fe_index      = cell->active_fe_index();
+                  break;
+                }
+
+              case parallel::distributed::Triangulation<
+                dim,
+                DoFHandlerType::space_dimension>::CELL_COARSEN:
+                {
+                  // In case of coarsening, we need to find a suitable fe index
+                  // for the parent cell. We choose the 'least face dominating
+                  // fe index' of all children from the associated FECollection.
+                  std::set<unsigned int> fe_indices_children;
+                  for (unsigned int child_index = 0;
+                       child_index < GeometryInfo<dim>::max_children_per_cell;
+                       ++child_index)
+                    fe_indices_children.insert(
+                      cell->child(child_index)->active_fe_index());
+
+                  fe_index =
+                    dof_handler->get_fe_collection()
+                      .find_least_face_dominating_fe(fe_indices_children);
+
+                  Assert(
+                    fe_index != numbers::invalid_unsigned_int,
+                    ExcMessage(
+                      "No FiniteElement has been found in your FECollection "
+                      "that dominates all children of a cell you are trying "
+                      "to coarsen!"));
+
+                  dofs_per_cell = dof_handler->get_fe(fe_index).dofs_per_cell;
+                }
+                break;
+
+              default:
+                Assert(false, ExcInternalError());
+                break;
+            }
+
+          auto it_input  = input_vectors.cbegin();
+          auto it_output = dofvalues.begin();
+          for (; it_input != input_vectors.cend(); ++it_input, ++it_output)
+            {
+              it_output->reinit(dofs_per_cell);
+              cell->get_interpolated_dof_values(*(*it_input),
+                                                *it_output,
+                                                fe_index);
+            }
+        }
+      else
+        {
+          auto it_input  = input_vectors.cbegin();
+          auto it_output = dofvalues.begin();
+          for (; it_input != input_vectors.cend(); ++it_input, ++it_output)
+            {
+              it_output->reinit(cell->get_fe().dofs_per_cell);
+              cell->get_interpolated_dof_values(*(*it_input), *it_output);
+            }
         }
 
-      // to get consistent data sizes on each cell for the fixed size transfer,
-      // we won't allow compression
-      return Utilities::pack(dofvalues, /*allow_compression=*/false);
+      // We choose whether to compress the packed data depending on the
+      // registered DoFHandler object. In the hp case, we write into the
+      // variable size buffer and can thus allow compression. Otherwise, we
+      // use the fixed size buffer, which requires consistent data sizes on
+      // each cell, so we leave the data uncompressed.
+      return Utilities::pack(
+        dofvalues, /*allow_compression=*/DoFHandlerType::is_hp_dof_handler);
     }
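
The symmetry between pack_callback() and unpack_callback() matters here:
both must pass the same `allow_compression` flag, which this patch ties to
the handler kind. A minimal round-trip sketch (an assumption for
illustration, with `double` standing in for `VectorType::value_type`
inside a function template like the ones above):

@code
// Pack on the old mesh and unpack on the new one with matching flags;
// a mismatch would corrupt the deserialized stream.
const bool compress = DoFHandlerType::is_hp_dof_handler;

const std::vector<char> buffer = Utilities::pack(dofvalues, compress);

const auto restored =
  Utilities::unpack<std::vector<dealii::Vector<double>>>(buffer.begin(),
                                                         buffer.end(),
                                                         compress);
@endcode
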
 
 
@@ -236,8 +309,9 @@ namespace parallel
     SolutionTransfer<dim, VectorType, DoFHandlerType>::unpack_callback(
       const typename Triangulation<dim, DoFHandlerType::space_dimension>::
         cell_iterator &cell_,
-      const typename Triangulation<dim, DoFHandlerType::space_dimension>::
-        CellStatus /*status*/,
+      const typename Triangulation<dim,
+                                   DoFHandlerType::space_dimension>::CellStatus
+        status,
       const boost::iterator_range<std::vector<char>::const_iterator>
         &                        data_range,
       std::vector<VectorType *> &all_out)
@@ -247,27 +321,93 @@ namespace parallel
       const std::vector<::dealii::Vector<typename VectorType::value_type>>
         dofvalues = Utilities::unpack<
           std::vector<::dealii::Vector<typename VectorType::value_type>>>(
-          data_range.begin(), data_range.end(), /*allow_compression=*/false);
+          data_range.begin(),
+          data_range.end(),
+          /*allow_compression=*/DoFHandlerType::is_hp_dof_handler);
 
       // check if sizes match
       Assert(dofvalues.size() == all_out.size(), ExcInternalError());
 
-      // check if we have enough dofs provided by the FE object
-      // to interpolate the transferred data correctly
-      for (auto it_dofvalues = dofvalues.begin();
-           it_dofvalues != dofvalues.end();
-           ++it_dofvalues)
-        Assert(
-          cell->get_fe().dofs_per_cell == it_dofvalues->size(),
-          ExcMessage(
-            "The transferred data was packed with a different number of dofs than the"
-            "currently registered FE object assigned to the DoFHandler has."));
-
-      // distribute data for each registered vector on mesh
-      auto it_input  = dofvalues.cbegin();
-      auto it_output = all_out.begin();
-      for (; it_input != dofvalues.cend(); ++it_input, ++it_output)
-        cell->set_dof_values_by_interpolation(*it_input, *(*it_output));
+      if (DoFHandlerType::is_hp_dof_handler)
+        {
+          unsigned int fe_index = numbers::invalid_unsigned_int;
+
+          switch (status)
+            {
+              case parallel::distributed::Triangulation<
+                dim,
+                DoFHandlerType::space_dimension>::CELL_PERSIST:
+              case parallel::distributed::Triangulation<
+                dim,
+                DoFHandlerType::space_dimension>::CELL_COARSEN:
+                {
+                  fe_index = cell->active_fe_index();
+                  break;
+                }
+
+              case parallel::distributed::Triangulation<
+                dim,
+                DoFHandlerType::space_dimension>::CELL_REFINE:
+                {
+                  // After refinement, this particular cell is no longer active,
+                  // and its children have inherited its fe index. However, to
+                  // unpack the data on the old cell, we need to recover its fe
+                  // index from one of the children. Just to be sure, we also
+                  // check if all children have the same fe index.
+                  fe_index = cell->child(0)->active_fe_index();
+                  for (unsigned int child_index = 1;
+                       child_index < GeometryInfo<dim>::max_children_per_cell;
+                       ++child_index)
+                    Assert(cell->child(child_index)->active_fe_index() ==
+                             fe_index,
+                           ExcInternalError());
+                  break;
+                }
+
+              default:
+                Assert(false, ExcInternalError());
+                break;
+            }
+
+          // check if we have enough dofs provided by the FE object
+          // to interpolate the transferred data correctly
+          for (auto it_dofvalues = dofvalues.begin();
+               it_dofvalues != dofvalues.end();
+               ++it_dofvalues)
+            Assert(
+              dof_handler->get_fe(fe_index).dofs_per_cell ==
+                it_dofvalues->size(),
+              ExcMessage(
+                "The transferred data was packed with a different number of dofs than the"
+                "currently registered FE object assigned to the DoFHandler has."));
+
+          // distribute data for each registered vector on mesh
+          auto it_input  = dofvalues.cbegin();
+          auto it_output = all_out.begin();
+          for (; it_input != dofvalues.cend(); ++it_input, ++it_output)
+            cell->set_dof_values_by_interpolation(*it_input,
+                                                  *(*it_output),
+                                                  fe_index);
+        }
+      else
+        {
+          // check if we have enough dofs provided by the FE object
+          // to interpolate the transferred data correctly
+          for (auto it_dofvalues = dofvalues.begin();
+               it_dofvalues != dofvalues.end();
+               ++it_dofvalues)
+            Assert(
+              cell->get_fe().dofs_per_cell == it_dofvalues->size(),
+              ExcMessage(
+                "The transferred data was packed with a different number of dofs than the"
+                "currently registered FE object assigned to the DoFHandler has."));
+
+          // distribute data for each registered vector on mesh
+          auto it_input  = dofvalues.cbegin();
+          auto it_output = all_out.begin();
+          for (; it_input != dofvalues.cend(); ++it_input, ++it_output)
+            cell->set_dof_values_by_interpolation(*it_input, *(*it_output));
+        }
     }
 
 
index 0396a91b99acdb11c13b6d4f9766309d57d5a026..2a67cedaf72df7df3cb120873e3adefffae56272 100644 (file)
@@ -43,17 +43,46 @@ for (deal_II_dimension : DIMENSIONS; deal_II_space_dimension : SPACE_DIMENSIONS)
           ::dealii::LinearAlgebra::distributed::BlockVector<float>,
           DoFHandler<deal_II_dimension, deal_II_space_dimension>>;
 
+        template class SolutionTransfer<
+          deal_II_dimension,
+          ::dealii::Vector<double>,
+          hp::DoFHandler<deal_II_dimension, deal_II_space_dimension>>;
+        template class SolutionTransfer<
+          deal_II_dimension,
+          ::dealii::LinearAlgebra::distributed::Vector<double>,
+          hp::DoFHandler<deal_II_dimension, deal_II_space_dimension>>;
+        template class SolutionTransfer<
+          deal_II_dimension,
+          ::dealii::LinearAlgebra::distributed::Vector<float>,
+          hp::DoFHandler<deal_II_dimension, deal_II_space_dimension>>;
+        template class SolutionTransfer<
+          deal_II_dimension,
+          ::dealii::LinearAlgebra::distributed::BlockVector<double>,
+          hp::DoFHandler<deal_II_dimension, deal_II_space_dimension>>;
+        template class SolutionTransfer<
+          deal_II_dimension,
+          ::dealii::LinearAlgebra::distributed::BlockVector<float>,
+          hp::DoFHandler<deal_II_dimension, deal_II_space_dimension>>;
+
 
 #  ifdef DEAL_II_WITH_PETSC
         template class SolutionTransfer<
           deal_II_dimension,
           PETScWrappers::MPI::Vector,
           DoFHandler<deal_II_dimension, deal_II_space_dimension>>;
-
         template class SolutionTransfer<
           deal_II_dimension,
           PETScWrappers::MPI::BlockVector,
           DoFHandler<deal_II_dimension, deal_II_space_dimension>>;
+
+        template class SolutionTransfer<
+          deal_II_dimension,
+          PETScWrappers::MPI::Vector,
+          hp::DoFHandler<deal_II_dimension, deal_II_space_dimension>>;
+        template class SolutionTransfer<
+          deal_II_dimension,
+          PETScWrappers::MPI::BlockVector,
+          hp::DoFHandler<deal_II_dimension, deal_II_space_dimension>>;
 #  endif
 
 #  ifdef DEAL_II_WITH_TRILINOS
@@ -61,11 +90,19 @@ for (deal_II_dimension : DIMENSIONS; deal_II_space_dimension : SPACE_DIMENSIONS)
           deal_II_dimension,
           TrilinosWrappers::MPI::Vector,
           DoFHandler<deal_II_dimension, deal_II_space_dimension>>;
-
         template class SolutionTransfer<
           deal_II_dimension,
           TrilinosWrappers::MPI::BlockVector,
           DoFHandler<deal_II_dimension, deal_II_space_dimension>>;
+
+        template class SolutionTransfer<
+          deal_II_dimension,
+          TrilinosWrappers::MPI::Vector,
+          hp::DoFHandler<deal_II_dimension, deal_II_space_dimension>>;
+        template class SolutionTransfer<
+          deal_II_dimension,
+          TrilinosWrappers::MPI::BlockVector,
+          hp::DoFHandler<deal_II_dimension, deal_II_space_dimension>>;
 #  endif
 
 #endif
diff --git a/tests/mpi/p4est_save_06.cc b/tests/mpi/p4est_save_06.cc
new file mode 100644 (file)
index 0000000..96eafd5
--- /dev/null
@@ -0,0 +1,177 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+// save and load a triangulation with a different number of CPUs
+// using variable size data transfer with an associated hp::DoFHandler
+
+#include <deal.II/base/tensor.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/distributed/active_fe_indices_transfer.h>
+#include <deal.II/distributed/solution_transfer.h>
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+
+#include <deal.II/lac/petsc_parallel_vector.h>
+
+#include "../tests.h"
+
+
+
+template <int dim>
+void
+test()
+{
+  unsigned int myid    = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+  MPI_Comm     com_all = MPI_COMM_WORLD;
+  MPI_Comm     com_small;
+
+  // split the communicator into procs 0,1,2 and 3,4
+  MPI_Comm_split(com_all, (myid < 3) ? 0 : 1, myid, &com_small);
+
+  // write with small com
+  if (myid < 3)
+    {
+      deallog << "writing with " << Utilities::MPI::n_mpi_processes(com_small)
+              << std::endl;
+
+      parallel::distributed::Triangulation<dim> tr(com_small);
+      GridGenerator::hyper_cube(tr);
+      tr.refine_global(2);
+      for (typename Triangulation<dim>::active_cell_iterator cell =
+             tr.begin_active();
+           cell != tr.end();
+           ++cell)
+        if (!cell->is_ghost() && !cell->is_artificial())
+          if (cell->center().norm() < 0.3)
+            {
+              cell->set_refine_flag();
+            }
+
+      tr.execute_coarsening_and_refinement();
+
+      hp::DoFHandler<dim>   dh(tr);
+      hp::FECollection<dim> fe_collection;
+
+      // prepare FECollection with arbitrary number of entries
+      const unsigned int max_degree = 1 + std::pow(2, dim);
+      for (unsigned int i = 0; i < max_degree; ++i)
+        fe_collection.push_back(FE_Q<dim>(max_degree - i));
+
+      dh.distribute_dofs(fe_collection);
+
+      IndexSet locally_owned_dofs = dh.locally_owned_dofs();
+      IndexSet locally_relevant_dofs;
+      DoFTools::extract_locally_relevant_dofs(dh, locally_relevant_dofs);
+
+      PETScWrappers::MPI::Vector x(locally_owned_dofs, com_small);
+      PETScWrappers::MPI::Vector rel_x(locally_owned_dofs,
+                                       locally_relevant_dofs,
+                                       com_small);
+
+      parallel::distributed::ActiveFEIndicesTransfer<dim, dim> feidx_transfer(
+        dh);
+      parallel::distributed::
+        SolutionTransfer<dim, PETScWrappers::MPI::Vector, hp::DoFHandler<dim>>
+          soltrans(dh);
+
+      for (unsigned int i = 0; i < locally_owned_dofs.n_elements(); ++i)
+        {
+          unsigned int idx = locally_owned_dofs.nth_index_in_set(i);
+          x(idx)           = idx;
+        }
+
+
+      x.compress(VectorOperation::insert);
+      rel_x = x;
+
+      feidx_transfer.prepare_for_transfer();
+      soltrans.prepare_serialization(rel_x);
+
+      tr.save("file");
+      deallog << "#cells: " << tr.n_global_active_cells() << std::endl
+              << "norm: " << x.l2_norm() << std::endl
+              << "Checksum: " << tr.get_checksum() << std::endl;
+    }
+
+  MPI_Barrier(MPI_COMM_WORLD);
+
+  deallog << "reading with " << Utilities::MPI::n_mpi_processes(com_all)
+          << std::endl;
+
+  {
+    parallel::distributed::Triangulation<dim> tr(com_all);
+
+    GridGenerator::hyper_cube(tr);
+    tr.load("file");
+
+    hp::DoFHandler<dim>   dh(tr);
+    hp::FECollection<dim> fe_collection;
+
+    // prepare FECollection with arbitrary number of entries
+    const unsigned int max_degree = 1 + std::pow(2, dim);
+    for (unsigned int i = 0; i < max_degree; ++i)
+      fe_collection.push_back(FE_Q<dim>(max_degree - i));
+
+    dh.distribute_dofs(fe_collection);
+
+    parallel::distributed::ActiveFEIndicesTransfer<dim> feidx_transfer(dh);
+    feidx_transfer.deserialize();
+
+    dh.distribute_dofs(fe_collection);
+
+    IndexSet locally_owned_dofs = dh.locally_owned_dofs();
+    IndexSet locally_relevant_dofs;
+
+    DoFTools::extract_locally_relevant_dofs(dh, locally_relevant_dofs);
+
+    PETScWrappers::MPI::Vector solution(locally_owned_dofs, com_all);
+    solution = PetscScalar();
+
+    parallel::distributed::
+      SolutionTransfer<dim, PETScWrappers::MPI::Vector, hp::DoFHandler<dim>>
+        soltrans(dh);
+
+    soltrans.deserialize(solution);
+
+    deallog << "#cells: " << tr.n_global_active_cells() << std::endl
+            << "norm: " << solution.l2_norm() << std::endl
+            << "Checksum: " << tr.get_checksum() << std::endl;
+  }
+
+  deallog << "OK" << std::endl;
+}
+
+
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll                    log;
+
+  test<2>();
+}
diff --git a/tests/mpi/p4est_save_06.with_p4est=true.with_petsc=true.mpirun=5.output b/tests/mpi/p4est_save_06.with_p4est=true.with_petsc=true.mpirun=5.output
new file mode 100644 (file)
index 0000000..95faa23
--- /dev/null
@@ -0,0 +1,46 @@
+
+DEAL:0::writing with 3
+DEAL:0::#cells: 19
+DEAL:0::norm: 7114.44
+DEAL:0::Checksum: 136119115
+DEAL:0::reading with 5
+DEAL:0::#cells: 19
+DEAL:0::norm: 7114.44
+DEAL:0::Checksum: 136119115
+DEAL:0::OK
+
+DEAL:1::writing with 3
+DEAL:1::#cells: 19
+DEAL:1::norm: 7114.44
+DEAL:1::Checksum: 0
+DEAL:1::reading with 5
+DEAL:1::#cells: 19
+DEAL:1::norm: 7114.44
+DEAL:1::Checksum: 0
+DEAL:1::OK
+
+
+DEAL:2::writing with 3
+DEAL:2::#cells: 19
+DEAL:2::norm: 7114.44
+DEAL:2::Checksum: 0
+DEAL:2::reading with 5
+DEAL:2::#cells: 19
+DEAL:2::norm: 7114.44
+DEAL:2::Checksum: 0
+DEAL:2::OK
+
+
+DEAL:3::reading with 5
+DEAL:3::#cells: 19
+DEAL:3::norm: 7114.44
+DEAL:3::Checksum: 0
+DEAL:3::OK
+
+
+DEAL:4::reading with 5
+DEAL:4::#cells: 19
+DEAL:4::norm: 7114.44
+DEAL:4::Checksum: 0
+DEAL:4::OK
+
diff --git a/tests/mpi/solution_transfer_04.cc b/tests/mpi/solution_transfer_04.cc
new file mode 100644 (file)
index 0000000..d800458
--- /dev/null
@@ -0,0 +1,143 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+// Test to check if SolutionTransfer works in parallel with hp::DoFHandler.
+// This test is based on mpi/feindices_transfer.cc
+
+
+#include <deal.II/distributed/active_fe_indices_transfer.h>
+#include <deal.II/distributed/solution_transfer.h>
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+
+#include <deal.II/grid/grid_generator.h>
+
+#include <deal.II/hp/dof_handler.h>
+
+#include <deal.II/lac/trilinos_vector.h>
+
+#include "../tests.h"
+
+
+template <int dim>
+void
+test()
+{
+  const unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+
+  // ------ setup ------
+  parallel::distributed::Triangulation<dim> tria(MPI_COMM_WORLD);
+
+  GridGenerator::subdivided_hyper_cube(tria, 2);
+  tria.refine_global(1);
+
+  hp::DoFHandler<dim>   dh(tria);
+  hp::FECollection<dim> fe_collection;
+
+  // prepare FECollection with arbitrary number of entries
+  const unsigned int max_degree = 1 + std::pow(2, dim);
+  for (unsigned int i = 0; i < max_degree; ++i)
+    fe_collection.push_back(FE_Q<dim>(max_degree - i));
+
+  typename hp::DoFHandler<dim, dim>::active_cell_iterator cell;
+  unsigned int                                            i = 0;
+
+  for (cell = dh.begin_active(); cell != dh.end(); ++cell)
+    {
+      if (cell->is_locally_owned())
+        {
+          // set active fe index
+          if (!(cell->is_artificial()))
+            {
+              if (i >= fe_collection.size())
+                i = 0;
+              cell->set_active_fe_index(i++);
+            }
+
+          // set refinement/coarsening flags
+          if (cell->id().to_string() == "0_1:0")
+            cell->set_refine_flag();
+          else if (cell->parent()->id().to_string() ==
+                   ((dim == 2) ? "3_0:" : "7_0:"))
+            cell->set_coarsen_flag();
+        }
+    }
+
+
+  // ----- prepare solution -----
+  dh.distribute_dofs(fe_collection);
+  IndexSet locally_owned_dofs = dh.locally_owned_dofs();
+  IndexSet locally_relevant_dofs;
+  DoFTools::extract_locally_relevant_dofs(dh, locally_relevant_dofs);
+
+  TrilinosWrappers::MPI::Vector solution;
+  solution.reinit(locally_owned_dofs, MPI_COMM_WORLD);
+  for (unsigned int i = 0; i < solution.size(); ++i)
+    if (locally_owned_dofs.is_element(i))
+      solution(i) = i;
+
+  TrilinosWrappers::MPI::Vector old_solution;
+  old_solution.reinit(locally_owned_dofs,
+                      locally_relevant_dofs,
+                      MPI_COMM_WORLD);
+  old_solution = solution;
+
+
+  // ----- transfer -----
+  parallel::distributed::ActiveFEIndicesTransfer<dim> feidx_transfer(dh);
+  parallel::distributed::
+    SolutionTransfer<dim, TrilinosWrappers::MPI::Vector, hp::DoFHandler<dim>>
+      soltrans(dh);
+
+  feidx_transfer.prepare_for_transfer();
+  soltrans.prepare_for_coarsening_and_refinement(old_solution);
+  tria.execute_coarsening_and_refinement();
+
+  feidx_transfer.unpack();
+
+  dh.distribute_dofs(fe_collection);
+  locally_owned_dofs = dh.locally_owned_dofs();
+  DoFTools::extract_locally_relevant_dofs(dh, locally_relevant_dofs);
+
+  TrilinosWrappers::MPI::Vector new_solution;
+  new_solution.reinit(locally_owned_dofs, MPI_COMM_WORLD);
+  soltrans.interpolate(new_solution);
+
+  // make sure no processor is hanging
+  MPI_Barrier(MPI_COMM_WORLD);
+
+  deallog << "OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll                    log;
+
+  deallog.push("2d");
+  test<2>();
+  deallog.pop();
+  deallog.push("3d");
+  test<3>();
+  deallog.pop();
+}
diff --git a/tests/mpi/solution_transfer_04.with_p4est=true.with_trilinos=true.mpirun=2.output b/tests/mpi/solution_transfer_04.with_p4est=true.with_trilinos=true.mpirun=2.output
new file mode 100644 (file)
index 0000000..0a49536
--- /dev/null
@@ -0,0 +1,7 @@
+
+DEAL:0:2d:: OK
+DEAL:0:3d:: OK
+
+DEAL:1:2d:: OK
+DEAL:1:3d:: OK
+
