From f34ac783cf6b29efaee0a77f72e1193464f03019 Mon Sep 17 00:00:00 2001 From: Marc Fehling Date: Thu, 19 Jul 2018 14:22:40 -0600 Subject: [PATCH] Enable 'hp::DofHandler' for 'p::d::SolutionTransfer'. --- doc/news/changes/major/20180815Fehling | 6 + .../distributed/active_fe_indices_transfer.h | 6 + .../deal.II/distributed/solution_transfer.h | 114 +++++++++- include/deal.II/dofs/dof_handler.h | 5 + include/deal.II/hp/dof_handler.h | 5 + source/distributed/solution_transfer.cc | 200 +++++++++++++++--- source/distributed/solution_transfer.inst.in | 41 +++- tests/mpi/p4est_save_06.cc | 177 ++++++++++++++++ ...p4est=true.with_petsc=true.mpirun=5.output | 46 ++++ tests/mpi/solution_transfer_04.cc | 143 +++++++++++++ ...st=true.with_trilinos=true.mpirun=2.output | 7 + 11 files changed, 715 insertions(+), 35 deletions(-) create mode 100644 doc/news/changes/major/20180815Fehling create mode 100644 tests/mpi/p4est_save_06.cc create mode 100644 tests/mpi/p4est_save_06.with_p4est=true.with_petsc=true.mpirun=5.output create mode 100644 tests/mpi/solution_transfer_04.cc create mode 100644 tests/mpi/solution_transfer_04.with_p4est=true.with_trilinos=true.mpirun=2.output diff --git a/doc/news/changes/major/20180815Fehling b/doc/news/changes/major/20180815Fehling new file mode 100644 index 0000000000..d93218212e --- /dev/null +++ b/doc/news/changes/major/20180815Fehling @@ -0,0 +1,6 @@ +New: The parallel::distributed::SolutionTransfer class is now capable +of transferring data that has been associated with a hp::DoFHandler +object. See the class documentation of the former on how to use this +new functionality. +
+(Marc Fehling, 2018/08/15) diff --git a/include/deal.II/distributed/active_fe_indices_transfer.h b/include/deal.II/distributed/active_fe_indices_transfer.h index 3409082b7d..3392015b76 100644 --- a/include/deal.II/distributed/active_fe_indices_transfer.h +++ b/include/deal.II/distributed/active_fe_indices_transfer.h @@ -38,6 +38,12 @@ namespace parallel * cell what parallel::distributed::SolutionTransfer does for the values * of degrees of freedom defined on a parallel::distributed::Triangulation. * + * If refinement is involved in the data transfer process, the children of + * a refined cell inherit the `active_fe_index` from their parent. If + * cells get coarsened into one, the latter will get the least dominating + * `active_fe_index` amongst its children, as determined by the function + * hp::FECollection::find_least_face_dominating_fe(). + * * @note If you use more than one object to attach data to a * parallel::distributed::Triangulation at the same time (e.g. a * parallel::distributed::SolutionTransfer object), the calls to diff --git a/include/deal.II/distributed/solution_transfer.h b/include/deal.II/distributed/solution_transfer.h index d358a01828..925769dcae 100644 --- a/include/deal.II/distributed/solution_transfer.h +++ b/include/deal.II/distributed/solution_transfer.h @@ -52,7 +52,7 @@ namespace parallel * *
 *
 * <h3>Transferring a solution</h3>
 *
Here VectorType is your favorite * vector type, e.g. PETScWrappers::MPI::Vector, - * TrilinosWrappers::MPI::Vector, or corresponding blockvectors. + * TrilinosWrappers::MPI::Vector, or corresponding block vectors. * @code * SolutionTransfer soltrans(dof_handler); * // flag some cells for refinement and coarsening, e.g. @@ -106,8 +106,9 @@ namespace parallel * solution.reinit(locally_owned_dofs, * mpi_communicator); * ... - * // Transfer solution to vector that provides access to locally relevant - * DoFs TrilinosWrappers::MPI::Vector old_solution; + * // Transfer solution to vector that provides access to + * // locally relevant DoFs + * TrilinosWrappers::MPI::Vector old_solution; * old_solution.reinit(locally_owned_dofs, * locally_relevant_dofs, * mpi_communicator); @@ -148,6 +149,113 @@ namespace parallel * @endcode * * + *
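+ * Several vectors can also be transferred at once, using the overloads of
+ * prepare_for_coarsening_and_refinement() and interpolate() that take a
+ * vector of pointers. A minimal sketch, assuming two ghosted input vectors
+ * @p old_solution_1 and @p old_solution_2 as well as two result vectors
+ * @p new_solution_1 and @p new_solution_2, could look like this:
+ * @code
+ * std::vector<const VectorType *> all_in(2);
+ * all_in[0] = &old_solution_1;
+ * all_in[1] = &old_solution_2;
+ * soltrans.prepare_for_coarsening_and_refinement(all_in);
+ * triangulation.execute_coarsening_and_refinement();
+ * dof_handler.distribute_dofs(fe);
+ *
+ * //[reinit new_solution_1 and new_solution_2 to the new index sets ...]
+ *
+ * std::vector<VectorType *> all_out(2);
+ * all_out[0] = &new_solution_1;
+ * all_out[1] = &new_solution_2;
+ * soltrans.interpolate(all_out);
+ * @endcode
+ *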
+ * <h3>Note on usage with hp::DoFHandler</h3>
+ *
+ *
+ * If an object of the hp::DoFHandler class is registered with an
+ * instantiation of this parallel::distributed::SolutionTransfer
+ * class, a few additional requirements have to be met, which are
+ * described in the following.
+ *
+ * Since data on hp::DoFHandler objects is associated with many different
+ * FiniteElement objects, each cell's data has to be processed with its
+ * corresponding `active_fe_index`. Further, if refinement is involved,
+ * data will be packed on the parent cell with its `active_fe_index` and
+ * unpacked later with the same index on its children. If cells get
+ * coarsened into one, data will be packed on the latter with the least
+ * dominating `active_fe_index` amongst its children, as determined by the
+ * function hp::FECollection::find_least_face_dominating_fe(), and unpacked
+ * later on the same cell with the same index.
+ *
+ * Code snippets to demonstrate the usage of the
+ * parallel::distributed::SolutionTransfer class with hp::DoFHandler
+ * objects are provided in the following. Here VectorType
+ * is your favorite vector type, e.g. PETScWrappers::MPI::Vector,
+ * TrilinosWrappers::MPI::Vector, or corresponding block vectors.
+ *
+ * After refinement, the order in which to unpack the transferred data
+ * is important:
+ * @code
+ * //[prepare triangulation for refinement ...]
+ *
+ * parallel::distributed::ActiveFEIndicesTransfer<dim, spacedim>
+ *   feidx_trans(hp_dof_handler);
+ * parallel::distributed::
+ *   SolutionTransfer<dim, VectorType, hp::DoFHandler<dim, spacedim>>
+ *     sol_trans(hp_dof_handler);
+ *
+ * feidx_trans.prepare_for_transfer();
+ * sol_trans.prepare_for_coarsening_and_refinement(solution);
+ * triangulation.execute_coarsening_and_refinement();
+ *
+ * feidx_trans.unpack();
+ * hp_dof_handler.distribute_dofs(fe_collection);
+ *
+ * VectorType interpolated_solution;
+ * //[create VectorType in the right size here ...]
+ * sol_trans.interpolate(interpolated_solution);
+ * @endcode
+ *
+ * If the vector has the locally relevant DoFs, serialization works as
+ * follows:
+ * @code
+ * parallel::distributed::ActiveFEIndicesTransfer<dim, spacedim>
+ *   feidx_trans(hp_dof_handler);
+ * parallel::distributed::
+ *   SolutionTransfer<dim, VectorType, hp::DoFHandler<dim, spacedim>>
+ *     sol_trans(hp_dof_handler);
+ *
+ * feidx_trans.prepare_for_transfer();
+ * sol_trans.prepare_serialization(vector);
+ *
+ * triangulation.save(filename);
+ * @endcode
+ *
+ * For deserialization, the vector needs to be a distributed vector
+ * (without ghost elements):
+ * @code
+ * //[create coarse mesh...]
+ * triangulation.load(filename);
+ *
+ * hp::DoFHandler<dim, spacedim> hp_dof_handler(triangulation);
+ * hp::FECollection<dim, spacedim> fe_collection;
+ * //[prepare identical fe_collection...]
+ * hp_dof_handler.distribute_dofs(fe_collection);
+ *
+ * parallel::distributed::ActiveFEIndicesTransfer<dim, spacedim>
+ *   feidx_trans(hp_dof_handler);
+ * feidx_trans.deserialize();
+ * hp_dof_handler.distribute_dofs(fe_collection);
+ *
+ * parallel::distributed::
+ *   SolutionTransfer<dim, VectorType, hp::DoFHandler<dim, spacedim>>
+ *     sol_trans(hp_dof_handler);
+ * sol_trans.deserialize(distributed_vector);
+ * @endcode
+ *
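+ * The @p distributed_vector handed to deserialize() above has to be set
+ * up accordingly. A minimal sketch, assuming a Trilinos vector and an MPI
+ * communicator @p mpi_communicator, could read:
+ * @code
+ * TrilinosWrappers::MPI::Vector distributed_vector;
+ * distributed_vector.reinit(hp_dof_handler.locally_owned_dofs(),
+ *                           mpi_communicator);
+ * @endcode
+ *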
 *
 * <h3>Interaction with hanging nodes</h3>
 *
* * In essence, this class implements the same steps as does diff --git a/include/deal.II/dofs/dof_handler.h b/include/deal.II/dofs/dof_handler.h index f4a3744c12..585aa54c01 100644 --- a/include/deal.II/dofs/dof_handler.h +++ b/include/deal.II/dofs/dof_handler.h @@ -315,6 +315,11 @@ public: */ static const unsigned int space_dimension = spacedim; + /** + * Make the type of this DoFHandler available in function templates. + */ + static const bool is_hp_dof_handler = false; + /** * When the arrays holding the DoF indices are set up, but before they are * filled with actual values, they are set to an invalid value, in order to diff --git a/include/deal.II/hp/dof_handler.h b/include/deal.II/hp/dof_handler.h index a314768fc5..5aef145d0c 100644 --- a/include/deal.II/hp/dof_handler.h +++ b/include/deal.II/hp/dof_handler.h @@ -237,6 +237,11 @@ namespace hp */ static const unsigned int space_dimension = spacedim; + /** + * Make the type of this DoFHandler available in function templates. + */ + static const bool is_hp_dof_handler = true; + /** * When the arrays holding the DoF indices are set up, but before they are * filled with actual values, they are set to an invalid value, in order diff --git a/source/distributed/solution_transfer.cc b/source/distributed/solution_transfer.cc index 1aef15b15b..40ebf57823 100644 --- a/source/distributed/solution_transfer.cc +++ b/source/distributed/solution_transfer.cc @@ -27,6 +27,8 @@ # include # include +# include + # include # include # include @@ -91,7 +93,7 @@ namespace parallel this, std::placeholders::_1, std::placeholders::_2), - /*returns_variable_size_data=*/false); + /*returns_variable_size_data=*/DoFHandlerType::is_hp_dof_handler); } @@ -207,8 +209,9 @@ namespace parallel SolutionTransfer::pack_callback( const typename Triangulation:: cell_iterator &cell_, - const typename Triangulation:: - CellStatus /*status*/) + const typename Triangulation::CellStatus + status) { typename DoFHandlerType::cell_iterator cell(*cell_, dof_handler); @@ -216,17 +219,87 @@ namespace parallel std::vector<::dealii::Vector> dofvalues( input_vectors.size()); - auto cit_input = input_vectors.cbegin(); - auto it_output = dofvalues.begin(); - for (; cit_input != input_vectors.cend(); ++cit_input, ++it_output) + if (DoFHandlerType::is_hp_dof_handler) { - it_output->reinit(cell->get_fe().dofs_per_cell); - cell->get_interpolated_dof_values(*(*cit_input), *it_output); + unsigned int dofs_per_cell = numbers::invalid_unsigned_int; + unsigned int fe_index = numbers::invalid_unsigned_int; + + switch (status) + { + case parallel::distributed::Triangulation< + dim, + DoFHandlerType::space_dimension>::CELL_PERSIST: + case parallel::distributed::Triangulation< + dim, + DoFHandlerType::space_dimension>::CELL_REFINE: + { + dofs_per_cell = cell->get_fe().dofs_per_cell; + fe_index = cell->active_fe_index(); + break; + } + + case parallel::distributed::Triangulation< + dim, + DoFHandlerType::space_dimension>::CELL_COARSEN: + { + // In case of coarsening, we need to find a suitable fe index + // for the parent cell. We choose the 'least face dominating + // fe index' on all children from the associated FECollection. 
+                std::set<unsigned int> fe_indices_children;
+                for (unsigned int child_index = 0;
+                     child_index < GeometryInfo<dim>::max_children_per_cell;
+                     ++child_index)
+                  fe_indices_children.insert(
+                    cell->child(child_index)->active_fe_index());
+
+                fe_index =
+                  dof_handler->get_fe_collection()
+                    .find_least_face_dominating_fe(fe_indices_children);
+
+                Assert(
+                  fe_index != numbers::invalid_unsigned_int,
+                  ExcMessage(
+                    "No FiniteElement has been found in your FECollection "
+                    "that dominates all children of a cell you are trying "
+                    "to coarsen!"));
+
+                dofs_per_cell = dof_handler->get_fe(fe_index).dofs_per_cell;
+              }
+              break;
+
+            default:
+              Assert(false, ExcInternalError());
+              break;
+          }
+
+        auto it_input  = input_vectors.cbegin();
+        auto it_output = dofvalues.begin();
+        for (; it_input != input_vectors.cend(); ++it_input, ++it_output)
+          {
+            it_output->reinit(dofs_per_cell);
+            cell->get_interpolated_dof_values(*(*it_input),
+                                              *it_output,
+                                              fe_index);
+          }
+      }
+    else
+      {
+        auto it_input  = input_vectors.cbegin();
+        auto it_output = dofvalues.begin();
+        for (; it_input != input_vectors.cend(); ++it_input, ++it_output)
+          {
+            it_output->reinit(cell->get_fe().dofs_per_cell);
+            cell->get_interpolated_dof_values(*(*it_input), *it_output);
+          }
+      }
 
-      // to get consistent data sizes on each cell for the fixed size transfer,
-      // we won't allow compression
-      return Utilities::pack(dofvalues, /*allow_compression=*/false);
+      // We choose to compress the packed data depending on the registered
+      // dofhandler object. In case of hp, we write in the variable size buffer
+      // and thus allow compression. In the other case, we use the fixed size
+      // buffer and require consistent data sizes on each cell, so we leave it
+      // uncompressed.
+      return Utilities::pack(
+        dofvalues, /*allow_compression=*/DoFHandlerType::is_hp_dof_handler);
     }
 
 
 
@@ -236,8 +309,9 @@ namespace parallel
     SolutionTransfer<dim, VectorType, DoFHandlerType>::unpack_callback(
       const typename Triangulation<dim, DoFHandlerType::space_dimension>::
        cell_iterator &cell_,
-      const typename Triangulation<dim, DoFHandlerType::space_dimension>::
-        CellStatus /*status*/,
+      const typename Triangulation<dim, DoFHandlerType::space_dimension>::CellStatus
+        status,
       const boost::iterator_range<std::vector<char>::const_iterator>
         &                        data_range,
       std::vector<VectorType *> &all_out)
@@ -247,27 +321,93 @@ namespace parallel
       const std::vector<::dealii::Vector<typename VectorType::value_type>>
        dofvalues = Utilities::unpack<
          std::vector<::dealii::Vector<typename VectorType::value_type>>>(
-        data_range.begin(), data_range.end(), /*allow_compression=*/false);
+          data_range.begin(),
+          data_range.end(),
+          /*allow_compression=*/DoFHandlerType::is_hp_dof_handler);
 
       // check if sizes match
       Assert(dofvalues.size() == all_out.size(), ExcInternalError());
 
-      // check if we have enough dofs provided by the FE object
-      // to interpolate the transferred data correctly
-      for (auto it_dofvalues = dofvalues.begin();
-           it_dofvalues != dofvalues.end();
-           ++it_dofvalues)
-        Assert(
-          cell->get_fe().dofs_per_cell == it_dofvalues->size(),
-          ExcMessage(
-            "The transferred data was packed with a different number of dofs than the"
-            "currently registered FE object assigned to the DoFHandler has."));
-
-      // distribute data for each registered vector on mesh
-      auto it_input  = dofvalues.cbegin();
-      auto it_output = all_out.begin();
-      for (; it_input != dofvalues.cend(); ++it_input, ++it_output)
-        cell->set_dof_values_by_interpolation(*it_input, *(*it_output));
+      if (DoFHandlerType::is_hp_dof_handler)
+        {
+          unsigned int fe_index = numbers::invalid_unsigned_int;
+
+          switch (status)
+            {
+              case parallel::distributed::Triangulation<
+                dim,
+                DoFHandlerType::space_dimension>::CELL_PERSIST:
+              case parallel::distributed::Triangulation<
+                dim,
+                DoFHandlerType::space_dimension>::CELL_COARSEN:
+                {
+                  fe_index = cell->active_fe_index();
+                  break;
+                }
+
+              case parallel::distributed::Triangulation<
+                dim,
+                DoFHandlerType::space_dimension>::CELL_REFINE:
+                {
+                  // After refinement, this particular cell is no longer active,
+                  // and its children have inherited its fe index. However, to
+                  // unpack the data on the old cell, we need to recover its fe
+                  // index from one of the children. Just to be sure, we also
+                  // check if all children have the same fe index.
+                  fe_index = cell->child(0)->active_fe_index();
+                  for (unsigned int child_index = 1;
+                       child_index < GeometryInfo<dim>::max_children_per_cell;
+                       ++child_index)
+                    Assert(cell->child(child_index)->active_fe_index() ==
+                             fe_index,
+                           ExcInternalError());
+                  break;
+                }
+
+              default:
+                Assert(false, ExcInternalError());
+                break;
+            }
+
+          // check if we have enough dofs provided by the FE object
+          // to interpolate the transferred data correctly
+          for (auto it_dofvalues = dofvalues.begin();
+               it_dofvalues != dofvalues.end();
+               ++it_dofvalues)
+            Assert(
+              dof_handler->get_fe(fe_index).dofs_per_cell ==
+                it_dofvalues->size(),
+              ExcMessage(
+                "The transferred data was packed with a different number of dofs than the "
+                "currently registered FE object assigned to the DoFHandler has."));
+
+          // distribute data for each registered vector on mesh
+          auto it_input  = dofvalues.cbegin();
+          auto it_output = all_out.begin();
+          for (; it_input != dofvalues.cend(); ++it_input, ++it_output)
+            cell->set_dof_values_by_interpolation(*it_input,
+                                                  *(*it_output),
+                                                  fe_index);
+        }
+      else
+        {
+          // check if we have enough dofs provided by the FE object
+          // to interpolate the transferred data correctly
+          for (auto it_dofvalues = dofvalues.begin();
+               it_dofvalues != dofvalues.end();
+               ++it_dofvalues)
+            Assert(
+              cell->get_fe().dofs_per_cell == it_dofvalues->size(),
+              ExcMessage(
+                "The transferred data was packed with a different number of dofs than the "
+                "currently registered FE object assigned to the DoFHandler has."));
+
+          // distribute data for each registered vector on mesh
+          auto it_input  = dofvalues.cbegin();
+          auto it_output = all_out.begin();
+          for (; it_input != dofvalues.cend(); ++it_input, ++it_output)
+            cell->set_dof_values_by_interpolation(*it_input, *(*it_output));
+        }
     }
 
 
diff --git a/source/distributed/solution_transfer.inst.in b/source/distributed/solution_transfer.inst.in
index 0396a91b99..2a67cedaf7 100644
--- a/source/distributed/solution_transfer.inst.in
+++ b/source/distributed/solution_transfer.inst.in
@@ -43,17 +43,46 @@ for (deal_II_dimension : DIMENSIONS; deal_II_space_dimension : SPACE_DIMENSIONS)
     ::dealii::LinearAlgebra::distributed::BlockVector<float>,
     DoFHandler<deal_II_dimension, deal_II_space_dimension>>;
 
+  template class SolutionTransfer<
+    deal_II_dimension,
+    ::dealii::Vector<double>,
+    hp::DoFHandler<deal_II_dimension, deal_II_space_dimension>>;
+  template class SolutionTransfer<
+    deal_II_dimension,
+    ::dealii::LinearAlgebra::distributed::Vector<double>,
+    hp::DoFHandler<deal_II_dimension, deal_II_space_dimension>>;
+  template class SolutionTransfer<
+    deal_II_dimension,
+    ::dealii::LinearAlgebra::distributed::Vector<float>,
+    hp::DoFHandler<deal_II_dimension, deal_II_space_dimension>>;
+  template class SolutionTransfer<
+    deal_II_dimension,
+    ::dealii::LinearAlgebra::distributed::BlockVector<double>,
+    hp::DoFHandler<deal_II_dimension, deal_II_space_dimension>>;
+  template class SolutionTransfer<
+    deal_II_dimension,
+    ::dealii::LinearAlgebra::distributed::BlockVector<float>,
+    hp::DoFHandler<deal_II_dimension, deal_II_space_dimension>>;
+
 #  ifdef DEAL_II_WITH_PETSC
     template class SolutionTransfer<
       deal_II_dimension,
       PETScWrappers::MPI::Vector,
       DoFHandler<deal_II_dimension, deal_II_space_dimension>>;
-
     template class SolutionTransfer<
       deal_II_dimension,
      PETScWrappers::MPI::BlockVector,
      DoFHandler<deal_II_dimension, deal_II_space_dimension>>;
+
+    template class SolutionTransfer<
+ deal_II_dimension, + PETScWrappers::MPI::Vector, + hp::DoFHandler>; + template class SolutionTransfer< + deal_II_dimension, + PETScWrappers::MPI::BlockVector, + hp::DoFHandler>; # endif # ifdef DEAL_II_WITH_TRILINOS @@ -61,11 +90,19 @@ for (deal_II_dimension : DIMENSIONS; deal_II_space_dimension : SPACE_DIMENSIONS) deal_II_dimension, TrilinosWrappers::MPI::Vector, DoFHandler>; - template class SolutionTransfer< deal_II_dimension, TrilinosWrappers::MPI::BlockVector, DoFHandler>; + + template class SolutionTransfer< + deal_II_dimension, + TrilinosWrappers::MPI::Vector, + hp::DoFHandler>; + template class SolutionTransfer< + deal_II_dimension, + TrilinosWrappers::MPI::BlockVector, + hp::DoFHandler>; # endif #endif diff --git a/tests/mpi/p4est_save_06.cc b/tests/mpi/p4est_save_06.cc new file mode 100644 index 0000000000..96eafd518f --- /dev/null +++ b/tests/mpi/p4est_save_06.cc @@ -0,0 +1,177 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2009 - 2017 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + + + +// save and load a triangulation with a different number of cpus +// using variable size data transfer with an associated hp::DoFHandler + +#include +#include + +#include +#include +#include + +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include + +#include "../tests.h" + + + +template +void +test() +{ + unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD); + MPI_Comm com_all = MPI_COMM_WORLD; + MPI_Comm com_small; + + // split the communicator in proc 0,1,2 and 3,4 + MPI_Comm_split(com_all, (myid < 3) ? 
0 : 1, myid, &com_small); + + // write with small com + if (myid < 3) + { + deallog << "writing with " << Utilities::MPI::n_mpi_processes(com_small) + << std::endl; + + parallel::distributed::Triangulation tr(com_small); + GridGenerator::hyper_cube(tr); + tr.refine_global(2); + for (typename Triangulation::active_cell_iterator cell = + tr.begin_active(); + cell != tr.end(); + ++cell) + if (!cell->is_ghost() && !cell->is_artificial()) + if (cell->center().norm() < 0.3) + { + cell->set_refine_flag(); + } + + tr.execute_coarsening_and_refinement(); + + hp::DoFHandler dh(tr); + hp::FECollection fe_collection; + + // prepare FECollection with arbitrary number of entries + const unsigned int max_degree = 1 + std::pow(2, dim); + for (unsigned int i = 0; i < max_degree; ++i) + fe_collection.push_back(FE_Q(max_degree - i)); + + dh.distribute_dofs(fe_collection); + + IndexSet locally_owned_dofs = dh.locally_owned_dofs(); + IndexSet locally_relevant_dofs; + DoFTools::extract_locally_relevant_dofs(dh, locally_relevant_dofs); + + PETScWrappers::MPI::Vector x(locally_owned_dofs, com_small); + PETScWrappers::MPI::Vector rel_x(locally_owned_dofs, + locally_relevant_dofs, + com_small); + + parallel::distributed::ActiveFEIndicesTransfer feidx_transfer( + dh); + parallel::distributed:: + SolutionTransfer> + soltrans(dh); + + for (unsigned int i = 0; i < locally_owned_dofs.n_elements(); ++i) + { + unsigned int idx = locally_owned_dofs.nth_index_in_set(i); + x(idx) = idx; + } + + + x.compress(VectorOperation::insert); + rel_x = x; + + feidx_transfer.prepare_for_transfer(); + soltrans.prepare_serialization(rel_x); + + tr.save("file"); + deallog << "#cells: " << tr.n_global_active_cells() << std::endl + << "norm: " << x.l2_norm() << std::endl + << "Checksum: " << tr.get_checksum() << std::endl; + } + + MPI_Barrier(MPI_COMM_WORLD); + + deallog << "reading with " << Utilities::MPI::n_mpi_processes(com_all) + << std::endl; + + { + parallel::distributed::Triangulation tr(com_all); + + GridGenerator::hyper_cube(tr); + tr.load("file"); + + hp::DoFHandler dh(tr); + hp::FECollection fe_collection; + + // prepare FECollection with arbitrary number of entries + const unsigned int max_degree = 1 + std::pow(2, dim); + for (unsigned int i = 0; i < max_degree; ++i) + fe_collection.push_back(FE_Q(max_degree - i)); + + dh.distribute_dofs(fe_collection); + + parallel::distributed::ActiveFEIndicesTransfer feidx_transfer(dh); + feidx_transfer.deserialize(); + + dh.distribute_dofs(fe_collection); + + IndexSet locally_owned_dofs = dh.locally_owned_dofs(); + IndexSet locally_relevant_dofs; + + DoFTools::extract_locally_relevant_dofs(dh, locally_relevant_dofs); + + PETScWrappers::MPI::Vector solution(locally_owned_dofs, com_all); + solution = PetscScalar(); + + parallel::distributed:: + SolutionTransfer> + soltrans(dh); + + soltrans.deserialize(solution); + + deallog << "#cells: " << tr.n_global_active_cells() << std::endl + << "norm: " << solution.l2_norm() << std::endl + << "Checksum: " << tr.get_checksum() << std::endl; + } + + deallog << "OK" << std::endl; +} + + +int +main(int argc, char *argv[]) +{ + Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); + MPILogInitAll log; + + test<2>(); +} diff --git a/tests/mpi/p4est_save_06.with_p4est=true.with_petsc=true.mpirun=5.output b/tests/mpi/p4est_save_06.with_p4est=true.with_petsc=true.mpirun=5.output new file mode 100644 index 0000000000..95faa230b4 --- /dev/null +++ b/tests/mpi/p4est_save_06.with_p4est=true.with_petsc=true.mpirun=5.output @@ -0,0 +1,46 @@ + 
+DEAL:0::writing with 3 +DEAL:0::#cells: 19 +DEAL:0::norm: 7114.44 +DEAL:0::Checksum: 136119115 +DEAL:0::reading with 5 +DEAL:0::#cells: 19 +DEAL:0::norm: 7114.44 +DEAL:0::Checksum: 136119115 +DEAL:0::OK + +DEAL:1::writing with 3 +DEAL:1::#cells: 19 +DEAL:1::norm: 7114.44 +DEAL:1::Checksum: 0 +DEAL:1::reading with 5 +DEAL:1::#cells: 19 +DEAL:1::norm: 7114.44 +DEAL:1::Checksum: 0 +DEAL:1::OK + + +DEAL:2::writing with 3 +DEAL:2::#cells: 19 +DEAL:2::norm: 7114.44 +DEAL:2::Checksum: 0 +DEAL:2::reading with 5 +DEAL:2::#cells: 19 +DEAL:2::norm: 7114.44 +DEAL:2::Checksum: 0 +DEAL:2::OK + + +DEAL:3::reading with 5 +DEAL:3::#cells: 19 +DEAL:3::norm: 7114.44 +DEAL:3::Checksum: 0 +DEAL:3::OK + + +DEAL:4::reading with 5 +DEAL:4::#cells: 19 +DEAL:4::norm: 7114.44 +DEAL:4::Checksum: 0 +DEAL:4::OK + diff --git a/tests/mpi/solution_transfer_04.cc b/tests/mpi/solution_transfer_04.cc new file mode 100644 index 0000000000..d8004586de --- /dev/null +++ b/tests/mpi/solution_transfer_04.cc @@ -0,0 +1,143 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2009 - 2018 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + + + +// Test to check if SolutionTransfer works in parallel with hp::DoFHandler. +// This tests is based on mpi/feindices_transfer.cc + + +#include +#include +#include + +#include + +#include + +#include + +#include + +#include + +#include "../tests.h" + + +template +void +test() +{ + const unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD); + + // ------ setup ------ + parallel::distributed::Triangulation tria(MPI_COMM_WORLD); + + GridGenerator::subdivided_hyper_cube(tria, 2); + tria.refine_global(1); + + hp::DoFHandler dh(tria); + hp::FECollection fe_collection; + + // prepare FECollection with arbitrary number of entries + const unsigned int max_degree = 1 + std::pow(2, dim); + for (unsigned int i = 0; i < max_degree; ++i) + fe_collection.push_back(FE_Q(max_degree - i)); + + typename hp::DoFHandler::active_cell_iterator cell; + unsigned int i = 0; + + for (cell = dh.begin_active(); cell != dh.end(); ++cell) + { + if (cell->is_locally_owned()) + { + // set active fe index + if (!(cell->is_artificial())) + { + if (i >= fe_collection.size()) + i = 0; + cell->set_active_fe_index(i++); + } + + // set refinement/coarsening flags + if (cell->id().to_string() == "0_1:0") + cell->set_refine_flag(); + else if (cell->parent()->id().to_string() == + ((dim == 2) ? 
"3_0:" : "7_0:")) + cell->set_coarsen_flag(); + } + } + + + // ----- prepare solution ----- + dh.distribute_dofs(fe_collection); + IndexSet locally_owned_dofs = dh.locally_owned_dofs(); + IndexSet locally_relevant_dofs; + DoFTools::extract_locally_relevant_dofs(dh, locally_relevant_dofs); + + TrilinosWrappers::MPI::Vector solution; + solution.reinit(locally_owned_dofs, MPI_COMM_WORLD); + for (unsigned int i = 0; i < solution.size(); ++i) + if (locally_owned_dofs.is_element(i)) + solution(i) = i; + + TrilinosWrappers::MPI::Vector old_solution; + old_solution.reinit(locally_owned_dofs, + locally_relevant_dofs, + MPI_COMM_WORLD); + old_solution = solution; + + + // ----- transfer ----- + parallel::distributed::ActiveFEIndicesTransfer feidx_transfer(dh); + parallel::distributed:: + SolutionTransfer> + soltrans(dh); + + feidx_transfer.prepare_for_transfer(); + soltrans.prepare_for_coarsening_and_refinement(old_solution); + tria.execute_coarsening_and_refinement(); + + feidx_transfer.unpack(); + + dh.distribute_dofs(fe_collection); + locally_owned_dofs = dh.locally_owned_dofs(); + DoFTools::extract_locally_relevant_dofs(dh, locally_relevant_dofs); + + TrilinosWrappers::MPI::Vector new_solution; + new_solution.reinit(locally_owned_dofs, MPI_COMM_WORLD); + soltrans.interpolate(new_solution); + + // make sure no processor is hanging + MPI_Barrier(MPI_COMM_WORLD); + + deallog << "OK" << std::endl; +} + + + +int +main(int argc, char *argv[]) +{ + Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); + MPILogInitAll log; + + deallog.push("2d"); + test<2>(); + deallog.pop(); + deallog.push("3d"); + test<3>(); + deallog.pop(); +} diff --git a/tests/mpi/solution_transfer_04.with_p4est=true.with_trilinos=true.mpirun=2.output b/tests/mpi/solution_transfer_04.with_p4est=true.with_trilinos=true.mpirun=2.output new file mode 100644 index 0000000000..0a495369a8 --- /dev/null +++ b/tests/mpi/solution_transfer_04.with_p4est=true.with_trilinos=true.mpirun=2.output @@ -0,0 +1,7 @@ + +DEAL:0:2d:: OK +DEAL:0:3d:: OK + +DEAL:1:2d:: OK +DEAL:1:3d:: OK + -- 2.39.5