From: Marc Fehling Date: Tue, 18 Dec 2018 14:23:15 +0000 (+0100) Subject: Automatic transfer of active fe indices during refinement and serialization. X-Git-Tag: v9.1.0-rc1~362^2 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=9d6dd6845c3bc9eabd4d27ac4e4caa76f3d78436;p=dealii.git Automatic transfer of active fe indices during refinement and serialization. --- diff --git a/doc/news/changes/major/20190127Fehling b/doc/news/changes/major/20190127Fehling new file mode 100644 index 0000000000..74c4c55f90 --- /dev/null +++ b/doc/news/changes/major/20190127Fehling @@ -0,0 +1,10 @@ +Changed: Class hp::DoFHandler now transfers the active_fe_index of each +cell automatically when refining/coarsening a Triangulation, +parallel::shared::Triangulation, or +parallel::distributed::Triangulation. However, serialization of a +parallel::distributed::Triangulation still requires a user to +explicitly call the functions +hp::DoFHandler::prepare_for_serialization_of_active_fe_indices() and +hp::DoFHandler::deserialize_active_fe_indices(). +
+(Marc Fehling, 2019/01/27) diff --git a/doc/news/changes/minor/20180723MarcFehling b/doc/news/changes/minor/20180723MarcFehling deleted file mode 100644 index fd54c397db..0000000000 --- a/doc/news/changes/minor/20180723MarcFehling +++ /dev/null @@ -1,6 +0,0 @@ -New: Class parallel::distributed::ActiveFEIndicesTransfer -has been introduced to transfer the active_fe_index of each -cell across meshes in case of refinement/serialization, if a -hp::DoFHandler has been used with a parallel::distributed::Triangulation. -
-(Marc Fehling, 2018/07/23) diff --git a/include/deal.II/distributed/active_fe_indices_transfer.h b/include/deal.II/distributed/active_fe_indices_transfer.h deleted file mode 100644 index c1c8702fe4..0000000000 --- a/include/deal.II/distributed/active_fe_indices_transfer.h +++ /dev/null @@ -1,211 +0,0 @@ -// --------------------------------------------------------------------- -// -// Copyright (C) 2018 by the deal.II authors -// -// This file is part of the deal.II library. -// -// The deal.II library is free software; you can use it, redistribute -// it, and/or modify it under the terms of the GNU Lesser General -// Public License as published by the Free Software Foundation; either -// version 2.1 of the License, or (at your option) any later version. -// The full text of the license can be found in the file LICENSE.md at -// the top level directory of deal.II. -// -// --------------------------------------------------------------------- - -#ifndef dealii_distributed_active_fe_indices_transfer_h -#define dealii_distributed_active_fe_indices_transfer_h - -#include - -#include - -#include - - -DEAL_II_NAMESPACE_OPEN - -namespace parallel -{ - namespace distributed - { - /** - * This class transfers each cell's `active_fe_index` of a hp::FECollection - * attached to a hp::DoFHandler while refining and/or coarsening a - * distributed grid and handles the necessary communication. - * - * This class therefore does for the `active_fe_index` information of each - * cell what parallel::distributed::SolutionTransfer does for the values - * of degrees of freedom defined on a parallel::distributed::Triangulation. - * - * If refinement is involved in the data transfer process, the children of - * a refined cell inherit the `active_fe_index` from their parent. If - * cells get coarsened into one, the latter will get the least dominating - * `active_fe_index` amongst its children, as determined by the function - * hp::FECollection::find_least_face_dominating_fe_in_collection(). - * - * @note If you use more than one object to attach data to a - * parallel::distributed::Triangulation at the same time (e.g. a - * parallel::distributed::SolutionTransfer object), the calls to - * parallel::distributed::ActiveFEIndicesTransfer::prepare_for_transfer(), - * parallel::distributed::SolutionTransfer::prepare_for_coarsening_and_refinement() - * and parallel::distributed::SolutionTransfer::prepare_serialization(), - * as well as parallel::distributed::ActiveFEIndicesTransfer::unpack() and - * parallel::distributed::SolutionTransfer::interpolate(), or - * parallel::distributed::ActiveFEIndicesTransfer::deserialize() and - * parallel::distributed::SolutionTransfer::deserialize() for serialization, - * need to be in the same order, respectively. - * - *
- * <h3>Transferring each cell's active_fe_index</h3>
- * - * The following code snippet demonstrates how to transfer all active FE - * indices across refinement/coarsening of the triangulation that is - * registered to the hp::DoFHandler member object of this class. - * - * @code - * parallel::distributed::ActiveFEIndicesTransfer - * feidx_trans(hp_dof_handler); - * // flag some cells for refinement and coarsening, e.g. - * GridRefinement::refine_and_coarsen_fixed_fraction(tria, - * error_indicators, - * 0.3, - * 0.05); - * - * // prepare the triangulation, - * tria.prepare_coarsening_and_refinement(); - * - * // prepare the SolutionTransfer object for coarsening and refinement - * // and give the solution vector that we intend to interpolate later, - * feidx_trans.prepare_for_transfer(); - * - * // actually execute the refinement, - * tria.execute_coarsening_and_refinement(); - * - * // unpack all active fe indices, - * feidx_trans.unpack(); - * - * // redistribute dofs for further use, - * // using the active FE indices just restored - * hp_dof_handler.distribute_dofs(fe_collection); - * @endcode - * - * - *
- * <h3>Use for serialization</h3>
- * - * This class can be used to serialize and later deserialize a distributed - * mesh with attached data to separate files. If you use more than one - * hp::DoFHandler and therefore more than one - * parallel::distributed::ActiveFEIndicesTransfer object, they need to be - * serialized and deserialized in the same order. - * - * For serialization, the following code snippet saves not only the - * triangulation itself, but also the active FE indices of a hp::DoFHandler - * that works on this triangulation: - * @code - * parallel::distributed::ActiveFEIndicesTransfer - * feidx_trans(hp_dof_handler); - * feidx_trans.prepare_for_transfer(); - * - * triangulation.save(filename); - * @endcode - * - * Later, during deserialization, both the triangulation and all active FE - * indices of the hp::DoFHandler can be restored as follows: - * @code - * //[create coarse mesh...] - * triangulation.load(filename); - * - * parallel::distributed::ActiveFEIndicesTransfer - * feidx_trans(hp_dof_handler); - * feidx_trans.deserialize(); - * - * // distribute dofs for further use, - * // using the active FE indices just restored - * hp_dof_handler.distribute_dofs(fe_collection); - * @endcode - * - * @note See documentation of parallel::distributed::SolutionTransfer for - * matching code snippets in both cases. - * - * @ingroup distributed - * @author Marc Fehling, 2018 - */ - template - class ActiveFEIndicesTransfer - { - public: - /** - * Constructor. - * - * @param[in] dof The hp::DoFHandler on which all - * operations will happen. At the time when this constructor - * is called, the hp::DoFHandler still points to the triangulation - * before the refinement in question happens. - */ - ActiveFEIndicesTransfer(const hp::DoFHandler &dof_handler); - - /** - * Prepare the current object for coarsening and refinement or - * serialization. - */ - void - prepare_for_transfer(); - - /** - * Unpack the information previously stored in this object before - * the mesh was refined or coarsened onto the current set of cells. - */ - void - unpack(); - - /** - * Execute the deserialization of the stored information. - * This needs to be done after calling Triangulation::load(). - */ - void - deserialize(); - - private: - /** - * Pointer to the hp::DoFHandler to work with. - */ - SmartPointer, - ActiveFEIndicesTransfer> - dof_handler; - - /** - * The handle that the parallel::distributed::Triangulation has - * assigned to this object with which we can access our memory - * offset and our pack function. - */ - unsigned int handle; - - /** - * A callback function used to pack the data on the current mesh into - * objects that can later be retrieved after refinement, coarsening and - * repartitioning. - */ - std::vector - pack_callback( - const typename Triangulation::cell_iterator &cell, - const typename Triangulation::CellStatus status); - - /** - * A callback function used to unpack the data on the current mesh that - * has been packed up previously on the mesh before refinement, - * coarsening and repartitioning. 
- */ - void - unpack_callback( - const typename Triangulation::cell_iterator &cell, - const typename Triangulation::CellStatus status, - const boost::iterator_range::const_iterator> - &data_range); - }; - } // namespace distributed -} // namespace parallel - - -DEAL_II_NAMESPACE_CLOSE - -#endif diff --git a/include/deal.II/distributed/solution_transfer.h b/include/deal.II/distributed/solution_transfer.h index a0d47d7544..558ebeed29 100644 --- a/include/deal.II/distributed/solution_transfer.h +++ b/include/deal.II/distributed/solution_transfer.h @@ -153,32 +153,12 @@ namespace parallel * * If an object of the hp::DoFHandler class is registered with an * instantiation of this parallel::distributed::SolutionTransfer - * class, the following requirements have to be met: - *
- * <ul>
- *   <li>
- *     The hp::DoFHandler needs to be explicitly mentioned
- *     in the parallel::distributed::SolutionTransfer type, i.e.:
- *     @code
- *     parallel::distributed::SolutionTransfer<dim,
- *       VectorType, hp::DoFHandler<dim, spacedim>> sol_trans(hp_dof_handler);
- *     @endcode
- *   </li>
- *   <li>
- *     The transfer of the active_fe_index of each cell
- *     has to be scheduled as well in the parallel::distributed case,
- *     since ownership of cells may change during mesh repartitioning.
- *     This can be achieved with the
- *     parallel::distributed::ActiveFEIndicesTransfer class. The order in
- *     which both objects append data during the packing process does not
- *     matter. However, the unpacking process after refinement or
- *     deserialization requires the active_fe_index to be distributed
- *     before interpolating the data of the
- *     parallel::distributed::SolutionTransfer object, so that the correct
- *     FiniteElement object is associated with each cell. See the
- *     documentation of parallel::distributed::ActiveFEIndicesTransfer
- *     for further instructions.
- *   </li>
- * </ul>
+ * class, it is necessary to explicitly specify this in the template + * argument list of this class, i.e.: + * @code + * parallel::distributed::SolutionsTransfer> sol_trans(hp_dof_handler); + * @endcode * * Since data on hp::DoFHandler objects is associated with many different * FiniteElement objects, each cell's data has to be processed with its @@ -190,44 +170,22 @@ namespace parallel * function hp::FECollection::find_least_face_dominating_fe_in_collection(), * and unpacked on the same cell with the same index. * - * Code snippets to demonstrate the usage of the - * parallel::distributed::SolutionTransfer class with hp::DoFHandler - * objects are provided in the following. Here VectorType - * is your favorite vector type, e.g. PETScWrappers::MPI::Vector, - * TrilinosWrappers::MPI::Vector, or corresponding block vectors. - * - * After refinement, the order in which to unpack the transferred data - * is important: - * @code - * //[prepare triangulation for refinement ...] - * - * parallel::distributed::ActiveFEIndicesTransfer - * feidx_trans(hp_dof_handler); - * parallel::distributed:: - * SolutionTransfer> - * sol_trans(hp_dof_handler); - * - * feidx_trans.prepare_for_transfer(); - * sol_trans.prepare_for_coarsening_and_refinement(solution); - * triangulation.execute_coarsening_and_refinement(); - * - * feidx_trans.unpack(); - * hp_dof_handler.distribute_dofs(fe_collection); - * - * VectorType interpolated_solution; - * //[create VectorType in the right size here ...] - * soltrans.interpolate(interpolated_solution); - * @endcode + * Transferring a solution across refinement works exactly like in the + * non-hp case. However, when considering serialization, we also have to + * store the active_fe_indices in an additional step. A code snippet + * demonstrating serialization with the + * parallel::distributed::SolutionTransfer class with hp::DoFHandler objects + * is provided in the following. Here VectorType is your favorite vector + * type, e.g. PETScWrappers::MPI::Vector, TrilinosWrappers::MPI::Vector, or + * corresponding block vectors. * * If vector has the locally relevant DoFs, serialization works as follows: * @code - * parallel::distributed::ActiveFEIndicesTransfer - * feidx_trans(hp_dof_handler); * parallel::distributed:: * SolutionTransfer> * sol_trans(hp_dof_handler); * - * feidx_trans.prepare_for_transfer(); + * hp_dof_handler.prepare_for_serialization_of_active_fe_indices(); * sol_trans.prepare_serialization(vector); * * triangulation.save(filename); @@ -239,14 +197,14 @@ namespace parallel * //[create coarse mesh...] * triangulation.load(filename); * - * hp::DoFHandler hp_dof_handler(triangulation); * hp::FECollection fe_collection; * //[prepare identical fe_collection...] - * hp_dof_handler.distribute_dofs(fe_collection); * - * parallel::distributed::ActiveFEIndicesTransfer - * feidx_trans(hp_dof_handler); - * feidx_trans.deserialize(); + * hp::DoFHandler hp_dof_handler(triangulation); + * // We need to introduce our dof_handler to the fe_collection + * // before setting all active_fe_indices. 
+ * hp_dof_handler.distribute_dofs(fe_collection); + * hp_dof_handler.deserialize_active_fe_indices(); * hp_dof_handler.distribute_dofs(fe_collection); * * parallel::distributed:: diff --git a/include/deal.II/hp/dof_handler.h b/include/deal.II/hp/dof_handler.h index 95c33c30ed..29a3746ec8 100644 --- a/include/deal.II/hp/dof_handler.h +++ b/include/deal.II/hp/dof_handler.h @@ -26,6 +26,8 @@ #include #include +#include + #include #include #include @@ -36,6 +38,7 @@ #include #include +#include #include #include @@ -159,12 +162,23 @@ namespace hp * @ref GlossArtificialCell "the glossary entry on artificial cells" * for more information. * - * Using a parallel::distributed::Triangulation with an hp::DoFHandler - * requires additional attention during coarsening and refinement, since - * no information on active FE indices will be automatically transferred. - * This has to be done manually using the - * parallel::distributed::ActiveFEIndicesTransfer class. Consult its - * documentation for more information. + * During refinement and coarsening, information about the @p active_fe_index + * of each cell will be automatically transferred. + * + * However, using a parallel::distributed::Triangulation with an + * hp::DoFHandler requires additional attention during serialization, since no + * information on active FE indices will be automatically transferred. This + * has to be done manually using the + * prepare_for_serialization_of_active_fe_indices() and + * deserialize_active_fe_indices() functions. The former has to be called + * before parallel::distributed::Triangulation::save() is invoked, and the + * latter needs to be run after parallel::distributed::Triangulation::load(). + * If further data will be attached to the triangulation via the + * parallel::distributed::CellDataTransfer, + * parallel::distributed::SolutionTransfer, or Particles::ParticleHandler + * classes, all corresponding preparation and deserialization function calls + * need to happen in the same order. Consult the documentation of + * parallel::distributed::SolutionTransfer for more information. * * * @ingroup dofs @@ -884,6 +898,43 @@ namespace hp virtual std::size_t memory_consumption() const; + /** + * Whenever serialization with a parallel::distributed::Triangulation as the + * underlying triangulation is considered, we also need to consider storing + * the active_fe_indices on all active cells as well. + * + * This function registers that these indices are to be stored whenever the + * parallel::distributed::Triangulation::save() function is called on the + * underlying triangulation. + * + * @note Currently only implemented for triangulations of type + * parallel::distributed::Triangulation. An assertion will be triggered if + * a different type is registered. + * + * @see The documentation of parallel::distributed::SolutionTransfer has further + * information on serialization. + */ + void + prepare_for_serialization_of_active_fe_indices(); + + /** + * Whenever serialization with a parallel::distributed::Triangulation as the + * underlying triangulation is considered, we also need to consider storing + * the active_fe_indices on all active cells as well. + * + * This function deserializes and distributes the previously stored + * active_fe_indices on all active cells. + * + * @note Currently only implemented for triangulations of type + * parallel::distributed::Triangulation. An assertion will be triggered if + * a different type is registered. 
+ * + * @see The documentation of parallel::distributed::SolutionTransfer has further + * information on serialization. + */ + void + deserialize_active_fe_indices(); + /** * Write the data of this object to a stream for the purpose of * serialization. @@ -1039,17 +1090,70 @@ namespace hp post_refinement_action(); /** - * Functions that will be triggered through signals whenever the - * triangulation is modified, with the restriction that it is not - * a parallel::distributed::Triangulation. + * A function that will be triggered through a triangulation + * signal just before the associated Triangulation is modified. + * + * The function that stores the active_fe_indices of all cells that will + * be refined or coarsened before the refinement happens, so that + * they can be set again after refinement. + */ + void + pre_active_fe_index_transfer(); + + /** + * A function that will be triggered through a triangulation + * signal just before the associated parallel::shared::Triangulation is + * modified. + * + * The function that stores the active_fe_indices of all cells that will + * be refined or coarsened before the refinement happens, so that + * they can be set again after refinement. + */ + void + pre_shared_active_fe_index_transfer(); + + /** + * A function that will be triggered through a triangulation + * signal just before the associated parallel::distributed::Triangulation is + * modified. + * + * The function that stores all active_fe_indices on locally owned cells for + * distribution over all participating processors. + */ + void + pre_distributed_active_fe_index_transfer(); + + /** + * A function that will be triggered through a triangulation + * signal just after the associated Triangulation is modified. + * + * The function that restores the active_fe_indices of all cells that + * were refined or coarsened. + */ + void + post_active_fe_index_transfer(); + + /** + * A function that will be triggered through a triangulation + * signal just after the associated parallel::shared::Triangulation is + * modified. * - * Here they are used to administrate the active_fe_indices during the - * spatial refinement. + * The function that restores the active_fe_indices of all cells that + * were refined or coarsened. */ void - pre_refinement_fe_index_update(); + post_shared_active_fe_index_transfer(); + + /** + * A function that will be triggered through a triangulation + * signal just after the associated parallel::distributed::Triangulation is + * modified. + * + * The function that restores all active_fe_indices on locally owned cells + * that have been communicated. + */ void - post_refinement_fe_index_update(); + post_distributed_active_fe_index_transfer(); /** * Space to store the DoF numbers for the different levels. Analogous to @@ -1120,6 +1224,23 @@ namespace hp */ std::map coarsened_cells_fe_index; + /** + * Container to temporarily store the active_fe_index of every locally + * owned cell for transfer across parallel::distributed::Triangulation + * objects. + */ + std::vector active_fe_indices; + + /** + * Helper object to transfer all active_fe_indices on + * parallel::distributed::Triangulation objects during refinement/coarsening + * and serialization. + */ + std::unique_ptr< + parallel::distributed:: + CellDataTransfer>> + cell_data_transfer; + /** * A list of connections with which this object connects to the * triangulation to get information about when the triangulation changes. 
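As a minimal sketch of the save/load cycle that the documentation above describes: assuming a parallel::distributed::Triangulation<dim> named `triangulation`, an hp::FECollection<dim> named `fe_collection`, and a solution vector `solution` of some parallel vector type `VectorType` (all hypothetical names, set up as in the snippets above), the two new hp::DoFHandler functions slot into serialization as follows. This is an illustration only, not part of the patch itself:

@code
// Sketch only; `triangulation`, `fe_collection`, `solution`, and
// `VectorType` are assumed to exist as described above.

// --- saving ---
hp::DoFHandler<dim> hp_dof_handler(triangulation);
hp_dof_handler.distribute_dofs(fe_collection);

parallel::distributed::
  SolutionTransfer<dim, VectorType, hp::DoFHandler<dim>>
    sol_trans(hp_dof_handler);

// register the active_fe_indices for storage, then the solution
hp_dof_handler.prepare_for_serialization_of_active_fe_indices();
sol_trans.prepare_serialization(solution);

triangulation.save("checkpoint");

// --- loading, in a later program run ---
// [re-create the same coarse mesh first...]
triangulation.load("checkpoint");

// introduce the dof_handler to its fe_collection before restoring
// the active_fe_indices, then redistribute dofs using them
hp_dof_handler.distribute_dofs(fe_collection);
hp_dof_handler.deserialize_active_fe_indices();
hp_dof_handler.distribute_dofs(fe_collection);

VectorType restored_solution;
// [initialize restored_solution with the right parallel layout...]
sol_trans.deserialize(restored_solution);
@endcode

The calls mirror the snippets in the updated solution_transfer.h documentation above; the essential ordering is that prepare_for_serialization_of_active_fe_indices() precedes save(), and deserialize_active_fe_indices() runs after load(), bracketed by distribute_dofs() calls.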
diff --git a/source/distributed/CMakeLists.txt b/source/distributed/CMakeLists.txt index ac47675cbe..fec5610af2 100644 --- a/source/distributed/CMakeLists.txt +++ b/source/distributed/CMakeLists.txt @@ -18,7 +18,6 @@ INCLUDE_DIRECTORIES(BEFORE ${CMAKE_CURRENT_BINARY_DIR}) SET(_unity_include_src grid_refinement.cc cell_weights.cc - active_fe_indices_transfer.cc cell_data_transfer.cc solution_transfer.cc tria.cc @@ -42,7 +41,6 @@ SETUP_SOURCE_LIST("${_unity_include_src}" SET(_inst grid_refinement.inst.in cell_weights.inst.in - active_fe_indices_transfer.inst.in cell_data_transfer.inst.in solution_transfer.inst.in tria.inst.in diff --git a/source/distributed/active_fe_indices_transfer.cc b/source/distributed/active_fe_indices_transfer.cc deleted file mode 100644 index 60ea718f17..0000000000 --- a/source/distributed/active_fe_indices_transfer.cc +++ /dev/null @@ -1,215 +0,0 @@ -// --------------------------------------------------------------------- -// -// Copyright (C) 2018 by the deal.II authors -// -// This file is part of the deal.II library. -// -// The deal.II library is free software; you can use it, redistribute -// it, and/or modify it under the terms of the GNU Lesser General -// Public License as published by the Free Software Foundation; either -// version 2.1 of the License, or (at your option) any later version. -// The full text of the license can be found in the file LICENSE.md at -// the top level directory of deal.II. -// -// --------------------------------------------------------------------- - - -#include - -#ifdef DEAL_II_WITH_P4EST - -# include -# include - -# include - -# include - -DEAL_II_NAMESPACE_OPEN - -namespace parallel -{ - namespace distributed - { - template - ActiveFEIndicesTransfer::ActiveFEIndicesTransfer( - const hp::DoFHandler &dof_handler) - : dof_handler(&dof_handler, typeid(*this).name()) - , handle(numbers::invalid_unsigned_int) - { - Assert( - (dynamic_cast< - const parallel::distributed::Triangulation *>( - &(this->dof_handler)->get_triangulation()) != nullptr), - ExcMessage( - "parallel::distributed::ActiveFEIndicesTransfer requires a parallel::distributed::Triangulation object.")); - } - - - - template - void - ActiveFEIndicesTransfer::prepare_for_transfer() - { - parallel::distributed::Triangulation *tria = - (dynamic_cast *>( - const_cast *>( - &dof_handler->get_triangulation()))); - Assert(tria != nullptr, ExcInternalError()); - - handle = tria->register_data_attach( - std::bind(&ActiveFEIndicesTransfer::pack_callback, - this, - std::placeholders::_1, - std::placeholders::_2), - /*returns_variable_size_data=*/false); - } - - - - template - void - ActiveFEIndicesTransfer::unpack() - { - // TODO: casting away constness is bad - parallel::distributed::Triangulation *tria = - (dynamic_cast *>( - const_cast *>( - &dof_handler->get_triangulation()))); - Assert(tria != nullptr, ExcInternalError()); - - tria->notify_ready_to_unpack( - handle, - std::bind(&ActiveFEIndicesTransfer::unpack_callback, - this, - std::placeholders::_1, - std::placeholders::_2, - std::placeholders::_3)); - } - - - - template - void - ActiveFEIndicesTransfer::deserialize() - { - // For deserialization, we need to register this object - // to the triangulation first to get a valid handle for - // data access. 
- prepare_for_transfer(); - unpack(); - } - - - - template - std::vector - ActiveFEIndicesTransfer::pack_callback( - const typename Triangulation::cell_iterator &cell_, - const typename Triangulation::CellStatus status) - { - const typename hp::DoFHandler::cell_iterator cell( - *cell_, dof_handler); - - unsigned int fe_index = numbers::invalid_unsigned_int; - - switch (status) - { - case parallel::distributed::Triangulation::CELL_PERSIST: - case parallel::distributed::Triangulation::CELL_REFINE: - fe_index = cell->active_fe_index(); - break; - - case parallel::distributed::Triangulation::CELL_COARSEN: - // In this case, the callback function will be called on the parent - // cell which shall store the packed information. We need to choose - // from its children which ID to store. - { - std::set fe_indices_children; - for (unsigned int child_index = 0; - child_index < GeometryInfo::max_children_per_cell; - ++child_index) - { - typename hp::DoFHandler::cell_iterator child = - cell->child(child_index); - - fe_indices_children.insert(child->active_fe_index()); - } - - fe_index = - dof_handler->get_fe_collection() - .find_least_dominating_fe_in_collection(fe_indices_children, - /*codim=*/0); - - Assert(fe_index != numbers::invalid_unsigned_int, - ExcMessage( - "No FiniteElement has been found in your FECollection " - "that dominates all children of a cell you are trying " - "to coarsen!")); - } - break; - - default: - Assert(false, ExcInternalError()); - break; - } - - return Utilities::pack(fe_index, /*allow_compression=*/false); - } - - - - template - void - ActiveFEIndicesTransfer::unpack_callback( - const typename Triangulation::cell_iterator &cell_, - const typename Triangulation::CellStatus status, - const boost::iterator_range::const_iterator> - &data_range) - { - typename hp::DoFHandler::cell_iterator cell(*cell_, - dof_handler); - - const unsigned int fe_index = - Utilities::unpack(data_range.begin(), - data_range.end(), - /*allow_compression=*/false); - - Assert(fe_index <= dof_handler->get_fe().size(), ExcInternalError()); - - switch (status) - { - case parallel::distributed::Triangulation::CELL_PERSIST: - case parallel::distributed::Triangulation::CELL_COARSEN: - cell->set_active_fe_index(fe_index); - break; - - case parallel::distributed::Triangulation::CELL_REFINE: - // In this case, the callback function will be called on the parent - // cell which stores the packed information. We need to distribute - // it on its children. - for (unsigned int child_index = 0; - child_index < GeometryInfo::max_children_per_cell; - ++child_index) - cell->child(child_index)->set_active_fe_index(fe_index); - break; - - default: - Assert(false, ExcInternalError()); - break; - } - } - } // namespace distributed -} // namespace parallel - - -// explicit instantiations -# include "active_fe_indices_transfer.inst" - -DEAL_II_NAMESPACE_CLOSE - -#endif diff --git a/source/distributed/active_fe_indices_transfer.inst.in b/source/distributed/active_fe_indices_transfer.inst.in deleted file mode 100644 index 4e91a9b682..0000000000 --- a/source/distributed/active_fe_indices_transfer.inst.in +++ /dev/null @@ -1,30 +0,0 @@ -// --------------------------------------------------------------------- -// -// Copyright (C) 2018 by the deal.II authors -// -// This file is part of the deal.II library. 
-// -// The deal.II library is free software; you can use it, redistribute -// it, and/or modify it under the terms of the GNU Lesser General -// Public License as published by the Free Software Foundation; either -// version 2.1 of the License, or (at your option) any later version. -// The full text of the license can be found in the file LICENSE.md at -// the top level directory of deal.II. -// -// --------------------------------------------------------------------- - - - -for (deal_II_dimension : DIMENSIONS; deal_II_space_dimension : SPACE_DIMENSIONS) - { - namespace parallel - \{ - namespace distributed - \{ -#if deal_II_dimension <= deal_II_space_dimension - template class ActiveFEIndicesTransfer; -#endif - \} - \} - } diff --git a/source/hp/dof_handler.cc b/source/hp/dof_handler.cc index c7b5011f89..01900459d8 100644 --- a/source/hp/dof_handler.cc +++ b/source/hp/dof_handler.cc @@ -238,6 +238,7 @@ namespace internal } + /** * Do that part of reserving space that pertains to cells, * since this is the same in all space dimensions. @@ -652,6 +653,7 @@ namespace internal } + /** * Reserve enough space in the levels[] objects to * store the numbers of the degrees of freedom needed for the @@ -825,6 +827,7 @@ namespace internal } + /** * Implement the function of same name in the mother class. */ @@ -905,6 +908,7 @@ namespace internal } + template static unsigned int max_couplings_between_dofs(const DoFHandler<3, spacedim> &dof_handler) @@ -938,6 +942,7 @@ namespace internal } + /** * Given a hp::DoFHandler object, make sure that the active_fe_indices * that a user has set for locally owned cells are communicated to all @@ -1037,6 +1042,7 @@ namespace internal } // namespace internal + namespace hp { template @@ -1057,6 +1063,7 @@ namespace hp {} + template DoFHandler::DoFHandler( const Triangulation &tria) @@ -1069,6 +1076,7 @@ namespace hp } + template DoFHandler::~DoFHandler() { @@ -1086,8 +1094,10 @@ namespace hp } + /*------------------------ Cell iterator functions ------------------------*/ + template typename DoFHandler::cell_iterator DoFHandler::begin(const unsigned int level) const @@ -1121,6 +1131,7 @@ namespace hp } + template typename DoFHandler::cell_iterator DoFHandler::end(const unsigned int level) const @@ -1131,6 +1142,7 @@ namespace hp } + template typename DoFHandler::active_cell_iterator DoFHandler::end_active(const unsigned int level) const @@ -1151,6 +1163,7 @@ namespace hp } + template IteratorRange::active_cell_iterator> DoFHandler::active_cell_iterators() const @@ -1329,10 +1342,9 @@ namespace hp // protected data of this object, but for simplicity we use the // cell-wise access. this way we also have to pass some debug-mode // tests which we would have to duplicate ourselves otherwise - active_cell_iterator cell = begin_active(), endc = end(); - for (unsigned int i = 0; cell != endc; ++cell, ++i) + for (const auto &cell : active_cell_iterators()) if (cell->is_locally_owned()) - cell->set_active_fe_index(active_fe_indices[i]); + cell->set_active_fe_index(active_fe_indices[cell->active_cell_index()]); } @@ -1347,11 +1359,13 @@ namespace hp // we could try to extract the values directly, since they are // stored as protected data of this object, but for simplicity we // use the cell-wise access. 
- active_cell_iterator cell = begin_active(), endc = end(); - for (unsigned int i = 0; cell != endc; ++cell, ++i) - active_fe_indices[i] = cell->active_fe_index(); + for (const auto &cell : active_cell_iterators()) + if (cell->is_locally_owned()) + active_fe_indices[cell->active_cell_index()] = cell->active_fe_index(); } + + template void DoFHandler::initialize( @@ -1375,6 +1389,7 @@ namespace hp } + template void DoFHandler::distribute_dofs( @@ -1490,29 +1505,12 @@ namespace hp } + template void DoFHandler::setup_policy_and_listeners() { - // decide whether we need a sequential or a parallel shared/distributed - // policy - if (dynamic_cast *>( - &*this->tria) != nullptr) - policy = - std_cxx14::make_unique>>( - *this); - else if (dynamic_cast< - const parallel::distributed::Triangulation *>( - &*this->tria) != nullptr) - policy = std_cxx14::make_unique< - internal::DoFHandlerImplementation::Policy::ParallelDistributed< - DoFHandler>>(*this); - else - policy = - std_cxx14::make_unique>>(*this); - + // connect functions to signals of the underlying triangulation tria_listeners.push_back(this->tria->signals.pre_refinement.connect( std::bind(&DoFHandler::pre_refinement_action, std::ref(*this)))); @@ -1522,22 +1520,77 @@ namespace hp tria_listeners.push_back(this->tria->signals.create.connect(std::bind( &DoFHandler::post_refinement_action, std::ref(*this)))); - // Only connect the update of active fe indices if we are not using a - // p::d::Triangulation. In this case, the class ActiveFEIndicesTransfer - // has to be consulted. - if (dynamic_cast - *>(&*this->tria) == nullptr) + // decide whether we need a sequential or a parallel shared/distributed + // policy and attach corresponding callback functions dealing with the + // transfer of active_fe_indices + if (const auto *distributed_tria = dynamic_cast< + const parallel::distributed::Triangulation *>( + &this->get_triangulation())) { + policy = std_cxx14::make_unique< + internal::DoFHandlerImplementation::Policy::ParallelDistributed< + DoFHandler>>(*this); + +#ifdef DEAL_II_WITH_P4EST + cell_data_transfer = std_cxx14::make_unique< + parallel::distributed:: + CellDataTransfer>>( + *distributed_tria, + /*transfer_variable_size_data=*/false, + ¶llel::distributed::CellDataTransfer< + dim, + spacedim, + std::vector>::CoarseningStrategies::check_equality); +#endif + + tria_listeners.push_back( + distributed_tria->signals.pre_distributed_refinement.connect( + std::bind( + &DoFHandler::pre_distributed_active_fe_index_transfer, + std::ref(*this)))); + tria_listeners.push_back( + distributed_tria->signals.post_distributed_refinement.connect( + std::bind( + &DoFHandler::post_distributed_active_fe_index_transfer, + std::ref(*this)))); + } + else if (dynamic_cast + *>(&this->get_triangulation()) != nullptr) + { + policy = + std_cxx14::make_unique>>( + *this); + + tria_listeners.push_back( + this->tria->signals.pre_refinement.connect(std::bind( + &DoFHandler::pre_shared_active_fe_index_transfer, + std::ref(*this)))); + tria_listeners.push_back( + this->tria->signals.post_refinement.connect(std::bind( + &DoFHandler::post_shared_active_fe_index_transfer, + std::ref(*this)))); + } + else + { + policy = + std_cxx14::make_unique>>( + *this); + tria_listeners.push_back(this->tria->signals.pre_refinement.connect( - std::bind(&DoFHandler::pre_refinement_fe_index_update, + std::bind(&DoFHandler::pre_active_fe_index_transfer, std::ref(*this)))); tria_listeners.push_back(this->tria->signals.post_refinement.connect( - 
std::bind(&DoFHandler::post_refinement_fe_index_update, + std::bind(&DoFHandler::post_active_fe_index_transfer, std::ref(*this)))); } } + template void DoFHandler::clear() @@ -1695,6 +1748,7 @@ namespace hp } + template void DoFHandler::pre_refinement_action() @@ -1703,6 +1757,7 @@ namespace hp } + template void DoFHandler::post_refinement_action() @@ -1727,9 +1782,10 @@ namespace hp } + template void - DoFHandler::pre_refinement_fe_index_update() + DoFHandler::pre_active_fe_index_transfer() { // Finite elements need to be assigned to each cell by calling // distribute_dofs() first to make this functionality available. @@ -1738,6 +1794,74 @@ namespace hp Assert(refined_cells_fe_index.empty(), ExcInternalError()); Assert(coarsened_cells_fe_index.empty(), ExcInternalError()); + // Store active_fe_index information for all cells that will be + // affected by refinement/coarsening. + for (const auto &cell : active_cell_iterators()) + if (cell->is_locally_owned()) + { + if (cell->refine_flag_set()) + { + // Store the active_fe_index of each cell that will be refined + // to and distribute it later on its children. + refined_cells_fe_index.insert( + {cell, cell->active_fe_index()}); + } + else if (cell->coarsen_flag_set()) + { + // From all cells that will be coarsened, determine their + // parent and calculate its proper active_fe_index, so that it + // can be set after refinement. But first, check if that + // particular cell has a parent at all. + Assert(cell->level() > 0, ExcInternalError()); + const auto &parent = cell->parent(); + // Check if the active_fe_index for the current cell has been + // determined already. + if (coarsened_cells_fe_index.find(parent) == + coarsened_cells_fe_index.end()) + { + std::set fe_indices_children; + for (unsigned int child_index = 0; + child_index < parent->n_children(); + ++child_index) + { + Assert(parent->child(child_index)->active(), + ExcInternalError()); + + fe_indices_children.insert( + parent->child(child_index)->active_fe_index()); + } + + const unsigned int fe_index = + fe_collection.find_least_dominating_fe_in_collection( + fe_indices_children, /*codim=*/0); + + Assert( + fe_index != numbers::invalid_unsigned_int, + ExcMessage( + "No FiniteElement has been found in your FECollection " + "that dominates all children of a cell you are trying " + "to coarsen!")); + + coarsened_cells_fe_index.insert({parent, fe_index}); + } + } + } + } + } + + + + template + void + DoFHandler::pre_shared_active_fe_index_transfer() + { +#ifndef DEAL_II_WITH_MPI + Assert(false, ExcInternalError()); +#else + // Finite elements need to be assigned to each cell by calling + // distribute_dofs() first to make this functionality available. + if (fe_collection.size() > 0) + { // If the underlying shared::Tria allows artificial cells, // then save the current set of subdomain ids, and set // subdomain ids to the "true" owner of each cell. We later @@ -1745,9 +1869,10 @@ namespace hp const parallel::shared::Triangulation *shared_tria = (dynamic_cast *>( &(*tria))); + Assert(shared_tria != nullptr, ExcInternalError()); std::vector saved_subdomain_ids; - if (shared_tria != nullptr && shared_tria->with_artificial_cells()) + if (shared_tria->with_artificial_cells()) { saved_subdomain_ids.resize(shared_tria->n_active_cells()); @@ -1767,54 +1892,11 @@ namespace hp communicate_active_fe_indices(*this); } - // Store active_fe_index information for all cells that will be - // affected by refinement/coarsening. 
- for (const auto &cell : active_cell_iterators()) - { - if (cell->refine_flag_set()) - { - // Store the active_fe_index of each cell that will be refined - // to and distribute it later on its children. - refined_cells_fe_index.insert({cell, cell->active_fe_index()}); - } - else if (cell->coarsen_flag_set()) - { - // From all cells that will be coarsened, determine their parent - // and calculate its proper active_fe_index, so that it can be - // set after refinement. - // But first, check if that particular cell has a parent at all. - Assert(cell->level() > 0, ExcInternalError()); - const auto &parent = cell->parent(); - // Check if the active_fe_index for the current cell has been - // determined already. - if (coarsened_cells_fe_index.find(parent) == - coarsened_cells_fe_index.end()) - { - std::set fe_indices_children; - for (unsigned int child_index = 0; - child_index < parent->n_children(); - ++child_index) - fe_indices_children.insert( - parent->child(child_index)->active_fe_index()); - - const unsigned int fe_index = - fe_collection.find_least_dominating_fe_in_collection( - fe_indices_children, /*codim=*/0); - - Assert( - fe_index != numbers::invalid_unsigned_int, - ExcMessage( - "No FiniteElement has been found in your FECollection " - "that dominates all children of a cell you are trying " - "to coarsen!")); - - coarsened_cells_fe_index.insert({parent, fe_index}); - } - } - } + // Now do what we would do in the sequential case. + pre_active_fe_index_transfer(); // Finally, restore current subdomain_ids. - if (shared_tria != nullptr && shared_tria->with_artificial_cells()) + if (shared_tria->with_artificial_cells()) for (const auto &cell : active_cell_iterators()) { if (cell->is_artificial()) @@ -1824,17 +1906,69 @@ namespace hp saved_subdomain_ids[cell->active_cell_index()]); } } +#endif } + template void - DoFHandler::post_refinement_fe_index_update() + DoFHandler::pre_distributed_active_fe_index_transfer() { +#ifndef DEAL_II_WITH_P4EST + Assert(false, ExcInternalError()); +#else // Finite elements need to be assigned to each cell by calling // distribute_dofs() first to make this functionality available. if (fe_collection.size() > 0) { + // First, do what we would do in the sequential case. + pre_active_fe_index_transfer(); + + // If we work on a p::d::Triangulation, we have to transfer all + // active_fe_indices since ownership of cells may change. We will + // use our p::d::CellDataTransfer member to achieve this. Further, + // we prepare the values in such a way that they will correspond to + // the active_fe_indices on the new mesh. + + // Gather all current active_fe_indices. + get_active_fe_indices(active_fe_indices); + + // Overwrite values of cells that will be coarsened with the + // active_fe_index determined beforehand for their parent. + for (const auto &pair : coarsened_cells_fe_index) + for (unsigned int child_index = 0; + child_index < pair.first->n_children(); + ++child_index) + active_fe_indices[pair.first->child(child_index) + ->active_cell_index()] = pair.second; + + // Attach to transfer object. + cell_data_transfer->prepare_for_coarsening_and_refinement( + active_fe_indices); + + // Free some memory. + refined_cells_fe_index.clear(); + coarsened_cells_fe_index.clear(); + } +#endif + } + + + + template + void + DoFHandler::post_active_fe_index_transfer() + { + // Finite elements need to be assigned to each cell by calling + // distribute_dofs() first to make this functionality available. 
+ if (fe_collection.size() > 0) + { + // For Triangulation and p::s::Triangulation, the old cell iterators + // are still valid. There is no need to transfer data in this case, + // and we can re-use our previously gathered information from the + // container. + // Distribute active_fe_indices from all refined cells on their // respective children. for (const auto &pair : refined_cells_fe_index) @@ -1868,19 +2002,140 @@ namespace hp } } + // Clear stored active_fe_indices. + refined_cells_fe_index.clear(); + coarsened_cells_fe_index.clear(); + } + } + + + + template + void + DoFHandler::post_shared_active_fe_index_transfer() + { +#ifndef DEAL_II_WITH_MPI + Assert(false, ExcInternalError()); +#else + // Finite elements need to be assigned to each cell by calling + // distribute_dofs() first to make this functionality available. + if (fe_collection.size() > 0) + { + // Do what we normally do in the sequential case. + post_active_fe_index_transfer(); + // We have to distribute the information about active_fe_indices // on all processors, if a parallel::shared::Triangulation // has been used. dealii::internal::hp::DoFHandlerImplementation::Implementation:: communicate_active_fe_indices(*this); + } +#endif + } - // Clear stored active_fe_indices. - refined_cells_fe_index.clear(); - coarsened_cells_fe_index.clear(); + + + template + void + DoFHandler::post_distributed_active_fe_index_transfer() + { +#ifndef DEAL_II_WITH_P4EST + Assert(false, ExcInternalError()); +#else + // Finite elements need to be assigned to each cell by calling + // distribute_dofs() first to make this functionality available. + if (fe_collection.size() > 0) + { + // Unpack active_fe_indices. + active_fe_indices.resize(tria->n_active_cells(), + numbers::invalid_unsigned_int); + cell_data_transfer->unpack(active_fe_indices); + + // Update all locally owned active_fe_indices. + set_active_fe_indices(active_fe_indices); + + // Free some memory. + active_fe_indices.clear(); + active_fe_indices.shrink_to_fit(); } +#endif + } + + + + template + void + DoFHandler::prepare_for_serialization_of_active_fe_indices() + { +#ifndef DEAL_II_WITH_P4EST + Assert(false, + ExcMessage( + "You are attempting to use a functionality that is only available " + "if deal.II was configured to use p4est, but cmake did not find a " + "valid p4est library.")); +#else + Assert( + (dynamic_cast + *>(&this->get_triangulation()) != nullptr), + ExcMessage( + "This functionality requires a parallel::distributed::Triangulation object.")); + + // Finite elements need to be assigned to each cell by calling + // distribute_dofs() first to make this functionality available. + if (fe_collection.size() > 0) + { + // If we work on a p::d::Triangulation, we have to transfer all + // active fe indices since ownership of cells may change. 
+ + // Gather all current active_fe_indices + get_active_fe_indices(active_fe_indices); + + // Attach to transfer object + cell_data_transfer->prepare_for_serialization(active_fe_indices); + } +#endif + } + + + + template + void + DoFHandler::deserialize_active_fe_indices() + { +#ifndef DEAL_II_WITH_P4EST + Assert(false, + ExcMessage( + "You are attempting to use a functionality that is only available " + "if deal.II was configured to use p4est, but cmake did not find a " + "valid p4est library.")); +#else + Assert( + (dynamic_cast + *>(&this->get_triangulation()) != nullptr), + ExcMessage( + "This functionality requires a parallel::distributed::Triangulation object.")); + + // Finite elements need to be assigned to each cell by calling + // distribute_dofs() first to make this functionality available. + if (fe_collection.size() > 0) + { + // Unpack active_fe_indices. + active_fe_indices.resize(tria->n_active_cells(), + numbers::invalid_unsigned_int); + cell_data_transfer->deserialize(active_fe_indices); + + // Update all locally owned active_fe_indices. + set_active_fe_indices(active_fe_indices); + + // Free some memory. + active_fe_indices.clear(); + active_fe_indices.shrink_to_fit(); + } +#endif } + template template types::global_dof_index @@ -1894,6 +2149,7 @@ namespace hp } + template template void @@ -1907,6 +2163,7 @@ namespace hp } + template void DoFHandler::clear_space() diff --git a/tests/mpi/hp_active_fe_indices_transfer_01.cc b/tests/mpi/hp_active_fe_indices_transfer_01.cc index 4e0266a1b9..2d5cb00ab2 100644 --- a/tests/mpi/hp_active_fe_indices_transfer_01.cc +++ b/tests/mpi/hp_active_fe_indices_transfer_01.cc @@ -15,10 +15,9 @@ -// ActiveFEIndicesTransfer Test +// active fe indices transfer on refinement -#include #include #include @@ -51,65 +50,54 @@ test() for (unsigned int i = 0; i < max_degree; ++i) fe_collection.push_back(FE_Q(max_degree - i)); - typename hp::DoFHandler::active_cell_iterator cell; - unsigned int i = 0; - - for (cell = dh.begin_active(); cell != dh.end(); ++cell) - { - if (cell->is_locally_owned()) - { - // set active fe index - if (!(cell->is_artificial())) - { - if (i >= fe_collection.size()) - i = 0; - cell->set_active_fe_index(i++); - } - - // set refinement/coarsening flags - if (cell->id().to_string() == "0_1:0") - cell->set_refine_flag(); - else if (cell->parent()->id().to_string() == - ((dim == 2) ? "3_0:" : "7_0:")) - cell->set_coarsen_flag(); - - deallog << "myid=" << myid << " cellid=" << cell->id() - << " fe_index=" << cell->active_fe_index() - << " feq_degree=" << max_degree - cell->active_fe_index(); - if (cell->coarsen_flag_set()) - deallog << " coarsening"; - else if (cell->refine_flag_set()) - deallog << " refining"; - deallog << std::endl; - } - } - + // this distribute_dofs() call is necessary + // we need to introduce dof_handler to its fe_collection first dh.distribute_dofs(fe_collection); - // ----- transfer ----- - parallel::distributed::ActiveFEIndicesTransfer feidx_transfer(dh); + unsigned int i = 0; + for (auto &cell : dh.active_cell_iterators()) + if (cell->is_locally_owned()) + { + // set active fe index + if (!(cell->is_artificial())) + { + if (i >= fe_collection.size()) + i = 0; + cell->set_active_fe_index(i++); + } + + // set refinement/coarsening flags + if (cell->id().to_string() == "0_1:0") + cell->set_refine_flag(); + else if (cell->parent()->id().to_string() == + ((dim == 2) ? 
"3_0:" : "7_0:")) + cell->set_coarsen_flag(); + + deallog << "myid=" << myid << " cellid=" << cell->id() + << " fe_index=" << cell->active_fe_index() + << " feq_degree=" << max_degree - cell->active_fe_index(); + if (cell->coarsen_flag_set()) + deallog << " coarsening"; + else if (cell->refine_flag_set()) + deallog << " refining"; + deallog << std::endl; + } - feidx_transfer.prepare_for_transfer(); + // ----- transfer ----- tria.execute_coarsening_and_refinement(); deallog << "cells after: " << tria.n_global_active_cells() << std::endl; - feidx_transfer.unpack(); - - // for further calculations, distribute dofs after unpacking, i.e. - // dh.distribute_dofs(fe_collection); - // ------ verify ------ // check if all children adopted the correct id - for (cell = dh.begin_active(); cell != dh.end(); ++cell) - { - if (cell->is_locally_owned()) - { - deallog << "myid=" << myid << " cellid=" << cell->id() - << " fe_index=" << cell->active_fe_index() - << " feq_degree=" << max_degree - cell->active_fe_index() - << std::endl; - } - } + for (auto &cell : dh.active_cell_iterators()) + if (cell->is_locally_owned()) + deallog << "myid=" << myid << " cellid=" << cell->id() + << " fe_index=" << cell->active_fe_index() + << " feq_degree=" << max_degree - cell->active_fe_index() + << std::endl; + + // for further calculations, distribute dofs, i.e. + // dh.distribute_dofs(fe_collection); // make sure no processor is hanging MPI_Barrier(MPI_COMM_WORLD); diff --git a/tests/mpi/hp_active_fe_indices_transfer_02.cc b/tests/mpi/hp_active_fe_indices_transfer_02.cc new file mode 100644 index 0000000000..6539345740 --- /dev/null +++ b/tests/mpi/hp_active_fe_indices_transfer_02.cc @@ -0,0 +1,127 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2018 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. 
+// +// --------------------------------------------------------------------- + + + +// active fe indices transfer on serialization + + +#include + +#include + +#include + +#include + +#include "../tests.h" + + +template +void +test() +{ + const unsigned int max_degree = 1 + Utilities::pow(2, dim); + + // prepare FECollection with arbitrary number of entries + hp::FECollection fe_collection; + for (unsigned int i = 0; i < max_degree; ++i) + fe_collection.push_back(FE_Q(max_degree - i)); + + { + deallog << "writing" << std::endl; + + // ------ setup ------ + parallel::distributed::Triangulation tria(MPI_COMM_WORLD); + GridGenerator::subdivided_hyper_cube(tria, 2); + tria.refine_global(1); + + // this distribute_dofs() call is necessary + // we need to introduce dof_handler to its fe_collection first + hp::DoFHandler dh(tria); + dh.distribute_dofs(fe_collection); + + unsigned int i = 0; + for (auto &cell : dh.active_cell_iterators()) + if (cell->is_locally_owned()) + { + // set active fe index + if (!(cell->is_artificial())) + { + if (i >= fe_collection.size()) + i = 0; + cell->set_active_fe_index(i++); + } + + deallog << "cellid=" << cell->id() + << " fe_index=" << cell->active_fe_index() << std::endl; + } + + // ----- transfer ----- + dh.prepare_for_serialization_of_active_fe_indices(); + tria.save("file"); + + // make sure no processor is hanging + MPI_Barrier(MPI_COMM_WORLD); + } + + { + deallog << "reading" << std::endl; + + // ------ setup ------ + parallel::distributed::Triangulation tria(MPI_COMM_WORLD); + GridGenerator::subdivided_hyper_cube(tria, 2); + // triangulation has to be initialized with correct coarse cells + + // this distribute_dofs() call is necessary + // we need to introduce dof_handler to its fe_collection first + hp::DoFHandler dh(tria); + dh.distribute_dofs(fe_collection); + + // ----- transfer ----- + tria.load("file"); + dh.deserialize_active_fe_indices(); + + // ------ verify ------ + // check if all children adopted the correct id + for (auto &cell : dh.active_cell_iterators()) + if (cell->is_locally_owned()) + deallog << "cellid=" << cell->id() + << " fe_index=" << cell->active_fe_index() << std::endl; + + // distribute dofs again for further calculations, i.e. 
+ // dh.distribute_dofs(fe_collection); + + // make sure no processor is hanging + MPI_Barrier(MPI_COMM_WORLD); + } + + deallog << "OK" << std::endl; +} + + +int +main(int argc, char *argv[]) +{ + Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); + MPILogInitAll log; + + deallog.push("2d"); + test<2>(); + deallog.pop(); + deallog.push("3d"); + test<3>(); + deallog.pop(); +} diff --git a/tests/mpi/hp_active_fe_indices_transfer_02.with_p4est=true.mpirun=2.output b/tests/mpi/hp_active_fe_indices_transfer_02.with_p4est=true.mpirun=2.output new file mode 100644 index 0000000000..a9f6eeb1ba --- /dev/null +++ b/tests/mpi/hp_active_fe_indices_transfer_02.with_p4est=true.mpirun=2.output @@ -0,0 +1,175 @@ + +DEAL:0:2d::writing +DEAL:0:2d::cellid=0_1:0 fe_index=0 +DEAL:0:2d::cellid=0_1:1 fe_index=1 +DEAL:0:2d::cellid=0_1:2 fe_index=2 +DEAL:0:2d::cellid=0_1:3 fe_index=3 +DEAL:0:2d::cellid=1_1:0 fe_index=4 +DEAL:0:2d::cellid=1_1:1 fe_index=0 +DEAL:0:2d::cellid=1_1:2 fe_index=1 +DEAL:0:2d::cellid=1_1:3 fe_index=2 +DEAL:0:2d::reading +DEAL:0:2d::cellid=0_1:0 fe_index=0 +DEAL:0:2d::cellid=0_1:1 fe_index=1 +DEAL:0:2d::cellid=0_1:2 fe_index=2 +DEAL:0:2d::cellid=0_1:3 fe_index=3 +DEAL:0:2d::cellid=1_1:0 fe_index=4 +DEAL:0:2d::cellid=1_1:1 fe_index=0 +DEAL:0:2d::cellid=1_1:2 fe_index=1 +DEAL:0:2d::cellid=1_1:3 fe_index=2 +DEAL:0:2d::OK +DEAL:0:3d::writing +DEAL:0:3d::cellid=0_1:0 fe_index=0 +DEAL:0:3d::cellid=0_1:1 fe_index=1 +DEAL:0:3d::cellid=0_1:2 fe_index=2 +DEAL:0:3d::cellid=0_1:3 fe_index=3 +DEAL:0:3d::cellid=0_1:4 fe_index=4 +DEAL:0:3d::cellid=0_1:5 fe_index=5 +DEAL:0:3d::cellid=0_1:6 fe_index=6 +DEAL:0:3d::cellid=0_1:7 fe_index=7 +DEAL:0:3d::cellid=1_1:0 fe_index=8 +DEAL:0:3d::cellid=1_1:1 fe_index=0 +DEAL:0:3d::cellid=1_1:2 fe_index=1 +DEAL:0:3d::cellid=1_1:3 fe_index=2 +DEAL:0:3d::cellid=1_1:4 fe_index=3 +DEAL:0:3d::cellid=1_1:5 fe_index=4 +DEAL:0:3d::cellid=1_1:6 fe_index=5 +DEAL:0:3d::cellid=1_1:7 fe_index=6 +DEAL:0:3d::cellid=2_1:0 fe_index=7 +DEAL:0:3d::cellid=2_1:1 fe_index=8 +DEAL:0:3d::cellid=2_1:2 fe_index=0 +DEAL:0:3d::cellid=2_1:3 fe_index=1 +DEAL:0:3d::cellid=2_1:4 fe_index=2 +DEAL:0:3d::cellid=2_1:5 fe_index=3 +DEAL:0:3d::cellid=2_1:6 fe_index=4 +DEAL:0:3d::cellid=2_1:7 fe_index=5 +DEAL:0:3d::cellid=3_1:0 fe_index=6 +DEAL:0:3d::cellid=3_1:1 fe_index=7 +DEAL:0:3d::cellid=3_1:2 fe_index=8 +DEAL:0:3d::cellid=3_1:3 fe_index=0 +DEAL:0:3d::cellid=3_1:4 fe_index=1 +DEAL:0:3d::cellid=3_1:5 fe_index=2 +DEAL:0:3d::cellid=3_1:6 fe_index=3 +DEAL:0:3d::cellid=3_1:7 fe_index=4 +DEAL:0:3d::reading +DEAL:0:3d::cellid=0_1:0 fe_index=0 +DEAL:0:3d::cellid=0_1:1 fe_index=1 +DEAL:0:3d::cellid=0_1:2 fe_index=2 +DEAL:0:3d::cellid=0_1:3 fe_index=3 +DEAL:0:3d::cellid=0_1:4 fe_index=4 +DEAL:0:3d::cellid=0_1:5 fe_index=5 +DEAL:0:3d::cellid=0_1:6 fe_index=6 +DEAL:0:3d::cellid=0_1:7 fe_index=7 +DEAL:0:3d::cellid=1_1:0 fe_index=8 +DEAL:0:3d::cellid=1_1:1 fe_index=0 +DEAL:0:3d::cellid=1_1:2 fe_index=1 +DEAL:0:3d::cellid=1_1:3 fe_index=2 +DEAL:0:3d::cellid=1_1:4 fe_index=3 +DEAL:0:3d::cellid=1_1:5 fe_index=4 +DEAL:0:3d::cellid=1_1:6 fe_index=5 +DEAL:0:3d::cellid=1_1:7 fe_index=6 +DEAL:0:3d::cellid=2_1:0 fe_index=7 +DEAL:0:3d::cellid=2_1:1 fe_index=8 +DEAL:0:3d::cellid=2_1:2 fe_index=0 +DEAL:0:3d::cellid=2_1:3 fe_index=1 +DEAL:0:3d::cellid=2_1:4 fe_index=2 +DEAL:0:3d::cellid=2_1:5 fe_index=3 +DEAL:0:3d::cellid=2_1:6 fe_index=4 +DEAL:0:3d::cellid=2_1:7 fe_index=5 +DEAL:0:3d::cellid=3_1:0 fe_index=6 +DEAL:0:3d::cellid=3_1:1 fe_index=7 +DEAL:0:3d::cellid=3_1:2 fe_index=8 +DEAL:0:3d::cellid=3_1:3 
fe_index=0 +DEAL:0:3d::cellid=3_1:4 fe_index=1 +DEAL:0:3d::cellid=3_1:5 fe_index=2 +DEAL:0:3d::cellid=3_1:6 fe_index=3 +DEAL:0:3d::cellid=3_1:7 fe_index=4 +DEAL:0:3d::OK + +DEAL:1:2d::writing +DEAL:1:2d::cellid=2_1:0 fe_index=0 +DEAL:1:2d::cellid=2_1:1 fe_index=1 +DEAL:1:2d::cellid=2_1:2 fe_index=2 +DEAL:1:2d::cellid=2_1:3 fe_index=3 +DEAL:1:2d::cellid=3_1:0 fe_index=4 +DEAL:1:2d::cellid=3_1:1 fe_index=0 +DEAL:1:2d::cellid=3_1:2 fe_index=1 +DEAL:1:2d::cellid=3_1:3 fe_index=2 +DEAL:1:2d::reading +DEAL:1:2d::cellid=2_1:0 fe_index=0 +DEAL:1:2d::cellid=2_1:1 fe_index=1 +DEAL:1:2d::cellid=2_1:2 fe_index=2 +DEAL:1:2d::cellid=2_1:3 fe_index=3 +DEAL:1:2d::cellid=3_1:0 fe_index=4 +DEAL:1:2d::cellid=3_1:1 fe_index=0 +DEAL:1:2d::cellid=3_1:2 fe_index=1 +DEAL:1:2d::cellid=3_1:3 fe_index=2 +DEAL:1:2d::OK +DEAL:1:3d::writing +DEAL:1:3d::cellid=4_1:0 fe_index=0 +DEAL:1:3d::cellid=4_1:1 fe_index=1 +DEAL:1:3d::cellid=4_1:2 fe_index=2 +DEAL:1:3d::cellid=4_1:3 fe_index=3 +DEAL:1:3d::cellid=4_1:4 fe_index=4 +DEAL:1:3d::cellid=4_1:5 fe_index=5 +DEAL:1:3d::cellid=4_1:6 fe_index=6 +DEAL:1:3d::cellid=4_1:7 fe_index=7 +DEAL:1:3d::cellid=5_1:0 fe_index=8 +DEAL:1:3d::cellid=5_1:1 fe_index=0 +DEAL:1:3d::cellid=5_1:2 fe_index=1 +DEAL:1:3d::cellid=5_1:3 fe_index=2 +DEAL:1:3d::cellid=5_1:4 fe_index=3 +DEAL:1:3d::cellid=5_1:5 fe_index=4 +DEAL:1:3d::cellid=5_1:6 fe_index=5 +DEAL:1:3d::cellid=5_1:7 fe_index=6 +DEAL:1:3d::cellid=6_1:0 fe_index=7 +DEAL:1:3d::cellid=6_1:1 fe_index=8 +DEAL:1:3d::cellid=6_1:2 fe_index=0 +DEAL:1:3d::cellid=6_1:3 fe_index=1 +DEAL:1:3d::cellid=6_1:4 fe_index=2 +DEAL:1:3d::cellid=6_1:5 fe_index=3 +DEAL:1:3d::cellid=6_1:6 fe_index=4 +DEAL:1:3d::cellid=6_1:7 fe_index=5 +DEAL:1:3d::cellid=7_1:0 fe_index=6 +DEAL:1:3d::cellid=7_1:1 fe_index=7 +DEAL:1:3d::cellid=7_1:2 fe_index=8 +DEAL:1:3d::cellid=7_1:3 fe_index=0 +DEAL:1:3d::cellid=7_1:4 fe_index=1 +DEAL:1:3d::cellid=7_1:5 fe_index=2 +DEAL:1:3d::cellid=7_1:6 fe_index=3 +DEAL:1:3d::cellid=7_1:7 fe_index=4 +DEAL:1:3d::reading +DEAL:1:3d::cellid=4_1:0 fe_index=0 +DEAL:1:3d::cellid=4_1:1 fe_index=1 +DEAL:1:3d::cellid=4_1:2 fe_index=2 +DEAL:1:3d::cellid=4_1:3 fe_index=3 +DEAL:1:3d::cellid=4_1:4 fe_index=4 +DEAL:1:3d::cellid=4_1:5 fe_index=5 +DEAL:1:3d::cellid=4_1:6 fe_index=6 +DEAL:1:3d::cellid=4_1:7 fe_index=7 +DEAL:1:3d::cellid=5_1:0 fe_index=8 +DEAL:1:3d::cellid=5_1:1 fe_index=0 +DEAL:1:3d::cellid=5_1:2 fe_index=1 +DEAL:1:3d::cellid=5_1:3 fe_index=2 +DEAL:1:3d::cellid=5_1:4 fe_index=3 +DEAL:1:3d::cellid=5_1:5 fe_index=4 +DEAL:1:3d::cellid=5_1:6 fe_index=5 +DEAL:1:3d::cellid=5_1:7 fe_index=6 +DEAL:1:3d::cellid=6_1:0 fe_index=7 +DEAL:1:3d::cellid=6_1:1 fe_index=8 +DEAL:1:3d::cellid=6_1:2 fe_index=0 +DEAL:1:3d::cellid=6_1:3 fe_index=1 +DEAL:1:3d::cellid=6_1:4 fe_index=2 +DEAL:1:3d::cellid=6_1:5 fe_index=3 +DEAL:1:3d::cellid=6_1:6 fe_index=4 +DEAL:1:3d::cellid=6_1:7 fe_index=5 +DEAL:1:3d::cellid=7_1:0 fe_index=6 +DEAL:1:3d::cellid=7_1:1 fe_index=7 +DEAL:1:3d::cellid=7_1:2 fe_index=8 +DEAL:1:3d::cellid=7_1:3 fe_index=0 +DEAL:1:3d::cellid=7_1:4 fe_index=1 +DEAL:1:3d::cellid=7_1:5 fe_index=2 +DEAL:1:3d::cellid=7_1:6 fe_index=3 +DEAL:1:3d::cellid=7_1:7 fe_index=4 +DEAL:1:3d::OK + diff --git a/tests/mpi/hp_active_fe_indices_transfer_02.with_p4est=true.mpirun=8.output b/tests/mpi/hp_active_fe_indices_transfer_02.with_p4est=true.mpirun=8.output new file mode 100644 index 0000000000..883234fd1c --- /dev/null +++ b/tests/mpi/hp_active_fe_indices_transfer_02.with_p4est=true.mpirun=8.output @@ -0,0 +1,223 @@ + +DEAL:0:2d::writing +DEAL:0:2d::cellid=0_1:0 fe_index=0 
+DEAL:0:2d::cellid=0_1:1 fe_index=1 +DEAL:0:2d::cellid=0_1:2 fe_index=2 +DEAL:0:2d::cellid=0_1:3 fe_index=3 +DEAL:0:2d::reading +DEAL:0:2d::cellid=0_1:0 fe_index=0 +DEAL:0:2d::cellid=0_1:1 fe_index=1 +DEAL:0:2d::OK +DEAL:0:3d::writing +DEAL:0:3d::cellid=0_1:0 fe_index=0 +DEAL:0:3d::cellid=0_1:1 fe_index=1 +DEAL:0:3d::cellid=0_1:2 fe_index=2 +DEAL:0:3d::cellid=0_1:3 fe_index=3 +DEAL:0:3d::cellid=0_1:4 fe_index=4 +DEAL:0:3d::cellid=0_1:5 fe_index=5 +DEAL:0:3d::cellid=0_1:6 fe_index=6 +DEAL:0:3d::cellid=0_1:7 fe_index=7 +DEAL:0:3d::reading +DEAL:0:3d::cellid=0_1:0 fe_index=0 +DEAL:0:3d::cellid=0_1:1 fe_index=1 +DEAL:0:3d::cellid=0_1:2 fe_index=2 +DEAL:0:3d::cellid=0_1:3 fe_index=3 +DEAL:0:3d::cellid=0_1:4 fe_index=4 +DEAL:0:3d::cellid=0_1:5 fe_index=5 +DEAL:0:3d::cellid=0_1:6 fe_index=6 +DEAL:0:3d::cellid=0_1:7 fe_index=7 +DEAL:0:3d::OK + +DEAL:1:2d::writing +DEAL:1:2d::reading +DEAL:1:2d::cellid=0_1:2 fe_index=2 +DEAL:1:2d::cellid=0_1:3 fe_index=3 +DEAL:1:2d::OK +DEAL:1:3d::writing +DEAL:1:3d::cellid=1_1:0 fe_index=0 +DEAL:1:3d::cellid=1_1:1 fe_index=1 +DEAL:1:3d::cellid=1_1:2 fe_index=2 +DEAL:1:3d::cellid=1_1:3 fe_index=3 +DEAL:1:3d::cellid=1_1:4 fe_index=4 +DEAL:1:3d::cellid=1_1:5 fe_index=5 +DEAL:1:3d::cellid=1_1:6 fe_index=6 +DEAL:1:3d::cellid=1_1:7 fe_index=7 +DEAL:1:3d::reading +DEAL:1:3d::cellid=1_1:0 fe_index=0 +DEAL:1:3d::cellid=1_1:1 fe_index=1 +DEAL:1:3d::cellid=1_1:2 fe_index=2 +DEAL:1:3d::cellid=1_1:3 fe_index=3 +DEAL:1:3d::cellid=1_1:4 fe_index=4 +DEAL:1:3d::cellid=1_1:5 fe_index=5 +DEAL:1:3d::cellid=1_1:6 fe_index=6 +DEAL:1:3d::cellid=1_1:7 fe_index=7 +DEAL:1:3d::OK + + +DEAL:2:2d::writing +DEAL:2:2d::cellid=1_1:0 fe_index=0 +DEAL:2:2d::cellid=1_1:1 fe_index=1 +DEAL:2:2d::cellid=1_1:2 fe_index=2 +DEAL:2:2d::cellid=1_1:3 fe_index=3 +DEAL:2:2d::reading +DEAL:2:2d::cellid=1_1:0 fe_index=0 +DEAL:2:2d::cellid=1_1:1 fe_index=1 +DEAL:2:2d::OK +DEAL:2:3d::writing +DEAL:2:3d::cellid=2_1:0 fe_index=0 +DEAL:2:3d::cellid=2_1:1 fe_index=1 +DEAL:2:3d::cellid=2_1:2 fe_index=2 +DEAL:2:3d::cellid=2_1:3 fe_index=3 +DEAL:2:3d::cellid=2_1:4 fe_index=4 +DEAL:2:3d::cellid=2_1:5 fe_index=5 +DEAL:2:3d::cellid=2_1:6 fe_index=6 +DEAL:2:3d::cellid=2_1:7 fe_index=7 +DEAL:2:3d::reading +DEAL:2:3d::cellid=2_1:0 fe_index=0 +DEAL:2:3d::cellid=2_1:1 fe_index=1 +DEAL:2:3d::cellid=2_1:2 fe_index=2 +DEAL:2:3d::cellid=2_1:3 fe_index=3 +DEAL:2:3d::cellid=2_1:4 fe_index=4 +DEAL:2:3d::cellid=2_1:5 fe_index=5 +DEAL:2:3d::cellid=2_1:6 fe_index=6 +DEAL:2:3d::cellid=2_1:7 fe_index=7 +DEAL:2:3d::OK + + +DEAL:3:2d::writing +DEAL:3:2d::reading +DEAL:3:2d::cellid=1_1:2 fe_index=2 +DEAL:3:2d::cellid=1_1:3 fe_index=3 +DEAL:3:2d::OK +DEAL:3:3d::writing +DEAL:3:3d::cellid=3_1:0 fe_index=0 +DEAL:3:3d::cellid=3_1:1 fe_index=1 +DEAL:3:3d::cellid=3_1:2 fe_index=2 +DEAL:3:3d::cellid=3_1:3 fe_index=3 +DEAL:3:3d::cellid=3_1:4 fe_index=4 +DEAL:3:3d::cellid=3_1:5 fe_index=5 +DEAL:3:3d::cellid=3_1:6 fe_index=6 +DEAL:3:3d::cellid=3_1:7 fe_index=7 +DEAL:3:3d::reading +DEAL:3:3d::cellid=3_1:0 fe_index=0 +DEAL:3:3d::cellid=3_1:1 fe_index=1 +DEAL:3:3d::cellid=3_1:2 fe_index=2 +DEAL:3:3d::cellid=3_1:3 fe_index=3 +DEAL:3:3d::cellid=3_1:4 fe_index=4 +DEAL:3:3d::cellid=3_1:5 fe_index=5 +DEAL:3:3d::cellid=3_1:6 fe_index=6 +DEAL:3:3d::cellid=3_1:7 fe_index=7 +DEAL:3:3d::OK + + +DEAL:4:2d::writing +DEAL:4:2d::cellid=2_1:0 fe_index=0 +DEAL:4:2d::cellid=2_1:1 fe_index=1 +DEAL:4:2d::cellid=2_1:2 fe_index=2 +DEAL:4:2d::cellid=2_1:3 fe_index=3 +DEAL:4:2d::reading +DEAL:4:2d::cellid=2_1:0 fe_index=0 +DEAL:4:2d::cellid=2_1:1 fe_index=1 +DEAL:4:2d::OK 
+DEAL:4:3d::writing +DEAL:4:3d::cellid=4_1:0 fe_index=0 +DEAL:4:3d::cellid=4_1:1 fe_index=1 +DEAL:4:3d::cellid=4_1:2 fe_index=2 +DEAL:4:3d::cellid=4_1:3 fe_index=3 +DEAL:4:3d::cellid=4_1:4 fe_index=4 +DEAL:4:3d::cellid=4_1:5 fe_index=5 +DEAL:4:3d::cellid=4_1:6 fe_index=6 +DEAL:4:3d::cellid=4_1:7 fe_index=7 +DEAL:4:3d::reading +DEAL:4:3d::cellid=4_1:0 fe_index=0 +DEAL:4:3d::cellid=4_1:1 fe_index=1 +DEAL:4:3d::cellid=4_1:2 fe_index=2 +DEAL:4:3d::cellid=4_1:3 fe_index=3 +DEAL:4:3d::cellid=4_1:4 fe_index=4 +DEAL:4:3d::cellid=4_1:5 fe_index=5 +DEAL:4:3d::cellid=4_1:6 fe_index=6 +DEAL:4:3d::cellid=4_1:7 fe_index=7 +DEAL:4:3d::OK + + +DEAL:5:2d::writing +DEAL:5:2d::reading +DEAL:5:2d::cellid=2_1:2 fe_index=2 +DEAL:5:2d::cellid=2_1:3 fe_index=3 +DEAL:5:2d::OK +DEAL:5:3d::writing +DEAL:5:3d::cellid=5_1:0 fe_index=0 +DEAL:5:3d::cellid=5_1:1 fe_index=1 +DEAL:5:3d::cellid=5_1:2 fe_index=2 +DEAL:5:3d::cellid=5_1:3 fe_index=3 +DEAL:5:3d::cellid=5_1:4 fe_index=4 +DEAL:5:3d::cellid=5_1:5 fe_index=5 +DEAL:5:3d::cellid=5_1:6 fe_index=6 +DEAL:5:3d::cellid=5_1:7 fe_index=7 +DEAL:5:3d::reading +DEAL:5:3d::cellid=5_1:0 fe_index=0 +DEAL:5:3d::cellid=5_1:1 fe_index=1 +DEAL:5:3d::cellid=5_1:2 fe_index=2 +DEAL:5:3d::cellid=5_1:3 fe_index=3 +DEAL:5:3d::cellid=5_1:4 fe_index=4 +DEAL:5:3d::cellid=5_1:5 fe_index=5 +DEAL:5:3d::cellid=5_1:6 fe_index=6 +DEAL:5:3d::cellid=5_1:7 fe_index=7 +DEAL:5:3d::OK + + +DEAL:6:2d::writing +DEAL:6:2d::cellid=3_1:0 fe_index=0 +DEAL:6:2d::cellid=3_1:1 fe_index=1 +DEAL:6:2d::cellid=3_1:2 fe_index=2 +DEAL:6:2d::cellid=3_1:3 fe_index=3 +DEAL:6:2d::reading +DEAL:6:2d::cellid=3_1:0 fe_index=0 +DEAL:6:2d::cellid=3_1:1 fe_index=1 +DEAL:6:2d::OK +DEAL:6:3d::writing +DEAL:6:3d::cellid=6_1:0 fe_index=0 +DEAL:6:3d::cellid=6_1:1 fe_index=1 +DEAL:6:3d::cellid=6_1:2 fe_index=2 +DEAL:6:3d::cellid=6_1:3 fe_index=3 +DEAL:6:3d::cellid=6_1:4 fe_index=4 +DEAL:6:3d::cellid=6_1:5 fe_index=5 +DEAL:6:3d::cellid=6_1:6 fe_index=6 +DEAL:6:3d::cellid=6_1:7 fe_index=7 +DEAL:6:3d::reading +DEAL:6:3d::cellid=6_1:0 fe_index=0 +DEAL:6:3d::cellid=6_1:1 fe_index=1 +DEAL:6:3d::cellid=6_1:2 fe_index=2 +DEAL:6:3d::cellid=6_1:3 fe_index=3 +DEAL:6:3d::cellid=6_1:4 fe_index=4 +DEAL:6:3d::cellid=6_1:5 fe_index=5 +DEAL:6:3d::cellid=6_1:6 fe_index=6 +DEAL:6:3d::cellid=6_1:7 fe_index=7 +DEAL:6:3d::OK + + +DEAL:7:2d::writing +DEAL:7:2d::reading +DEAL:7:2d::cellid=3_1:2 fe_index=2 +DEAL:7:2d::cellid=3_1:3 fe_index=3 +DEAL:7:2d::OK +DEAL:7:3d::writing +DEAL:7:3d::cellid=7_1:0 fe_index=0 +DEAL:7:3d::cellid=7_1:1 fe_index=1 +DEAL:7:3d::cellid=7_1:2 fe_index=2 +DEAL:7:3d::cellid=7_1:3 fe_index=3 +DEAL:7:3d::cellid=7_1:4 fe_index=4 +DEAL:7:3d::cellid=7_1:5 fe_index=5 +DEAL:7:3d::cellid=7_1:6 fe_index=6 +DEAL:7:3d::cellid=7_1:7 fe_index=7 +DEAL:7:3d::reading +DEAL:7:3d::cellid=7_1:0 fe_index=0 +DEAL:7:3d::cellid=7_1:1 fe_index=1 +DEAL:7:3d::cellid=7_1:2 fe_index=2 +DEAL:7:3d::cellid=7_1:3 fe_index=3 +DEAL:7:3d::cellid=7_1:4 fe_index=4 +DEAL:7:3d::cellid=7_1:5 fe_index=5 +DEAL:7:3d::cellid=7_1:6 fe_index=6 +DEAL:7:3d::cellid=7_1:7 fe_index=7 +DEAL:7:3d::OK + diff --git a/tests/mpi/hp_cell_weights_01.cc b/tests/mpi/hp_cell_weights_01.cc index 838e48cc85..6d15d5841b 100644 --- a/tests/mpi/hp_cell_weights_01.cc +++ b/tests/mpi/hp_cell_weights_01.cc @@ -30,7 +30,6 @@ // This test works on a parallel::distributed::Triangulation. 
-#include <deal.II/distributed/active_fe_indices_transfer.h>
 #include <deal.II/distributed/cell_weights.h>
 #include <deal.II/distributed/tria.h>
 
@@ -66,9 +65,6 @@ test()
   dh.distribute_dofs(fe_collection);
 
-  parallel::distributed::ActiveFEIndicesTransfer<dim> feidx_transfer(dh);
-  feidx_transfer.prepare_for_transfer();
-
   parallel::CellWeights<dim> cell_weights(dh);
   cell_weights.register_ndofs_weighting(100000);
 
@@ -85,7 +81,6 @@ test()
   tria.repartition();
 
-  feidx_transfer.unpack();
   dh.distribute_dofs(fe_collection);
 
diff --git a/tests/mpi/hp_cell_weights_02.cc b/tests/mpi/hp_cell_weights_02.cc
index 52b8a66080..5d13dff774 100644
--- a/tests/mpi/hp_cell_weights_02.cc
+++ b/tests/mpi/hp_cell_weights_02.cc
@@ -36,7 +36,6 @@
 // to 'cut' its tree on a parent branch that does not exist in this case.
 
-#include <deal.II/distributed/active_fe_indices_transfer.h>
 #include <deal.II/distributed/cell_weights.h>
 #include <deal.II/distributed/tria.h>
 
@@ -72,9 +71,6 @@ test()
   dh.distribute_dofs(fe_collection);
 
-  parallel::distributed::ActiveFEIndicesTransfer<dim> feidx_transfer(dh);
-  feidx_transfer.prepare_for_transfer();
-
   parallel::CellWeights<dim> cell_weights(dh);
   cell_weights.register_ndofs_weighting(100000);
 
@@ -91,7 +87,6 @@ test()
   tria.repartition();
 
-  feidx_transfer.unpack();
   dh.distribute_dofs(fe_collection);
 
diff --git a/tests/mpi/p4est_save_06.cc b/tests/mpi/p4est_save_06.cc
index 446242a454..5af46a8696 100644
--- a/tests/mpi/p4est_save_06.cc
+++ b/tests/mpi/p4est_save_06.cc
@@ -21,7 +21,6 @@
 #include <deal.II/base/function.h>
 #include <deal.II/base/utilities.h>
 
-#include <deal.II/distributed/active_fe_indices_transfer.h>
 #include <deal.II/distributed/solution_transfer.h>
 #include <deal.II/distributed/tria.h>
 
@@ -93,8 +92,6 @@ test()
                                        locally_relevant_dofs,
                                        com_small);
 
-  parallel::distributed::ActiveFEIndicesTransfer<dim> feidx_transfer(
-    dh);
   parallel::distributed::
     SolutionTransfer<dim, PETScWrappers::MPI::Vector, hp::DoFHandler<dim>>
       soltrans(dh);
@@ -109,7 +106,7 @@ test()
   x.compress(VectorOperation::insert);
   rel_x = x;
 
-  feidx_transfer.prepare_for_transfer();
+  dh.prepare_for_serialization_of_active_fe_indices();
   soltrans.prepare_serialization(rel_x);
 
   tr.save("file");
@@ -138,10 +135,7 @@ test()
     fe_collection.push_back(FE_Q<dim>(max_degree - i));
 
   dh.distribute_dofs(fe_collection);
-
-  parallel::distributed::ActiveFEIndicesTransfer<dim> feidx_transfer(dh);
-  feidx_transfer.deserialize();
-
+  dh.deserialize_active_fe_indices();
   dh.distribute_dofs(fe_collection);
 
   IndexSet locally_owned_dofs = dh.locally_owned_dofs();
diff --git a/tests/mpi/solution_transfer_04.cc b/tests/mpi/solution_transfer_04.cc
index 6edb651b38..6f81c5cf5f 100644
--- a/tests/mpi/solution_transfer_04.cc
+++ b/tests/mpi/solution_transfer_04.cc
@@ -19,7 +19,6 @@
 // This tests is based on mpi/feindices_transfer.cc
 
-#include <deal.II/distributed/active_fe_indices_transfer.h>
 #include <deal.II/distributed/solution_transfer.h>
 #include <deal.II/distributed/tria.h>
 
@@ -101,17 +100,13 @@ test()
 
   // ----- transfer -----
-  parallel::distributed::ActiveFEIndicesTransfer<dim> feidx_transfer(dh);
   parallel::distributed::
     SolutionTransfer<dim, TrilinosWrappers::MPI::Vector, hp::DoFHandler<dim>>
       soltrans(dh);
 
-  feidx_transfer.prepare_for_transfer();
   soltrans.prepare_for_coarsening_and_refinement(old_solution);
   tria.execute_coarsening_and_refinement();
 
-  feidx_transfer.unpack();
-
   dh.distribute_dofs(fe_collection);
   locally_owned_dofs = dh.locally_owned_dofs();
   DoFTools::extract_locally_relevant_dofs(dh, locally_relevant_dofs);
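For applications being ported to the new interface, the serialization workflow that the updated p4est_save_06.cc test exercises boils down to the following sketch. This is not part of the patch itself: it assumes a parallel::distributed::Triangulation `tria`, an hp::FECollection `fe_collection`, and per-cell set_active_fe_index() calls made elsewhere, and it mirrors the call order of the test above.

// Minimal sketch of the new serialization workflow; `tria` and
// `fe_collection` are assumed to be set up as in the tests above.
hp::DoFHandler<dim> dh(tria);
dh.distribute_dofs(fe_collection);
// ... call cell->set_active_fe_index(...) on locally owned cells ...

// writing: register the active fe indices with the triangulation,
// then serialize the mesh together with the attached data
dh.prepare_for_serialization_of_active_fe_indices();
tria.save("file");

// reading (typically in a fresh run, after recreating tria, dh, and
// fe_collection): restore the mesh, recover the indices, then
// enumerate degrees of freedom with the restored finite elements
tria.load("file");
dh.distribute_dofs(fe_collection);
dh.deserialize_active_fe_indices();
dh.distribute_dofs(fe_collection);

Plain refinement, coarsening, and repartitioning, by contrast, no longer require any such calls: as the hp_cell_weights and solution_transfer test updates above show, the former prepare/unpack steps are simply deleted.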