From: Marc Fehling
Date: Wed, 17 Apr 2019 20:16:37 +0000 (+0200)
Subject: Minor cleanup of hp::DoFHandler.
X-Git-Tag: v9.1.0-rc1~81^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F7933%2Fhead;p=dealii.git

Minor cleanup of hp::DoFHandler.
---
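What this cleanup means for users of the hp interface: set_fe() now performs
the sanity checks and initializes all active FE indices itself, and the
parallel::shared::Triangulation code paths reuse the generic pre/post
transfer functions instead of dedicated *_shared_* variants. A minimal
sketch of the resulting workflow, assuming the 9.1-era API exercised by the
tests below (function and variable names here are illustrative, not part of
the library):

    #include <deal.II/base/mpi.h>

    #include <deal.II/distributed/shared_tria.h>

    #include <deal.II/fe/fe_q.h>

    #include <deal.II/grid/grid_generator.h>

    #include <deal.II/hp/dof_handler.h>
    #include <deal.II/hp/fe_collection.h>

    // Sketch, not a drop-in test: set_fe() registers the collection and
    // initializes every cell's active_fe_index, so the handler can follow
    // the triangulation through refinement before DoFs are enumerated.
    template <int dim>
    void assign_then_refine(MPI_Comm communicator)
    {
      dealii::parallel::shared::Triangulation<dim> tria(
        communicator,
        dealii::Triangulation<dim>::none,
        /*allow_artificial_cells=*/true,
        dealii::parallel::shared::Triangulation<dim>::partition_metis);

      dealii::GridGenerator::hyper_cube(tria);
      tria.refine_global(2);

      dealii::hp::FECollection<dim> fe_collection;
      fe_collection.push_back(dealii::FE_Q<dim>(1));
      fe_collection.push_back(dealii::FE_Q<dim>(2));

      dealii::hp::DoFHandler<dim> dof_handler(tria);
      dof_handler.set_fe(fe_collection); // no distribute_dofs() needed yet

      for (const auto &cell : dof_handler.active_cell_iterators())
        if (cell->is_locally_owned())
          cell->set_active_fe_index(1);

      // The pre/post refinement signals connected by this patch carry the
      // indices across mesh changes, now also for shared triangulations.
      tria.refine_global(1);

      dof_handler.distribute_dofs(fe_collection);
    }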
diff --git a/include/deal.II/hp/dof_handler.h b/include/deal.II/hp/dof_handler.h
index 6a11be2f7a..064503e358 100644
--- a/include/deal.II/hp/dof_handler.h
+++ b/include/deal.II/hp/dof_handler.h
@@ -1122,7 +1122,8 @@ namespace hp
     /**
      * A function that will be triggered through a triangulation
-     * signal just before the associated Triangulation is modified.
+     * signal just before the associated Triangulation or
+     * parallel::shared::Triangulation is modified.
      *
      * The function that stores the active_fe_indices of all cells that will
      * be refined or coarsened before the refinement happens, so that
@@ -1131,18 +1132,6 @@ namespace hp
     void
     pre_active_fe_index_transfer();
 
-    /**
-     * A function that will be triggered through a triangulation
-     * signal just before the associated parallel::shared::Triangulation is
-     * modified.
-     *
-     * The function that stores the active_fe_indices of all cells that will
-     * be refined or coarsened before the refinement happens, so that
-     * they can be set again after refinement.
-     */
-    void
-    pre_shared_active_fe_index_transfer();
-
     /**
      * A function that will be triggered through a triangulation
      * signal just before the associated parallel::distributed::Triangulation is
@@ -1156,7 +1145,8 @@ namespace hp
     /**
      * A function that will be triggered through a triangulation
-     * signal just after the associated Triangulation is modified.
+     * signal just after the associated Triangulation or
+     * parallel::shared::Triangulation is modified.
      *
      * The function that restores the active_fe_indices of all cells that
      * were refined or coarsened.
@@ -1164,17 +1154,6 @@ namespace hp
     void
     post_active_fe_index_transfer();
 
-    /**
-     * A function that will be triggered through a triangulation
-     * signal just after the associated parallel::shared::Triangulation is
-     * modified.
-     *
-     * The function that restores the active_fe_indices of all cells that
-     * were refined or coarsened.
-     */
-    void
-    post_shared_active_fe_index_transfer();
-
     /**
      * A function that will be triggered through a triangulation
      * signal just after the associated parallel::distributed::Triangulation is
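For context on the "triangulation signal" wording in these comments:
Triangulation exposes boost::signals2 signal objects, and the DoFHandler
registers its transfer functions as listeners when it is attached to a
mesh. A schematic sketch (the lambdas stand in for the std::bind
expressions used in the actual sources):

    #include <deal.II/grid/tria.h>

    #include <boost/signals2.hpp>

    #include <vector>

    // Schematic only: the DoFHandler stores the connections returned by
    // connect() so it can sever them again when it is reassigned or dies.
    template <int dim>
    std::vector<boost::signals2::connection>
    connect_transfer_listeners(dealii::Triangulation<dim> &tria)
    {
      std::vector<boost::signals2::connection> tria_listeners;

      tria_listeners.push_back(tria.signals.pre_refinement.connect([]() {
        // role of DoFHandler::pre_active_fe_index_transfer(): record the
        // active_fe_indices of cells flagged for refinement or coarsening
      }));
      tria_listeners.push_back(tria.signals.post_refinement.connect([]() {
        // role of DoFHandler::post_active_fe_index_transfer(): re-apply
        // the recorded indices to the cells that replaced them
      }));

      return tria_listeners;
    }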
diff --git a/source/dofs/dof_handler_policy.cc b/source/dofs/dof_handler_policy.cc
index 671309ccf5..a203543799 100644
--- a/source/dofs/dof_handler_policy.cc
+++ b/source/dofs/dof_handler_policy.cc
@@ -3705,16 +3705,12 @@ namespace internal
             {
               saved_subdomain_ids.resize(tr->n_active_cells());
 
-              typename parallel::shared::Triangulation<dim, spacedim>::
-                active_cell_iterator
-                  cell = this->dof_handler->get_triangulation().begin_active(),
-                  endc = this->dof_handler->get_triangulation().end();
-
               const std::vector<types::subdomain_id> &true_subdomain_ids =
                 tr->get_true_subdomain_ids_of_cells();
 
-              for (unsigned int index = 0; cell != endc; ++cell, ++index)
+              for (const auto &cell : tr->active_cell_iterators())
                 {
+                  const unsigned int index = cell->active_cell_index();
                   saved_subdomain_ids[index] = cell->subdomain_id();
                   cell->set_subdomain_id(true_subdomain_ids[index]);
                 }
@@ -3834,15 +3830,9 @@ namespace internal
 
           // finally, restore current subdomain ids
           if (tr->with_artificial_cells())
-            {
-              typename parallel::shared::Triangulation<dim, spacedim>::
-                active_cell_iterator
-                  cell = this->dof_handler->get_triangulation().begin_active(),
-                  endc = this->dof_handler->get_triangulation().end();
-
-              for (unsigned int index = 0; cell != endc; ++cell, ++index)
-                cell->set_subdomain_id(saved_subdomain_ids[index]);
-            }
+            for (const auto &cell : tr->active_cell_iterators())
+              cell->set_subdomain_id(
+                saved_subdomain_ids[cell->active_cell_index()]);
 
           // return a NumberCache object made up from the sets of locally
           // owned DoFs
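The idiom these hunks convert to, in isolation: active_cell_index()
enumerates active cells contiguously in [0, n_active_cells()), so per-cell
data can live in a plain vector without threading a counter through an
iterator pair. A self-contained sketch of the save/restore pattern:

    #include <deal.II/grid/tria.h>

    #include <vector>

    // Sketch: stash per-cell data in a vector indexed by
    // active_cell_index(), which is contiguous over all active cells.
    template <int dim>
    void save_and_restore_subdomain_ids(dealii::Triangulation<dim> &tria)
    {
      std::vector<dealii::types::subdomain_id> saved_ids(
        tria.n_active_cells());

      for (const auto &cell : tria.active_cell_iterators())
        saved_ids[cell->active_cell_index()] = cell->subdomain_id();

      // ... temporarily reassign subdomain ids and do the actual work ...

      for (const auto &cell : tria.active_cell_iterators())
        cell->set_subdomain_id(saved_ids[cell->active_cell_index()]);
    }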
diff --git a/source/hp/dof_handler.cc b/source/hp/dof_handler.cc
index 78d5017b22..329a7c2ef0 100644
--- a/source/hp/dof_handler.cc
+++ b/source/hp/dof_handler.cc
@@ -961,7 +961,13 @@ namespace internal
       /**
        * Given a hp::DoFHandler object, make sure that the active_fe_indices
        * that a user has set for locally owned cells are communicated to all
-       * ghost cells as well.
+       * other relevant cells as well.
+       *
+       * For parallel::shared::Triangulation objects,
+       * this information is distributed on both ghost and artificial cells.
+       *
+       * In case a parallel::distributed::Triangulation is used,
+       * active_fe_indices are communicated only to ghost cells.
        */
       template <int dim, int spacedim>
       static void
@@ -994,14 +1000,14 @@ namespace internal
              tr->get_communicator(),
              active_fe_indices);
 
-            // now go back and fill the active_fe_index on ghost
+            // now go back and fill the active_fe_index on all other
            // cells. we would like to call cell->set_active_fe_index(),
            // but that function does not allow setting these indices on
            // non-locally_owned cells. so we have to work around the
            // issue a little bit by accessing the underlying data
            // structures directly
            for (const auto &cell : dof_handler.active_cell_iterators())
-              if (cell->is_ghost())
+              if (!cell->is_locally_owned())
                dof_handler.levels[cell->level()]->set_active_fe_index(
                  cell->index(),
                  active_fe_indices[cell->active_cell_index()]);
@@ -1191,12 +1197,9 @@ namespace internal
                 ++child_index)
              {
                const auto &child = parent->child(child_index);
-
-               if (child->is_locally_owned())
-                 {
-                   Assert(child->active(), ExcInternalError());
-                   child->set_active_fe_index(pair.second);
-                 }
+               Assert(child->is_locally_owned() && child->active(),
+                      ExcInternalError());
+               child->set_active_fe_index(pair.second);
              }
          }
 
@@ -1205,12 +1208,9 @@ namespace internal
        for (const auto &pair : fe_transfer->coarsened_cells_fe_index)
          {
            const auto &cell = pair.first;
-
-           if (cell->is_locally_owned())
-             {
-               Assert(cell->active(), ExcInternalError());
-               cell->set_active_fe_index(pair.second);
-             }
+           Assert(cell->is_locally_owned() && cell->active(),
+                  ExcInternalError());
+           cell->set_active_fe_index(pair.second);
          }
      }
    };
@@ -1537,7 +1537,7 @@ namespace hp
    // stored as protected data of this object, but for simplicity we
    // use the cell-wise access.
    for (const auto &cell : active_cell_iterators())
-     if (cell->is_locally_owned())
+     if (!cell->is_artificial())
        active_fe_indices[cell->active_cell_index()] =
          cell->active_fe_index();
  }
@@ -1571,6 +1571,15 @@ namespace hp
  void
  DoFHandler<dim, spacedim>::set_fe(const hp::FECollection<dim, spacedim> &ff)
  {
+   Assert(
+     tria != nullptr,
+     ExcMessage(
+       "You need to set the Triangulation in the DoFHandler using initialize() or "
+       "in the constructor before you can distribute DoFs."));
+   Assert(tria->n_levels() > 0,
+          ExcMessage("The Triangulation you are using is empty!"));
+   Assert(ff.size() > 0, ExcMessage("The hp::FECollection given is empty!"));
+
    // don't create a new object if the one we have is already appropriate
    if (fe_collection != ff)
      fe_collection = hp::FECollection<dim, spacedim>(ff);
@@ -1578,6 +1587,9 @@ namespace hp
    // ensure that the active_fe_indices vectors are initialized correctly
    create_active_fe_table();
 
+   // initialize all p-refinement and p-coarsening flags
+   create_p_adaptation_flags();
+
    // make sure every processor knows the active_fe_indices
    // on both its own cells and all ghost cells
    dealii::internal::hp::DoFHandlerImplementation::Implementation::
@@ -1586,13 +1598,10 @@ namespace hp
    // make sure that the fe collection is large enough to
    // cover all fe indices presently in use on the mesh
    for (const auto &cell : active_cell_iterators())
-     if (cell->is_locally_owned())
+     if (!cell->is_artificial())
        Assert(cell->active_fe_index() < fe_collection.size(),
               ExcInvalidFEIndex(cell->active_fe_index(),
                                 fe_collection.size()));
-
-   // initialize all p-refinement and p-coarsening flags
-   create_p_adaptation_flags();
  }
@@ -1602,53 +1611,40 @@ namespace hp
  template <int dim, int spacedim>
  void
  DoFHandler<dim, spacedim>::distribute_dofs(
    const hp::FECollection<dim, spacedim> &ff)
  {
-   Assert(
-     tria != nullptr,
-     ExcMessage(
-       "You need to set the Triangulation in the DoFHandler using initialize() or "
-       "in the constructor before you can distribute DoFs."));
-   Assert(tria->n_levels() > 0,
-          ExcMessage("The Triangulation you are using is empty!"));
-   Assert(ff.size() > 0, ExcMessage("The hp::FECollection given is empty!"));
+   // assign the fe_collection and initialize all active_fe_indices
+   set_fe(ff);
 
    // If an underlying shared::Tria allows artificial cells,
    // then save the current set of subdomain ids, and set
    // subdomain ids to the "true" owner of each cell. we later
    // restore these flags
-   std::vector<types::subdomain_id> saved_subdomain_ids;
-   if (const parallel::shared::Triangulation<dim, spacedim> *shared_tria =
-         (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim> *>(
-           &get_triangulation())))
-     if (shared_tria->with_artificial_cells())
-       {
-         saved_subdomain_ids.resize(shared_tria->n_active_cells());
-
-         const std::vector<types::subdomain_id> &true_subdomain_ids =
-           shared_tria->get_true_subdomain_ids_of_cells();
+   std::vector<types::subdomain_id> saved_subdomain_ids;
+   const parallel::shared::Triangulation<dim, spacedim> *shared_tria =
+     (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim> *>(
+       &get_triangulation()));
+   if (shared_tria != nullptr && shared_tria->with_artificial_cells())
+     {
+       saved_subdomain_ids.resize(shared_tria->n_active_cells());
 
-         for (const auto &cell : shared_tria->active_cell_iterators())
-           {
-             const unsigned int index = cell->active_cell_index();
-             saved_subdomain_ids[index] = cell->subdomain_id();
-             cell->set_subdomain_id(true_subdomain_ids[index]);
-           }
-       }
+       const std::vector<types::subdomain_id> &true_subdomain_ids =
+         shared_tria->get_true_subdomain_ids_of_cells();
 
-   // assign the fe_collection and initialize all active_fe_indices
-   set_fe(ff);
+       for (const auto &cell : shared_tria->active_cell_iterators())
+         {
+           const unsigned int index = cell->active_cell_index();
+           saved_subdomain_ids[index] = cell->subdomain_id();
+           cell->set_subdomain_id(true_subdomain_ids[index]);
+         }
+     }
 
    // then allocate space for all the other tables
    dealii::internal::hp::DoFHandlerImplementation::Implementation::
      reserve_space(*this);
 
    // now undo the subdomain modification
-   if (const parallel::shared::Triangulation<dim, spacedim> *shared_tria =
-         (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim> *>(
-           &get_triangulation())))
-     if (shared_tria->with_artificial_cells())
-       for (const auto &cell : shared_tria->active_cell_iterators())
-         cell->set_subdomain_id(
-           saved_subdomain_ids[cell->active_cell_index()]);
+   if (shared_tria != nullptr && shared_tria->with_artificial_cells())
+     for (const auto &cell : shared_tria->active_cell_iterators())
+       cell->set_subdomain_id(saved_subdomain_ids[cell->active_cell_index()]);
 
    // Clear user flags because we will need them. But first we save
@@ -1709,6 +1705,7 @@ namespace hp
            internal::DoFHandlerImplementation::Policy::ParallelDistributed<
              DoFHandler<dim, spacedim>>>(*this);
 
+         // repartitioning signals
          tria_listeners.push_back(
            this->tria->signals.pre_distributed_repartition.connect(std::bind(
              &DoFHandler<dim, spacedim>::pre_distributed_active_fe_index_transfer,
              std::ref(*this))));
          tria_listeners.push_back(
            this->tria->signals.post_distributed_repartition.connect(std::bind(
              &DoFHandler<dim, spacedim>::post_distributed_active_fe_index_transfer,
              std::ref(*this))));
 
+         // refinement signals
          tria_listeners.push_back(
            this->tria->signals.pre_distributed_refinement.connect(std::bind(
              &DoFHandler<dim, spacedim>::pre_distributed_active_fe_index_transfer,
              std::ref(*this))));
          tria_listeners.push_back(
            this->tria->signals.post_distributed_refinement.connect(std::bind(
              &DoFHandler<dim, spacedim>::post_distributed_active_fe_index_transfer,
              std::ref(*this))));
 
+         // serialization signals
          tria_listeners.push_back(
            this->tria->signals.post_distributed_save.connect(
              std::bind(&DoFHandler<dim, spacedim>::
                          post_distributed_serialization_of_active_fe_indices,
                        std::ref(*this))));
        }
-     else if (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim>
-                             *>(&this->get_triangulation()) != nullptr)
-       {
-         policy =
-           std_cxx14::make_unique<internal::DoFHandlerImplementation::Policy::
-                                    ParallelShared<DoFHandler<dim, spacedim>>>(
-             *this);
-
-         tria_listeners.push_back(
-           this->tria->signals.pre_refinement.connect(std::bind(
-             &DoFHandler<dim, spacedim>::pre_shared_active_fe_index_transfer,
-             std::ref(*this))));
-         tria_listeners.push_back(
-           this->tria->signals.post_refinement.connect(std::bind(
-             &DoFHandler<dim, spacedim>::post_shared_active_fe_index_transfer,
-             std::ref(*this))));
-       }
      else
        {
-         policy =
-           std_cxx14::make_unique<internal::DoFHandlerImplementation::Policy::
-                                    Sequential<DoFHandler<dim, spacedim>>>(
-             *this);
+         if (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim>
+                            *>(&this->get_triangulation()) != nullptr)
+           policy = std_cxx14::make_unique<
+             internal::DoFHandlerImplementation::Policy::ParallelShared<
+               DoFHandler<dim, spacedim>>>(*this);
+         else
+           policy = std_cxx14::make_unique<
+             internal::DoFHandlerImplementation::Policy::Sequential<
+               DoFHandler<dim, spacedim>>>(*this);
 
+         // refinement signals
          tria_listeners.push_back(this->tria->signals.pre_refinement.connect(
            std::bind(&DoFHandler<dim, spacedim>::pre_active_fe_index_transfer,
                      std::ref(*this))));
@@ -2021,72 +2011,6 @@ namespace hp
 
 
 
-  template <int dim, int spacedim>
-  void
-  DoFHandler<dim, spacedim>::pre_shared_active_fe_index_transfer()
-  {
-#ifndef DEAL_II_WITH_MPI
-    Assert(false, ExcInternalError());
-#else
-    // Finite elements need to be assigned to each cell by calling
-    // distribute_dofs() first to make this functionality available.
-    if (fe_collection.size() > 0)
-      {
-        Assert(active_fe_index_transfer == nullptr, ExcInternalError());
-
-        active_fe_index_transfer =
-          std_cxx14::make_unique<ActiveFEIndexTransfer>();
-
-        // If the underlying shared::Tria allows artificial cells,
-        // then save the current set of subdomain ids, and set
-        // subdomain ids to the "true" owner of each cell. We later
-        // restore these flags.
-        const parallel::shared::Triangulation<dim, spacedim> *shared_tria =
-          (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim> *>(
-            &(*tria)));
-        Assert(shared_tria != nullptr, ExcInternalError());
-
-        std::vector<types::subdomain_id> saved_subdomain_ids;
-        if (shared_tria->with_artificial_cells())
-          {
-            saved_subdomain_ids.resize(shared_tria->n_active_cells());
-
-            const std::vector<types::subdomain_id> &true_subdomain_ids =
-              shared_tria->get_true_subdomain_ids_of_cells();
-
-            for (const auto &cell : active_cell_iterators())
-              {
-                const unsigned int index = cell->active_cell_index();
-                saved_subdomain_ids[index] = cell->subdomain_id();
-                cell->set_subdomain_id(true_subdomain_ids[index]);
-              }
-
-            // Make sure every processor knows the active_fe_indices
-            // on both its own cells and all ghost cells.
-            dealii::internal::hp::DoFHandlerImplementation::Implementation::
-              communicate_active_fe_indices(*this);
-          }
-
-        // Now do what we would do in the sequential case.
-        dealii::internal::hp::DoFHandlerImplementation::Implementation::
-          collect_fe_indices_on_cells_to_be_refined(*this);
-
-        // Finally, restore current subdomain_ids.
-        if (shared_tria->with_artificial_cells())
-          for (const auto &cell : active_cell_iterators())
-            {
-              if (cell->is_artificial())
-                cell->set_subdomain_id(numbers::invalid_subdomain_id);
-              else
-                cell->set_subdomain_id(
-                  saved_subdomain_ids[cell->active_cell_index()]);
-            }
-      }
-#endif
-  }
-
-
-
  template <int dim, int spacedim>
  void
  DoFHandler<dim, spacedim>::pre_distributed_active_fe_index_transfer()
@@ -2133,9 +2057,15 @@ namespace hp
            for (unsigned int child_index = 0;
                 child_index < pair.first->n_children();
                 ++child_index)
-             active_fe_index_transfer
-               ->active_fe_indices[pair.first->child(child_index)
-                                     ->active_cell_index()] = pair.second;
+             {
+               // Make sure that all children belong to the same subdomain.
+               Assert(pair.first->child(child_index)->is_locally_owned(),
+                      ExcInternalError());
+
+               active_fe_index_transfer
+                 ->active_fe_indices[pair.first->child(child_index)
+                                       ->active_cell_index()] = pair.second;
+             }
 
        // Create transfer object and attach to it.
        const auto *distributed_tria = dynamic_cast<
@@ -2171,48 +2101,18 @@ namespace hp
      {
        Assert(active_fe_index_transfer != nullptr, ExcInternalError());
 
-       // For Triangulation and p::s::Triangulation, the old cell iterators
-       // are still valid. There is no need to transfer data in this case,
-       // and we can re-use our previously gathered information from the
-       // container.
-
-       dealii::internal::hp::DoFHandlerImplementation::Implementation::
-         distribute_fe_indices_on_refined_cells(*this);
-
-       // Free memory.
-       active_fe_index_transfer.reset();
-     }
-  }
-
-
-
-  template <int dim, int spacedim>
-  void
-  DoFHandler<dim, spacedim>::post_shared_active_fe_index_transfer()
-  {
-#ifndef DEAL_II_WITH_MPI
-    Assert(false, ExcInternalError());
-#else
-    // Finite elements need to be assigned to each cell by calling
-    // distribute_dofs() first to make this functionality available.
-    if (fe_collection.size() > 0)
-      {
-        Assert(active_fe_index_transfer != nullptr, ExcInternalError());
-
-        // Do what we normally do in the sequential case.
        dealii::internal::hp::DoFHandlerImplementation::Implementation::
          distribute_fe_indices_on_refined_cells(*this);
 
        // We have to distribute the information about active_fe_indices
-       // on all processors, if a parallel::shared::Triangulation
-       // has been used.
+       // of all cells (including the artificial ones) on all processors,
+       // if a parallel::shared::Triangulation has been used.
        dealii::internal::hp::DoFHandlerImplementation::Implementation::
          communicate_active_fe_indices(*this);
 
        // Free memory.
        active_fe_index_transfer.reset();
      }
-#endif
  }
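The communication step behind the widened loop above, condensed. This is a
sketch assuming the vector overload of Utilities::MPI::sum; because the
locally owned entries are set on exactly one rank and remain zero on all
others, the element-wise sum acts as an all-gather:

    #include <deal.II/base/mpi.h>
    #include <deal.II/distributed/shared_tria.h>
    #include <deal.II/hp/dof_handler.h>

    #include <vector>

    template <int dim>
    void gather_active_fe_indices(
      const dealii::parallel::shared::Triangulation<dim> &tria,
      const dealii::hp::DoFHandler<dim> &                 dof_handler)
    {
      // Every rank fills its locally owned entries; all other entries
      // stay zero, so a global element-wise sum reproduces the complete
      // vector on every participating process.
      std::vector<unsigned int> active_fe_indices(tria.n_active_cells(), 0u);

      for (const auto &cell : dof_handler.active_cell_iterators())
        if (cell->is_locally_owned())
          active_fe_indices[cell->active_cell_index()] =
            cell->active_fe_index();

      dealii::Utilities::MPI::sum(active_fe_indices,
                                  tria.get_communicator(),
                                  active_fe_indices);

      // The patch then writes these values back onto ghost *and*
      // artificial cells through the DoFHandler's internal tables.
    }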
diff --git a/tests/mpi/hp_cell_weights_04.cc b/tests/mpi/hp_cell_weights_04.cc
new file mode 100644
index 0000000000..3b0461bbea
--- /dev/null
+++ b/tests/mpi/hp_cell_weights_04.cc
@@ -0,0 +1,122 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+// Cell Weights Test
+// -----------------
+// Create a 4x4(x4) grid, on which all cells are associated with a Q1
+// element, except for the very first one, which has a Q5 element.
+// We choose a cell weighting algorithm based on the number of degrees
+// of freedom and check whether the load is balanced as expected after
+// repartitioning the triangulation. The expected accumulated weight on
+// each processor should correlate with the sum of all degrees of
+// freedom on all cells of the corresponding subdomain.
+// We employ a large proportionality factor on our weighting function
+// to make the standard weight of '1000' per cell negligible.
+//
+// This test works on a parallel::shared::Triangulation with METIS
+// as a partitioner. Cell weighting with ZOLTAN was not available
+// at the time this test was written.
+//
+// We consider artificial cells in this case.
+
+
+#include <deal.II/distributed/cell_weights.h>
+#include <deal.II/distributed/shared_tria.h>
+
+#include <deal.II/fe/fe_q.h>
+
+#include <deal.II/grid/grid_generator.h>
+
+#include <deal.II/hp/dof_handler.h>
+
+#include "../tests.h"
+
+
+template <int dim>
+void
+test()
+{
+  parallel::shared::Triangulation<dim> tria(
+    MPI_COMM_WORLD,
+    ::Triangulation<dim>::none,
+    true,
+    parallel::shared::Triangulation<dim>::Settings::partition_metis);
+
+  GridGenerator::hyper_cube(tria);
+  tria.refine_global(2);
+
+  // Apply ndof cell weights.
+  hp::FECollection<dim> fe_collection;
+  fe_collection.push_back(FE_Q<dim>(1));
+  fe_collection.push_back(FE_Q<dim>(5));
+
+  hp::DoFHandler<dim> dh(tria);
+  dh.set_fe(fe_collection);
+  // default: active_fe_index = 0
+  for (auto &cell : dh.active_cell_iterators())
+    if (cell->is_locally_owned())
+      if (cell->id().to_string() == "0_2:00")
+        cell->set_active_fe_index(1);
+
+  deallog << "Number of cells before repartitioning: "
+          << tria.n_locally_owned_active_cells() << std::endl;
+  {
+    unsigned int dof_counter = 0;
+    for (auto &cell : dh.active_cell_iterators())
+      if (cell->is_locally_owned())
+        dof_counter += cell->get_fe().dofs_per_cell;
+    deallog << "  Cumulative dofs per cell: " << dof_counter << std::endl;
+  }
+
+
+  parallel::CellWeights<dim> cell_weights(dh);
+  cell_weights.register_ndofs_weighting(100000);
+
+  // we didn't mark any cells, but we want to repartition our domain
+  tria.execute_coarsening_and_refinement();
+
+
+  deallog << "Number of cells after repartitioning: "
+          << tria.n_locally_owned_active_cells() << std::endl;
+  {
+    unsigned int dof_counter = 0;
+    for (auto &cell : dh.active_cell_iterators())
+      if (cell->is_locally_owned())
+        dof_counter += cell->get_fe().dofs_per_cell;
+    deallog << "  Cumulative dofs per cell: " << dof_counter << std::endl;
+  }
+
+  // make sure no processor is hanging
+  MPI_Barrier(MPI_COMM_WORLD);
+
+  deallog << "OK" << std::endl;
+}
+
+
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll                    log;
+
+  deallog.push("2d");
+  test<2>();
+  deallog.pop();
+  deallog.push("3d");
+  test<3>();
+  deallog.pop();
+}
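For orientation: register_ndofs_weighting(100000) makes every cell report a
weight proportional to its DoF count; the wiring through the
triangulation's cell-weight signal happens inside parallel::CellWeights. A
hypothetical stand-in for the weight computation, for illustration only
(not the library's actual callback signature):

    #include <deal.II/fe/fe_q.h>

    // Hypothetical stand-in for the arithmetic behind
    // register_ndofs_weighting(100000): with this factor, a 2d Q1 cell
    // weighs 400000 and a 2d Q5 cell 3600000, so the fixed base weight
    // of 1000 per cell becomes negligible.
    unsigned int ndofs_weight(const dealii::FiniteElement<2> &fe)
    {
      const unsigned int factor = 100000;
      return factor * fe.dofs_per_cell;
    }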
diff --git a/tests/mpi/hp_cell_weights_04.with_mpi=true.with_metis=true.mpirun=2.output b/tests/mpi/hp_cell_weights_04.with_mpi=true.with_metis=true.mpirun=2.output
new file mode 100644
index 0000000000..d1d118c466
--- /dev/null
+++ b/tests/mpi/hp_cell_weights_04.with_mpi=true.with_metis=true.mpirun=2.output
@@ -0,0 +1,23 @@
+
+DEAL:0:2d::Number of cells before repartitioning: 8
+DEAL:0:2d::  Cumulative dofs per cell: 32
+DEAL:0:2d::Number of cells after repartitioning: 13
+DEAL:0:2d::  Cumulative dofs per cell: 52
+DEAL:0:2d::OK
+DEAL:0:3d::Number of cells before repartitioning: 33
+DEAL:0:3d::  Cumulative dofs per cell: 264
+DEAL:0:3d::Number of cells after repartitioning: 47
+DEAL:0:3d::  Cumulative dofs per cell: 376
+DEAL:0:3d::OK
+
+DEAL:1:2d::Number of cells before repartitioning: 8
+DEAL:1:2d::  Cumulative dofs per cell: 64
+DEAL:1:2d::Number of cells after repartitioning: 3
+DEAL:1:2d::  Cumulative dofs per cell: 44
+DEAL:1:2d::OK
+DEAL:1:3d::Number of cells before repartitioning: 31
+DEAL:1:3d::  Cumulative dofs per cell: 456
+DEAL:1:3d::Number of cells after repartitioning: 17
+DEAL:1:3d::  Cumulative dofs per cell: 344
+DEAL:1:3d::OK
+
diff --git a/tests/mpi/hp_cell_weights_04.with_mpi=true.with_metis=true.mpirun=2.output.1 b/tests/mpi/hp_cell_weights_04.with_mpi=true.with_metis=true.mpirun=2.output.1
new file mode 100644
index 0000000000..f42d31ee46
--- /dev/null
+++ b/tests/mpi/hp_cell_weights_04.with_mpi=true.with_metis=true.mpirun=2.output.1
@@ -0,0 +1,23 @@
+
+DEAL:0:2d::Number of cells before repartitioning: 8
+DEAL:0:2d::  Cumulative dofs per cell: 64
+DEAL:0:2d::Number of cells after repartitioning: 13
+DEAL:0:2d::  Cumulative dofs per cell: 52
+DEAL:0:2d::OK
+DEAL:0:3d::Number of cells before repartitioning: 32
+DEAL:0:3d::  Cumulative dofs per cell: 256
+DEAL:0:3d::Number of cells after repartitioning: 47
+DEAL:0:3d::  Cumulative dofs per cell: 376
+DEAL:0:3d::OK
+
+DEAL:1:2d::Number of cells before repartitioning: 8
+DEAL:1:2d::  Cumulative dofs per cell: 32
+DEAL:1:2d::Number of cells after repartitioning: 3
+DEAL:1:2d::  Cumulative dofs per cell: 44
+DEAL:1:2d::OK
+DEAL:1:3d::Number of cells before repartitioning: 32
+DEAL:1:3d::  Cumulative dofs per cell: 464
+DEAL:1:3d::Number of cells after repartitioning: 17
+DEAL:1:3d::  Cumulative dofs per cell: 344
+DEAL:1:3d::OK
+
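A quick consistency check of the numbers in both output variants, given the
grid and elements the test selects:

    // A twice-refined hyper_cube has 16 cells in 2d and 64 in 3d; FE_Q(1)
    // has 2^dim DoFs per cell and FE_Q(5) has 6^dim. The total over all
    // cells must be the same before and after repartitioning, split
    // across the two ranks.
    static_assert(15 * 4 + 36 == 96,
                  "2d total: 32 + 64 before, 52 + 44 after");
    static_assert(63 * 8 + 216 == 720,
                  "3d total: 264 + 456 before, 376 + 344 after");
    // A perfectly balanced 2-rank split would be 48 (2d) and 360 (3d)
    // per rank; METIS gets close with 52/44 and 376/344.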
diff --git a/tests/mpi/hp_refinement_01.cc b/tests/mpi/hp_refinement_01.cc
index 610511c50a..129ec7436c 100644
--- a/tests/mpi/hp_refinement_01.cc
+++ b/tests/mpi/hp_refinement_01.cc
@@ -88,7 +88,7 @@ test()
         }
     }
 
-  dh.distribute_dofs(fe_collection);
+  dh.set_fe(fe_collection);
 
   // ----- refine -----
   tria.execute_coarsening_and_refinement();
diff --git a/tests/mpi/hp_refinement_02.cc b/tests/mpi/hp_refinement_02.cc
index e00386c29d..06e51dc047 100644
--- a/tests/mpi/hp_refinement_02.cc
+++ b/tests/mpi/hp_refinement_02.cc
@@ -88,7 +88,7 @@ test()
         }
     }
 
-  dh.distribute_dofs(fe_collection);
+  dh.set_fe(fe_collection);
 
   // ----- refine -----
   tria.execute_coarsening_and_refinement();