From: Peter Munch
Date: Tue, 28 Jun 2022 20:33:32 +0000 (+0200)
Subject: Communicate refinement flags in p:s:T
X-Git-Tag: v9.5.0-rc1~1092^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=e6d60be7ef3492477a713ad1641a0272523b87fa;p=dealii.git

Communicate refinement flags in p:s:T
---

diff --git a/doc/news/changes/minor/20220712Munch b/doc/news/changes/minor/20220712Munch
new file mode 100644
index 0000000000..f2dd59e668
--- /dev/null
+++ b/doc/news/changes/minor/20220712Munch
@@ -0,0 +1,4 @@
+Improved: Refinement and coarsening flags are now
+communicated in parallel::shared::Triangulation.
+
+(Peter Munch, 2022/07/12)
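The user-facing effect of this change: with parallel::shared::Triangulation it now suffices to set refinement and coarsening flags on locally owned cells before calling execute_coarsening_and_refinement(). A minimal usage sketch follows; the header names, the 2D hyper-cube grid, the z-order partitioner, and the refinement criterion are illustrative choices and not part of the patch.

    #include <deal.II/base/mpi.h>
    #include <deal.II/distributed/shared_tria.h>
    #include <deal.II/grid/grid_generator.h>

    using namespace dealii;

    int
    main(int argc, char **argv)
    {
      Utilities::MPI::MPI_InitFinalize mpi(argc, argv, 1);

      // z-order partitioning keeps this sketch independent of METIS
      parallel::shared::Triangulation<2> tria(
        MPI_COMM_WORLD,
        Triangulation<2>::none,
        /*allow_artificial_cells=*/true,
        parallel::shared::Triangulation<2>::partition_zorder);

      GridGenerator::hyper_cube(tria);
      tria.refine_global(2);

      // Setting flags on locally owned cells is enough; flags on ghost and
      // artificial cells are gathered internally via MPI communication.
      for (const auto &cell : tria.active_cell_iterators())
        if (cell->is_locally_owned() && cell->center()[0] < 0.5)
          cell->set_refine_flag();

      tria.execute_coarsening_and_refinement();
    }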
diff --git a/include/deal.II/distributed/shared_tria.h b/include/deal.II/distributed/shared_tria.h
index 102c07ac65..6512842390 100644
--- a/include/deal.II/distributed/shared_tria.h
+++ b/include/deal.II/distributed/shared_tria.h
@@ -69,10 +69,9 @@ namespace parallel
    * about cells owned by other processors with the exception of a single
    * layer of ghost cells around their own part of the domain.
    *
-   * As a consequence of storing the entire mesh on each processor, active
-   * cells need to be flagged for refinement or coarsening consistently on
-   * all processors if you want to adapt them, regardless of being classified
-   * as locally owned, ghost or artificial.
+   * Active cells only need to be flagged for refinement or coarsening on
+   * locally owned cells; the relevant information for ghost and artificial
+   * cells is gathered internally via MPI communication.
    *
    * The class is also useful in cases where compute time and memory
    * considerations dictate that the program needs to be run in parallel,
diff --git a/source/distributed/shared_tria.cc b/source/distributed/shared_tria.cc
index deb132b1fe..f9b999d28f 100644
--- a/source/distributed/shared_tria.cc
+++ b/source/distributed/shared_tria.cc
@@ -351,6 +351,51 @@ namespace parallel
     void
     Triangulation<dim, spacedim>::execute_coarsening_and_refinement()
     {
+      // make sure that all refinement/coarsening flags are the same on all
+      // processes
+      {
+        std::vector<unsigned int> refinement_configurations(
+          this->n_active_cells() * 2, 0u);
+        for (const auto &cell : this->active_cell_iterators())
+          if (cell->is_locally_owned())
+            {
+              refinement_configurations[cell->active_cell_index() * 2 + 0] =
+                cell->refine_flag_set();
+              refinement_configurations[cell->active_cell_index() * 2 + 1] =
+                cell->coarsen_flag_set();
+            }
+
+        Utilities::MPI::max(refinement_configurations,
+                            this->get_communicator(),
+                            refinement_configurations);
+
+        for (const auto &cell : this->active_cell_iterators())
+          {
+            cell->clear_refine_flag();
+            cell->clear_coarsen_flag();
+
+            Assert(
+              (refinement_configurations[cell->active_cell_index() * 2 + 0] >
+                   0 ?
+                 1 :
+                 0) +
+                  refinement_configurations[cell->active_cell_index() * 2 +
+                                            1] <=
+                1,
+              ExcMessage(
+                "Refinement/coarsening flags of cells are not consistent in parallel!"));
+
+            if (refinement_configurations[cell->active_cell_index() * 2 + 0] >
+                0)
+              cell->set_refine_flag(RefinementCase<dim>(
+                refinement_configurations[cell->active_cell_index() * 2 + 0]));
+
+            if (refinement_configurations[cell->active_cell_index() * 2 + 1] >
+                0)
+              cell->set_coarsen_flag();
+          }
+      }
+
       dealii::Triangulation<dim, spacedim>::execute_coarsening_and_refinement();

       partition();
       this->update_number_cache();
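The synchronization above packs two integers per active cell (the refinement case and the coarsen flag), fills them only on locally owned cells, and reduces the vector with an element-wise maximum over all ranks, so every process ends up with the complete set of flags. The following standalone sketch illustrates the same scheme with plain MPI; the cell count and ownership pattern are invented for illustration, and MPI_Allreduce with MPI_MAX stands in for the Utilities::MPI::max call used in the patch.

    #include <mpi.h>

    #include <cstdio>
    #include <vector>

    int
    main(int argc, char **argv)
    {
      MPI_Init(&argc, &argv);
      int rank, size;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      // hypothetical global number of active cells and ownership pattern
      const unsigned int        n_cells = 8;
      std::vector<unsigned int> flags(2 * n_cells, 0u);

      for (unsigned int c = 0; c < n_cells; ++c)
        if (c % size == static_cast<unsigned int>(rank)) // "locally owned"
          {
            flags[2 * c + 0] = (c % 3 == 0) ? 1u : 0u; // refinement case
            flags[2 * c + 1] = (c % 3 == 1) ? 1u : 0u; // coarsen flag
          }

      // element-wise max over all ranks; zeros from non-owners are overwritten
      MPI_Allreduce(MPI_IN_PLACE,
                    flags.data(),
                    static_cast<int>(flags.size()),
                    MPI_UNSIGNED,
                    MPI_MAX,
                    MPI_COMM_WORLD);

      if (rank == 0)
        for (unsigned int c = 0; c < n_cells; ++c)
          std::printf("cell %u: refine=%u coarsen=%u\n",
                      c,
                      flags[2 * c + 0],
                      flags[2 * c + 1]);

      MPI_Finalize();
    }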
diff --git a/tests/sharedtria/refine_and_coarsen_02.cc b/tests/sharedtria/refine_and_coarsen_02.cc
new file mode 100644
index 0000000000..a42d5453a8
--- /dev/null
+++ b/tests/sharedtria/refine_and_coarsen_02.cc
@@ -0,0 +1,88 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2022 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------


+// test that coarsening/refinement flags are communicated correctly
+
+#include <deal.II/distributed/shared_tria.h>
+
+#include <deal.II/grid/grid_generator.h>
+
+#include "../tests.h"
+
+
+template <int dim>
+void
+test()
+{
+  parallel::shared::Triangulation<dim> tria(
+    MPI_COMM_WORLD,
+    ::Triangulation<dim>::none,
+    true,
+    parallel::shared::Triangulation<dim>::partition_custom_signal);
+
+  tria.signals.create.connect([&]() {
+    for (const auto &cell : tria.active_cell_iterators())
+      if (cell->center()[1] < 0.5)
+        cell->set_subdomain_id(0);
+      else
+        cell->set_subdomain_id(1);
+  });
+
+  const auto print = [&]() {
+    for (unsigned int l = 0; l < tria.n_levels(); ++l)
+      deallog << tria.n_cells(l) << " ";
+    deallog << std::endl;
+  };
+
+  GridGenerator::hyper_cube(tria);
+
+  print();
+
+  tria.refine_global();
+
+  print();
+
+  for (const auto &cell : tria.active_cell_iterators())
+    if (cell->is_locally_owned())
+      {
+        const auto center = cell->center();
+
+        if (center[0] < 0.5 && center[1] < 0.5)
+          cell->set_refine_flag();
+        else if (center[0] > 0.5 && center[1] > 0.5)
+          cell->set_coarsen_flag();
+      }
+
+  tria.execute_coarsening_and_refinement();
+
+  print();
+}
+
+
+int
+main(int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  MPILogInitAll log;
+
+  AssertDimension(Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD), 2);
+
+  test<2>();
+}
diff --git a/tests/sharedtria/refine_and_coarsen_02.mpirun=2.output b/tests/sharedtria/refine_and_coarsen_02.mpirun=2.output
new file mode 100644
index 0000000000..7b87127ae8
--- /dev/null
+++ b/tests/sharedtria/refine_and_coarsen_02.mpirun=2.output
@@ -0,0 +1,9 @@
+
+DEAL:0::1 
+DEAL:0::1 4 
+DEAL:0::1 4 4 
+
+DEAL:1::1 
+DEAL:1::1 4 
+DEAL:1::1 4 4 
+