--- /dev/null
+Improved: Refinement and coarsening flags now only need to be set on
+locally owned cells of a parallel::shared::Triangulation; they are
+communicated to all other processes during
+execute_coarsening_and_refinement().
+<br>
+(Peter Munch, 2022/07/12)
* about cells owned by other processors with the exception of a single
* layer of ghost cells around their own part of the domain.
*
- * As a consequence of storing the entire mesh on each processor, active
- * cells need to be flagged for refinement or coarsening consistently on
- * all processors if you want to adapt them, regardless of being classified
- * as locally owned, ghost or artificial.
+ * Active cells only need to be flagged for refinement or coarsening on
+ * locally owned cells. The relevant information for ghost and artificial
+ * cells is gathered internally via MPI communication.
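+ *
+ * For example, a mesh adaptation step could then look like the following
+ * sketch, where `triangulation` is an object of this class and the
+ * refinement criterion is only a placeholder:
+ * @code
+ * for (const auto &cell : triangulation.active_cell_iterators())
+ *   if (cell->is_locally_owned() &&
+ *       my_refinement_criterion(cell)) // placeholder criterion
+ *     cell->set_refine_flag();
+ *
+ * triangulation.execute_coarsening_and_refinement();
+ * @endcode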
*
* The class is also useful in cases where compute time and memory
* considerations dictate that the program needs to be run in parallel,
void
Triangulation<dim, spacedim>::execute_coarsening_and_refinement()
{
+ // make sure that all refinement/coarsening flags are the same on all
+ // processes
+ {
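+ // collect the refinement/coarsening flags of the locally owned cells in a
+ // flat array with two entries per active cell: the RefinementCase and the
+ // coarsen flag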
+ std::vector<unsigned int> refinement_configurations(
+ this->n_active_cells() * 2, 0u);
+ for (const auto &cell : this->active_cell_iterators())
+ if (cell->is_locally_owned())
+ {
+ refinement_configurations[cell->active_cell_index() * 2 + 0] =
+ cell->refine_flag_set();
+ refinement_configurations[cell->active_cell_index() * 2 + 1] =
+ cell->coarsen_flag_set();
+ }
+
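+ // combine the contributions of all processes by an element-wise maximum:
+ // only the owner of a cell has written nonzero entries, so afterwards
+ // every process knows the flags of every active cell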
+ Utilities::MPI::max(refinement_configurations,
+ this->get_communicator(),
+ refinement_configurations);
+
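+ // clear the local flags and reapply the now globally known configuration
+ // to every active cell, no matter whether it is locally owned, a ghost
+ // cell, or an artificial cell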
+ for (const auto &cell : this->active_cell_iterators())
+ {
+ cell->clear_refine_flag();
+ cell->clear_coarsen_flag();
+
+ Assert(
+ (refinement_configurations[cell->active_cell_index() * 2 + 0] >
+ 0 ?
+ 1 :
+ 0) +
+ refinement_configurations[cell->active_cell_index() * 2 +
+ 1] <=
+ 1,
+ ExcMessage(
+ "Refinement/coarsening flags of cells are not consistent in parallel!"));
+
+ if (refinement_configurations[cell->active_cell_index() * 2 + 0] >
+ 0)
+ cell->set_refine_flag(RefinementCase<dim>(
+ refinement_configurations[cell->active_cell_index() * 2 + 0]));
+
+ if (refinement_configurations[cell->active_cell_index() * 2 + 1] >
+ 0)
+ cell->set_coarsen_flag();
+ }
+ }
+
dealii::Triangulation<dim, spacedim>::execute_coarsening_and_refinement();
partition();
this->update_number_cache();
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2022 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+// test that coarsening/refinement flags are communicated correctly
+
+#include <deal.II/distributed/shared_tria.h>
+
+#include <deal.II/grid/grid_generator.h>
+
+#include "../tests.h"
+
+
+template <int dim>
+void
+test()
+{
+ parallel::shared::Triangulation<dim> tria(
+ MPI_COMM_WORLD,
+ ::Triangulation<dim>::none,
+ true,
+ parallel::shared::Triangulation<dim>::partition_custom_signal);
+
+  // Partition the mesh by hand: process 0 owns the lower half of the
+  // domain, process 1 the rest. Also connect to the post_refinement signal
+  // so that the partition stays up to date after mesh changes.
+  const auto partition_mesh = [&]() {
+    for (const auto &cell : tria.active_cell_iterators())
+      if (cell->center()[1] < 0.5)
+        cell->set_subdomain_id(0);
+      else
+        cell->set_subdomain_id(1);
+  };
+
+  tria.signals.create.connect(partition_mesh);
+  tria.signals.post_refinement.connect(partition_mesh);
+
+ const auto print = [&]() {
+ for (unsigned int l = 0; l < tria.n_levels(); ++l)
+ deallog << tria.n_cells(l) << " ";
+ deallog << std::endl;
+ };
+
+ GridGenerator::hyper_cube(tria);
+
+ print();
+
+ tria.refine_global();
+
+ print();
+
+ for (const auto &cell : tria.active_cell_iterators())
+ if (cell->is_locally_owned())
+ {
+ const auto center = cell->center();
+
+ if (center[0] < 0.5 && center[1] < 0.5)
+ cell->set_refine_flag();
+ else if (center[0] > 0.5 && center[1] > 0.5)
+ cell->set_coarsen_flag();
+ }
+
+ tria.execute_coarsening_and_refinement();
+
+ print();
+}
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ MPILogInitAll log;
+
+ AssertDimension(Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD), 2);
+
+ test<2>();
+}
--- /dev/null
+DEAL:0::1
+DEAL:0::1 4
+DEAL:0::1 4 4
+
+DEAL:1::1
+DEAL:1::1 4
+DEAL:1::1 4 4
+