From 5e72099376b8ef16736168365b7ac400d2092846 Mon Sep 17 00:00:00 2001
From: Timo Heister
Date: Sun, 22 Mar 2015 09:12:27 -0400
Subject: [PATCH] Allow manual repartitioning in distributed tria

A new flag no_automatic_repartitioning in
parallel::distributed::Triangulation will disable the automatic
repartitioning when calling execute_coarsening_and_refinement() (or
functions like refine_global(), ...), resulting in all cells staying on
the processor they were on before. A new function repartition() will
execute the repartitioning, giving the same result as the old behavior.

Splitting the process of refining and partitioning into separate
functions allows for advanced techniques when handling several meshes
at once.

Added tests that check that attaching and transferring data is working
correctly.

Resolves #673.
---
 doc/news/changes.h                       |  10 ++
 include/deal.II/distributed/tria.h       |  23 +++-
 source/distributed/tria.cc               |  42 +++++-
 tests/mpi/repartition_01.cc              |  83 +++++++++++
 tests/mpi/repartition_01.mpirun=3.output |  13 ++
 tests/mpi/repartition_02.cc              | 167 +++++++++++++++++++++++
 tests/mpi/repartition_02.mpirun=3.output |  62 +++++++++
 7 files changed, 395 insertions(+), 5 deletions(-)
 create mode 100644 tests/mpi/repartition_01.cc
 create mode 100644 tests/mpi/repartition_01.mpirun=3.output
 create mode 100644 tests/mpi/repartition_02.cc
 create mode 100644 tests/mpi/repartition_02.mpirun=3.output

diff --git a/doc/news/changes.h b/doc/news/changes.h
index 2579dbf8c8..c5c5e4a345 100644
--- a/doc/news/changes.h
+++ b/doc/news/changes.h
@@ -326,6 +326,16 @@ inconvenience this causes.
 
 <ol>
+  <li> New: A new flag no_automatic_repartitioning in
+  parallel::distributed::Triangulation will disable the automatic
+  repartitioning when calling execute_coarsening_and_refinement() (or
+  functions like refine_global(), ...), resulting in all cells staying on
+  the processor they were on before. The new function repartition() will
+  execute the repartitioning as was previously done automatically.
+  <br>
+  (Timo Heister, 2015/03/22)
+  </li>
+
   <li> Changed: All (Block)Compressed*SparsityPattern classes got replaced
   by DynamicSparsityPattern and BlockDynamicSparsityPattern, respectively,
   and all examples now
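For orientation, here is a minimal usage sketch of the new interface. It is
illustrative only (not part of the patch), assuming 2D and a globally refined
hyper-cube mesh, mirroring the tests added further below:

  #include <deal.II/base/mpi.h>
  #include <deal.II/distributed/tria.h>
  #include <deal.II/grid/grid_generator.h>

  int main (int argc, char *argv[])
  {
    dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);

    // Opt out of automatic repartitioning at construction time. Cells now
    // stay on their current processor across refinement cycles until
    // repartition() is called explicitly.
    dealii::parallel::distributed::Triangulation<2> tria
      (MPI_COMM_WORLD,
       dealii::Triangulation<2>::none,
       dealii::parallel::distributed::Triangulation<2>::no_automatic_repartitioning);

    dealii::GridGenerator::hyper_cube (tria);
    tria.refine_global (2);  // refines, but does not move cells between ranks

    tria.repartition ();     // redistributes cells, as the old automatic
                             // behavior would have done
  }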
diff --git a/include/deal.II/distributed/tria.h b/include/deal.II/distributed/tria.h
index 606826b3be..4c58b2d67e 100644
--- a/include/deal.II/distributed/tria.h
+++ b/include/deal.II/distributed/tria.h
@@ -380,12 +380,17 @@ namespace parallel
        * geometric multigrid functionality. This option requires additional
        * computation and communication. Note: geometric multigrid is still a
        * work in progress.
+       *
+       * @p no_automatic_repartitioning will disable automatic repartitioning
+       * of the cells after a refinement cycle. Repartitioning can instead be
+       * executed manually by calling repartition().
        */
       enum Settings
       {
         default_setting = 0x0,
         mesh_reconstruction_after_repartitioning = 0x1,
-        construct_multigrid_hierarchy = 0x2
+        construct_multigrid_hierarchy = 0x2,
+        no_automatic_repartitioning = 0x4
       };
 
@@ -469,6 +474,22 @@ namespace parallel
        */
       virtual void execute_coarsening_and_refinement ();
 
+      /**
+       * Redistribute the active cells between processors. Normally this
+       * happens automatically when calling
+       * execute_coarsening_and_refinement() unless @p
+       * no_automatic_repartitioning is set in the constructor.
+       *
+       * @note When using SolutionTransfer or manual transfer using
+       * register_data_attach() and notify_ready_to_unpack() you need to do
+       * this twice: once when calling execute_coarsening_and_refinement(),
+       * which will handle coarsening and refinement but obviously won't ship
+       * any data between processors, and a second time when calling this
+       * function. Here, no coarsening and refinement will be done but the
+       * information will be packed and shipped to different processors.
+       */
+      void repartition ();
+
       /**
        * When vertices have been moved locally, for example using code like
        * @code
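The @note above is the subtle part of the new API: attached data has to be
transferred in two steps. A sketch of that pattern with
parallel::distributed::SolutionTransfer follows; it is a fragment under stated
assumptions (a triangulation `tria` built with no_automatic_repartitioning, an
existing `dof_handler` and `fe`, and a distributed vector `solution`; none of
these names come from the patch):

  #include <deal.II/distributed/solution_transfer.h>
  #include <deal.II/lac/trilinos_vector.h>

  // Step 1: refine/coarsen. The attached data is repacked for the new cells,
  // but no cells change owners because automatic repartitioning is disabled.
  parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
    soltrans (dof_handler);
  soltrans.prepare_for_coarsening_and_refinement (solution);
  tria.execute_coarsening_and_refinement ();
  dof_handler.distribute_dofs (fe);
  TrilinosWrappers::MPI::Vector interpolated (dof_handler.locally_owned_dofs (),
                                              MPI_COMM_WORLD);
  soltrans.interpolate (interpolated);

  // Step 2: repartition. No cells are refined or coarsened, but cells and
  // the data attached to them are shipped to their new owners.
  parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
    soltrans2 (dof_handler);
  soltrans2.prepare_for_coarsening_and_refinement (interpolated);
  tria.repartition ();
  dof_handler.distribute_dofs (fe);
  TrilinosWrappers::MPI::Vector repartitioned (dof_handler.locally_owned_dofs (),
                                               MPI_COMM_WORLD);
  soltrans2.interpolate (repartitioned);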
diff --git a/source/distributed/tria.cc b/source/distributed/tria.cc
index f577f75857..64977e198e 100644
--- a/source/distributed/tria.cc
+++ b/source/distributed/tria.cc
@@ -3414,10 +3414,11 @@ namespace parallel
       attach_mesh_data();
 
       // partition the new mesh between all processors
-      dealii::internal::p4est::functions<dim>::
-      partition (parallel_forest,
-                 /* prepare coarsening */ 1,
-                 /* weight_callback */ NULL);
+      if (!(settings & no_automatic_repartitioning))
+        dealii::internal::p4est::functions<dim>::
+        partition (parallel_forest,
+                   /* prepare coarsening */ 1,
+                   /* weight_callback */ NULL);
 
       // finally copy back from local part of tree to deal.II
       // triangulation. before doing so, make sure there are no refine or
@@ -3447,6 +3448,39 @@ namespace parallel
       update_number_cache ();
     }
 
+    template <int dim, int spacedim>
+    void
+    Triangulation<dim,spacedim>::repartition ()
+    {
+      AssertThrow(settings & no_automatic_repartitioning,
+                  ExcMessage("You need to set no_automatic_repartitioning "
+                             "when calling repartition() manually."));
+
+      refinement_in_progress = true;
+
+      // before repartitioning the mesh let others attach mesh related info
+      // (such as SolutionTransfer data) to the p4est
+      attach_mesh_data();
+
+      dealii::internal::p4est::functions<dim>::
+      partition (parallel_forest,
+                 /* prepare coarsening */ 1,
+                 /* weight_callback */ NULL);
+
+      try
+        {
+          copy_local_forest_to_triangulation ();
+        }
+      catch (const typename Triangulation<dim,spacedim>::DistortedCellList &)
+        {
+          // the underlying triangulation should not be checking for
+          // distorted cells
+          AssertThrow (false, ExcInternalError());
+        }
+
+      refinement_in_progress = false;
+
+      update_number_cache ();
+    }
+
 
     template <int dim, int spacedim>
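Note the guard at the top of repartition(): the function refuses to run
unless the triangulation was constructed with the new flag. An illustrative
failure case (not from the patch):

  // With default settings, partitioning still happens automatically inside
  // execute_coarsening_and_refinement(), so a manual call is rejected:
  dealii::parallel::distributed::Triangulation<2> tria (MPI_COMM_WORLD);
  dealii::GridGenerator::hyper_cube (tria);
  tria.refine_global (1);
  tria.repartition ();  // AssertThrow fires: "You need to set
                        // no_automatic_repartitioning when calling
                        // repartition() manually."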
diff --git a/tests/mpi/repartition_01.cc b/tests/mpi/repartition_01.cc
new file mode 100644
index 0000000000..5057a7a70b
--- /dev/null
+++ b/tests/mpi/repartition_01.cc
@@ -0,0 +1,83 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2014 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// test manual repartitioning
+
+#include "../tests.h"
+#include "coarse_grid_common.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_tools.h>
+
+
+#include <fstream>
+
+
+template <int dim>
+void test()
+{
+  unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+  if (true)
+    {
+      parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
+                                                   dealii::Triangulation<dim>::none,
+                                                   parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
+
+      GridGenerator::hyper_cube(tr);
+      tr.refine_global(2);
+
+      deallog << "locally owned cells: " << tr.n_locally_owned_active_cells()
+              << " / "
+              << tr.n_global_active_cells()
+              << std::endl;
+
+      //tr.write_mesh_vtk("a");
+
+      tr.repartition();
+
+      //tr.write_mesh_vtk("b");
+
+      deallog << "locally owned cells: " << tr.n_locally_owned_active_cells()
+              << " / "
+              << tr.n_global_active_cells()
+              << std::endl;
+
+      const unsigned int checksum = tr.get_checksum ();
+      if (myid == 0)
+        deallog << "Checksum: "
+                << checksum
+                << std::endl;
+    }
+
+  if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+    deallog << "OK" << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll log;
+  test<2>();
+}
diff --git a/tests/mpi/repartition_01.mpirun=3.output b/tests/mpi/repartition_01.mpirun=3.output
new file mode 100644
index 0000000000..99286b7f92
--- /dev/null
+++ b/tests/mpi/repartition_01.mpirun=3.output
@@ -0,0 +1,13 @@
+
+DEAL:0::locally owned cells: 0 / 16
+DEAL:0::locally owned cells: 4 / 16
+DEAL:0::Checksum: 3877634849
+DEAL:0::OK
+
+DEAL:1::locally owned cells: 0 / 16
+DEAL:1::locally owned cells: 8 / 16
+
+
+DEAL:2::locally owned cells: 16 / 16
+DEAL:2::locally owned cells: 4 / 16
+
diff --git a/tests/mpi/repartition_02.cc b/tests/mpi/repartition_02.cc
new file mode 100644
index 0000000000..36add57f52
--- /dev/null
+++ b/tests/mpi/repartition_02.cc
@@ -0,0 +1,167 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2014 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// test manual repartitioning and transferring data
+
+#include "../tests.h"
+#include "coarse_grid_common.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_tools.h>
+
+
+#include <fstream>
+
+std::string id_to_string(const CellId &id)
+{
+  std::ostringstream ss;
+  ss << id;
+  return ss.str();
+}
+
+template <int dim>
+void pack_function (const typename parallel::distributed::Triangulation<dim,dim>::cell_iterator &cell,
+                    const typename parallel::distributed::Triangulation<dim,dim>::CellStatus status,
+                    void *data)
+{
+  static int some_number = cell->index();
+  deallog << "packing cell " << cell->id() << " with data=" << some_number << " status=";
+  if (status==parallel::distributed::Triangulation<dim,dim>::CELL_PERSIST)
+    deallog << "PERSIST";
+  else if (status==parallel::distributed::Triangulation<dim,dim>::CELL_REFINE)
+    deallog << "REFINE";
+  else if (status==parallel::distributed::Triangulation<dim,dim>::CELL_COARSEN)
+    deallog << "COARSEN";
+  deallog << std::endl;
+
+  if (status==parallel::distributed::Triangulation<dim,dim>::CELL_COARSEN)
+    {
+      Assert(cell->has_children(), ExcInternalError());
+    }
+  else
+    {
+      Assert(!cell->has_children(), ExcInternalError());
+    }
+
+  int *intdata = reinterpret_cast<int *>(data);
+  *intdata = some_number;
+
+  ++some_number;
+}
+
+template <int dim>
+void unpack_function (const typename parallel::distributed::Triangulation<dim,dim>::cell_iterator &cell,
+                      const typename parallel::distributed::Triangulation<dim,dim>::CellStatus status,
+                      const void *data)
+{
+  const int *intdata = reinterpret_cast<const int *>(data);
+
+  deallog << "unpacking cell " << cell->id() << " with data=" << (*intdata) << " status=";
+  if (status==parallel::distributed::Triangulation<dim,dim>::CELL_PERSIST)
+    deallog << "PERSIST";
+  else if (status==parallel::distributed::Triangulation<dim,dim>::CELL_REFINE)
+    deallog << "REFINE";
+  else if (status==parallel::distributed::Triangulation<dim,dim>::CELL_COARSEN)
+    deallog << "COARSEN";
+  deallog << std::endl;
+
+  if (status==parallel::distributed::Triangulation<dim,dim>::CELL_REFINE)
+    {
+      Assert(cell->has_children(), ExcInternalError());
+    }
+  else
+    {
+      Assert(!cell->has_children(), ExcInternalError());
+    }
+}
+
+template <int dim>
+void test()
+{
+  unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+  if (true)
+    {
+      parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
+                                                   dealii::Triangulation<dim>::none,
+                                                   parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
+
+      GridGenerator::hyper_cube(tr);
+      tr.refine_global(1);
+
+      deallog << "locally owned cells: " << tr.n_locally_owned_active_cells()
+              << " / "
+              << tr.n_global_active_cells()
+              << std::endl;
+
+      deallog << "* global refine:" << std::endl;
+
+      unsigned int offset = tr.register_data_attach(sizeof(int), pack_function<dim>);
+
+      tr.refine_global(1);
+
+      deallog << "locally owned cells: " << tr.n_locally_owned_active_cells()
+              << " / "
+              << tr.n_global_active_cells()
+              << std::endl;
+
+      tr.notify_ready_to_unpack(offset, unpack_function<dim>);
+
+      //tr.write_mesh_vtk("a");
+
+
+
+
+      deallog << "* repartition:" << std::endl;
+
+      offset = tr.register_data_attach(sizeof(int), pack_function<dim>);
+
+      tr.repartition();
+
+      //tr.write_mesh_vtk("b");
+
+      deallog << "locally owned cells: " << tr.n_locally_owned_active_cells()
+              << " / "
+              << tr.n_global_active_cells()
+              << std::endl;
+
+      tr.notify_ready_to_unpack(offset, unpack_function<dim>);
+
+      const unsigned int checksum = tr.get_checksum ();
+      if (myid == 0)
+        deallog << "Checksum: "
+                << checksum
+                << std::endl;
+    }
+
+  if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+    deallog << "OK" << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll log;
+  test<2>();
+}
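The pack and unpack callbacks above repeat the same if/else chain to turn a
CellStatus into a printable label. A small helper like the following (a
hypothetical refactoring, not part of the patch) would make the callbacks
shorter:

  // Hypothetical helper: map a CellStatus to the label the test prints.
  template <int dim>
  const char *
  status_to_string (const typename parallel::distributed::Triangulation<dim,dim>::CellStatus status)
  {
    switch (status)
      {
      case parallel::distributed::Triangulation<dim,dim>::CELL_PERSIST:
        return "PERSIST";
      case parallel::distributed::Triangulation<dim,dim>::CELL_REFINE:
        return "REFINE";
      case parallel::distributed::Triangulation<dim,dim>::CELL_COARSEN:
        return "COARSEN";
      default:
        return "UNKNOWN";
      }
  }

  // e.g. inside pack_function:
  //   deallog << "packing cell " << cell->id()
  //           << " with data=" << some_number
  //           << " status=" << status_to_string<dim>(status) << std::endl;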
diff --git a/tests/mpi/repartition_02.mpirun=3.output b/tests/mpi/repartition_02.mpirun=3.output
new file mode 100644
index 0000000000..ba0fd78ac9
--- /dev/null
+++ b/tests/mpi/repartition_02.mpirun=3.output
@@ -0,0 +1,62 @@
+
+DEAL:0::locally owned cells: 0 / 4
+DEAL:0::* global refine:
+DEAL:0::locally owned cells: 0 / 16
+DEAL:0::* repartition:
+DEAL:0::locally owned cells: 4 / 16
+DEAL:0::unpacking cell 0_2:00 with data=4 status=PERSIST
+DEAL:0::unpacking cell 0_2:01 with data=5 status=PERSIST
+DEAL:0::unpacking cell 0_2:02 with data=6 status=PERSIST
+DEAL:0::unpacking cell 0_2:03 with data=7 status=PERSIST
+DEAL:0::Checksum: 3877634849
+DEAL:0::OK
+
+DEAL:1::locally owned cells: 0 / 4
+DEAL:1::* global refine:
+DEAL:1::locally owned cells: 0 / 16
+DEAL:1::* repartition:
+DEAL:1::locally owned cells: 8 / 16
+DEAL:1::unpacking cell 0_2:10 with data=8 status=PERSIST
+DEAL:1::unpacking cell 0_2:11 with data=9 status=PERSIST
+DEAL:1::unpacking cell 0_2:12 with data=10 status=PERSIST
+DEAL:1::unpacking cell 0_2:13 with data=11 status=PERSIST
+DEAL:1::unpacking cell 0_2:20 with data=12 status=PERSIST
+DEAL:1::unpacking cell 0_2:21 with data=13 status=PERSIST
+DEAL:1::unpacking cell 0_2:22 with data=14 status=PERSIST
+DEAL:1::unpacking cell 0_2:23 with data=15 status=PERSIST
+
+
+DEAL:2::locally owned cells: 4 / 4
+DEAL:2::* global refine:
+DEAL:2::packing cell 0_1:0 with data=0 status=REFINE
+DEAL:2::packing cell 0_1:1 with data=1 status=REFINE
+DEAL:2::packing cell 0_1:2 with data=2 status=REFINE
+DEAL:2::packing cell 0_1:3 with data=3 status=REFINE
+DEAL:2::locally owned cells: 16 / 16
+DEAL:2::unpacking cell 0_1:0 with data=0 status=REFINE
+DEAL:2::unpacking cell 0_1:1 with data=1 status=REFINE
+DEAL:2::unpacking cell 0_1:2 with data=2 status=REFINE
+DEAL:2::unpacking cell 0_1:3 with data=3 status=REFINE
+DEAL:2::* repartition:
+DEAL:2::packing cell 0_2:00 with data=4 status=PERSIST
+DEAL:2::packing cell 0_2:01 with data=5 status=PERSIST
+DEAL:2::packing cell 0_2:02 with data=6 status=PERSIST
+DEAL:2::packing cell 0_2:03 with data=7 status=PERSIST
+DEAL:2::packing cell 0_2:10 with data=8 status=PERSIST
+DEAL:2::packing cell 0_2:11 with data=9 status=PERSIST
+DEAL:2::packing cell 0_2:12 with data=10 status=PERSIST
+DEAL:2::packing cell 0_2:13 with data=11 status=PERSIST
+DEAL:2::packing cell 0_2:20 with data=12 status=PERSIST
+DEAL:2::packing cell 0_2:21 with data=13 status=PERSIST
+DEAL:2::packing cell 0_2:22 with data=14 status=PERSIST
+DEAL:2::packing cell 0_2:23 with data=15 status=PERSIST
+DEAL:2::packing cell 0_2:30 with data=16 status=PERSIST
+DEAL:2::packing cell 0_2:31 with data=17 status=PERSIST
+DEAL:2::packing cell 0_2:32 with data=18 status=PERSIST
+DEAL:2::packing cell 0_2:33 with data=19 status=PERSIST
+DEAL:2::locally owned cells: 4 / 16
+DEAL:2::unpacking cell 0_2:30 with data=16 status=PERSIST
+DEAL:2::unpacking cell 0_2:31 with data=17 status=PERSIST
+DEAL:2::unpacking cell 0_2:32 with data=18 status=PERSIST
+DEAL:2::unpacking cell 0_2:33 with data=19 status=PERSIST
+
-- 
2.39.5