From 5c25cbbea5679638a9ca015fd435a9617a2511ad Mon Sep 17 00:00:00 2001
From: Timo Heister
Date: Sun, 22 Mar 2015 11:35:16 -0400
Subject: [PATCH] address comments

- update documentation
- check that no flags are set when calling repartition()
- add another test
---
 include/deal.II/distributed/tria.h       |  24 +++---
 source/distributed/tria.cc               |  10 +++
 tests/mpi/repartition_02.cc              |   7 --
 tests/mpi/repartition_03.cc              | 105 +++++++++++++++++++++++
 tests/mpi/repartition_03.mpirun=3.output |  73 ++++++++++++++++
 5 files changed, 202 insertions(+), 17 deletions(-)
 create mode 100644 tests/mpi/repartition_03.cc
 create mode 100644 tests/mpi/repartition_03.mpirun=3.output

diff --git a/include/deal.II/distributed/tria.h b/include/deal.II/distributed/tria.h
index 4c58b2d67e..4ff5a9e68c 100644
--- a/include/deal.II/distributed/tria.h
+++ b/include/deal.II/distributed/tria.h
@@ -475,18 +475,22 @@ namespace parallel
       virtual void execute_coarsening_and_refinement ();
 
       /**
-       * Redistribute the active cells between processors. Normally this will
-       * be executed automatically when calling
-       * execute_coarsening_and_refinement() unless @p
-       * no_automatic_repartitioning is set in the constructor.
+       * Manually repartition the active cells between processors. Normally
+       * this repartitioning will happen automatically when calling
+       * execute_coarsening_and_refinement() (or refine_global()) unless the
+       * @p no_automatic_repartitioning flag is set in the constructor. Setting
+       * the flag and then calling repartition() gives the same result.
        *
-       * @note When using SolutionTransfer or manual transfer using
-       * register_data_attach() and notify_ready_to_unpack() you need to do
-       * this twice: once when calling execute_coarsening_and_refinement(),
+       * If you want to transfer data (using SolutionTransfer or manually with
+       * register_data_attach() and notify_ready_to_unpack()), you need to set
+       * it up twice: once when calling execute_coarsening_and_refinement(),
        * which will handle coarsening and refinement but obviously won't ship
-       * any data between processors, and a second time when calling this
-       * function. Here, no coarsening and refinement will be done but
-       * information will be packed and shipped to different processors.
+       * any data between processors, and a second time when calling
+       * repartition(). Here, no coarsening and refinement will be done but
+       * information will be packed and shipped to different processors. In
+       * other words, you probably want to treat a call to repartition() in
+       * the same way as execute_coarsening_and_refinement() with respect to
+       * dealing with data movement (SolutionTransfer, etc.).
        */
       void repartition ();
 
diff --git a/source/distributed/tria.cc b/source/distributed/tria.cc
index 64977e198e..32d0d9d4a3 100644
--- a/source/distributed/tria.cc
+++ b/source/distributed/tria.cc
@@ -3455,6 +3455,16 @@ namespace parallel
       AssertThrow(settings & no_automatic_repartitioning,
                   ExcMessage("You need to set no_automatic_repartition when calling repartition() manually."));
 
+#ifdef DEBUG
+      for (typename Triangulation<dim,spacedim>::active_cell_iterator
+           cell = this->begin_active();
+           cell != this->end(); ++cell)
+        if (cell->is_locally_owned())
+          Assert (
+            !cell->refine_flag_set() && !cell->coarsen_flag_set(),
+            ExcMessage ("Error: There shouldn't be any cells flagged for coarsening/refinement when calling repartition()."));
+#endif
+
       refinement_in_progress = true;
 
       // before repartitioning the mesh let others attach mesh related info
diff --git a/tests/mpi/repartition_02.cc b/tests/mpi/repartition_02.cc
index 36add57f52..dbfc980d62 100644
--- a/tests/mpi/repartition_02.cc
+++ b/tests/mpi/repartition_02.cc
@@ -32,13 +32,6 @@
 #include <fstream>
 
 
-std::string id_to_string(const CellId &id)
-{
-  std::ostringstream ss;
-  ss << id;
-  return ss.str();
-}
-
 template <int dim>
 void pack_function (const typename parallel::distributed::Triangulation<dim,dim>::cell_iterator &cell,
                     const typename parallel::distributed::Triangulation<dim,dim>::CellStatus status,
diff --git a/tests/mpi/repartition_03.cc b/tests/mpi/repartition_03.cc
new file mode 100644
index 0000000000..646e8f5348
--- /dev/null
+++ b/tests/mpi/repartition_03.cc
@@ -0,0 +1,105 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2014 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// test that a second call to repartition() doesn't do anything
+
+#include "../tests.h"
+#include "coarse_grid_common.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/cell_id.h>
+#include <deal.II/base/utilities.h>
+
+
+#include <fstream>
+
+template <int dim>
+void print_cells(parallel::distributed::Triangulation<dim> &tr)
+{
+  for (typename Triangulation<dim>::active_cell_iterator
+       cell = tr.begin_active();
+       cell != tr.end(); ++cell)
+    if (cell->is_locally_owned())
+      deallog << cell->id() << std::endl;
+}
+
+template <int dim>
+void test()
+{
+  unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+  if (true)
+    {
+      parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
+                                                   dealii::Triangulation<dim>::none,
+                                                   parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
+
+      GridGenerator::hyper_cube(tr);
+      tr.refine_global(2);
+
+      deallog << "*** 1. everything on one core:" << std::endl;
+
+      deallog << "locally owned cells: " << tr.n_locally_owned_active_cells()
+              << " / "
+              << tr.n_global_active_cells()
+              << std::endl;
+      print_cells(tr);
+
+      deallog << "*** 2. repartition:" << std::endl;
+
+      tr.repartition();
+
+      deallog << "locally owned cells: " << tr.n_locally_owned_active_cells()
+              << " / "
+              << tr.n_global_active_cells()
+              << std::endl;
+
+      print_cells(tr);
+
+      deallog << "*** 3. repartition again (noop):" << std::endl;
+      tr.repartition();
+
+      deallog << "locally owned cells: " << tr.n_locally_owned_active_cells()
+              << " / "
+              << tr.n_global_active_cells()
+              << std::endl;
+
+      print_cells(tr);
+
+      const unsigned int checksum = tr.get_checksum ();
+      if (myid == 0)
+        deallog << "Checksum: "
+                << checksum
+                << std::endl;
+    }
+
+  if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+    deallog << "OK" << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll log;
+  test<2>();
+}
diff --git a/tests/mpi/repartition_03.mpirun=3.output b/tests/mpi/repartition_03.mpirun=3.output
new file mode 100644
index 0000000000..5a0e9b0ba9
--- /dev/null
+++ b/tests/mpi/repartition_03.mpirun=3.output
@@ -0,0 +1,73 @@
+
+DEAL:0::*** 1. everything on one core:
+DEAL:0::locally owned cells: 0 / 16
+DEAL:0::*** 2. repartition:
+DEAL:0::locally owned cells: 4 / 16
+DEAL:0::0_2:00
+DEAL:0::0_2:01
+DEAL:0::0_2:02
+DEAL:0::0_2:03
+DEAL:0::*** 3. repartition again (noop):
+DEAL:0::locally owned cells: 4 / 16
+DEAL:0::0_2:00
+DEAL:0::0_2:01
+DEAL:0::0_2:02
+DEAL:0::0_2:03
+DEAL:0::Checksum: 3877634849
+DEAL:0::OK
+
+DEAL:1::*** 1. everything on one core:
+DEAL:1::locally owned cells: 0 / 16
+DEAL:1::*** 2. repartition:
+DEAL:1::locally owned cells: 8 / 16
+DEAL:1::0_2:10
+DEAL:1::0_2:11
+DEAL:1::0_2:12
+DEAL:1::0_2:13
+DEAL:1::0_2:20
+DEAL:1::0_2:21
+DEAL:1::0_2:22
+DEAL:1::0_2:23
+DEAL:1::*** 3. repartition again (noop):
+DEAL:1::locally owned cells: 8 / 16
+DEAL:1::0_2:10
+DEAL:1::0_2:11
+DEAL:1::0_2:12
+DEAL:1::0_2:13
+DEAL:1::0_2:20
+DEAL:1::0_2:21
+DEAL:1::0_2:22
+DEAL:1::0_2:23
+
+
+DEAL:2::*** 1. everything on one core:
+DEAL:2::locally owned cells: 16 / 16
+DEAL:2::0_2:00
+DEAL:2::0_2:01
+DEAL:2::0_2:02
+DEAL:2::0_2:03
+DEAL:2::0_2:10
+DEAL:2::0_2:11
+DEAL:2::0_2:12
+DEAL:2::0_2:13
+DEAL:2::0_2:20
+DEAL:2::0_2:21
+DEAL:2::0_2:22
+DEAL:2::0_2:23
+DEAL:2::0_2:30
+DEAL:2::0_2:31
+DEAL:2::0_2:32
+DEAL:2::0_2:33
+DEAL:2::*** 2. repartition:
+DEAL:2::locally owned cells: 4 / 16
+DEAL:2::0_2:30
+DEAL:2::0_2:31
+DEAL:2::0_2:32
+DEAL:2::0_2:33
+DEAL:2::*** 3. repartition again (noop):
+DEAL:2::locally owned cells: 4 / 16
+DEAL:2::0_2:30
+DEAL:2::0_2:31
+DEAL:2::0_2:32
+DEAL:2::0_2:33
+
-- 
2.39.5
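
As an illustration of the documented workflow (not part of the patch itself): when a solution vector has to follow the mesh, parallel::distributed::SolutionTransfer is set up once around execute_coarsening_and_refinement() and a second time around repartition(). The sketch below shows that call order only; the helper name refine_then_repartition, the choice of FE_Q and TrilinosWrappers::MPI::Vector, and the ghosted-vector handling are assumptions for the example and are not taken from this patch.

// Sketch only: the documented order of SolutionTransfer calls around
// execute_coarsening_and_refinement() and repartition(). Vector and element
// types are assumptions made for this example.
#include <deal.II/base/index_set.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/distributed/solution_transfer.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/lac/trilinos_vector.h>

using namespace dealii;

// Hypothetical helper: refine/coarsen the mesh, then rebalance it manually,
// carrying `ghosted_solution` (a vector with ghost entries) along both times.
template <int dim>
void refine_then_repartition (parallel::distributed::Triangulation<dim> &tria,
                              DoFHandler<dim>                           &dof_handler,
                              const FE_Q<dim>                           &fe,
                              TrilinosWrappers::MPI::Vector             &ghosted_solution)
{
  const MPI_Comm comm = tria.get_communicator ();
  typedef TrilinosWrappers::MPI::Vector VectorType;

  // First shipping step: coarsening/refinement. With the
  // no_automatic_repartitioning setting, no cells move between processors yet.
  {
    // ... set refine/coarsen flags on tria here ...
    parallel::distributed::SolutionTransfer<dim, VectorType> transfer (dof_handler);
    transfer.prepare_for_coarsening_and_refinement (ghosted_solution);
    tria.execute_coarsening_and_refinement ();
    dof_handler.distribute_dofs (fe);

    VectorType tmp (dof_handler.locally_owned_dofs (), comm);
    transfer.interpolate (tmp);

    IndexSet relevant;
    DoFTools::extract_locally_relevant_dofs (dof_handler, relevant);
    ghosted_solution.reinit (dof_handler.locally_owned_dofs (), relevant, comm);
    ghosted_solution = tmp;   // update ghost values on the refined mesh
  }

  // Second shipping step: repartition(). No refine/coarsen flags may be set
  // here (the patch adds an assertion for exactly this), but cells and the
  // attached data move to their new owners.
  {
    parallel::distributed::SolutionTransfer<dim, VectorType> transfer (dof_handler);
    transfer.prepare_for_coarsening_and_refinement (ghosted_solution);
    tria.repartition ();
    dof_handler.distribute_dofs (fe);

    VectorType tmp (dof_handler.locally_owned_dofs (), comm);
    transfer.interpolate (tmp);

    IndexSet relevant;
    DoFTools::extract_locally_relevant_dofs (dof_handler, relevant);
    ghosted_solution.reinit (dof_handler.locally_owned_dofs (), relevant, comm);
    ghosted_solution = tmp;   // ghost values now reflect the new partition
  }
}

Using two separate SolutionTransfer objects keeps the two shipping steps independent, which mirrors how the updated documentation describes them: the first call moves data because of mesh changes, the second because of ownership changes.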