virtual void execute_coarsening_and_refinement ();
/**
- * Redistribute the active cells between processors. Normally this will
- * be executed automatically when calling
- * execute_coarsening_and_refinement() unless @p
- * no_automatic_repartitioning is set in the constructor.
+ * Manually repartition the active cells between processors. Normally
+ * this repartitioning will happen automatically when calling
+ * execute_coarsening_and_refinement() (or refine_global()) unless the
+ * @p no_automatic_repartitioning flag is set in the constructor. Setting
+ * the flag and then calling repartition() gives the same result.
*
- * @note When using SolutionTransfer or manual transfer using
- * register_data_attach() and notify_ready_to_unpack() you need to do
- * this twice: once when calling execute_coarsening_and_refinement(),
+ * If you want to transfer data (using SolutionTransfer or manually with
+ * register_data_attach() and notify_ready_to_unpack()), you need to
+ * set up the data transfer twice: once when calling execute_coarsening_and_refinement(),
* which will handle coarsening and refinement but obviously won't ship
- * any data between processors, and a second time when calling this
- * function. Here, no coarsening and refinement will be done but
- * information will be packed and shipped to different processors.
+ * any data between processors, and a second time when calling
+ * repartition(). Here, no coarsening and refinement will be done but
+ * information will be packed and shipped to different processors. In
+ * other words, you probably want to treat a call to repartition() in
+ * the same way as execute_coarsening_and_refinement() with respect to
+ * dealing with data movement (SolutionTransfer, etc.).
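+ *
+ * A sketch of this pattern (names such as @p soltrans, @p solution,
+ * @p dof_handler, and @p triangulation are placeholders for your own
+ * objects) might look like:
+ * @code
+ * // adapt the mesh; the solution is interpolated across local
+ * // coarsening/refinement but no data is shipped between processors:
+ * soltrans.prepare_for_coarsening_and_refinement (solution);
+ * triangulation.execute_coarsening_and_refinement ();
+ * dof_handler.distribute_dofs (fe);
+ * // ... resize vectors and call soltrans.interpolate() ...
+ *
+ * // now repartition; this time the solution is packed and shipped:
+ * soltrans.prepare_for_coarsening_and_refinement (solution);
+ * triangulation.repartition ();
+ * dof_handler.distribute_dofs (fe);
+ * // ... resize vectors and call soltrans.interpolate() ...
+ * @endcode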
*/
void repartition ();
AssertThrow(settings & no_automatic_repartitioning,
ExcMessage("You need to set the 'no_automatic_repartitioning' flag when calling repartition() manually."));
+#ifdef DEBUG
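+ // repartition() moves cells between processors but does not execute
+ // any refinement, so cells flagged for adaptation at this point most
+ // likely indicate an error in the calling code: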
+ for (typename Triangulation<dim,spacedim>::active_cell_iterator
+ cell = this->begin_active();
+ cell != this->end(); ++cell)
+ if (cell->is_locally_owned())
+ Assert (
+ !cell->refine_flag_set() && !cell->coarsen_flag_set(),
+ ExcMessage ("Error: There shouldn't be any cells flagged for coarsening/refinement when calling repartition()."));
+#endif
+
refinement_in_progress = true;
// before repartitioning the mesh, let others attach mesh-related info
#include <fstream>
-std::string id_to_string(const CellId &id)
-{
- std::ostringstream ss;
- ss << id;
- return ss.str();
-}
-
template<int dim>
void pack_function (const typename parallel::distributed::Triangulation<dim,dim>::cell_iterator &cell,
const typename parallel::distributed::Triangulation<dim,dim>::CellStatus status,
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2014 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// test that a second call to repartition() doesn't do anything
+
+#include "../tests.h"
+#include "coarse_grid_common.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/base/utilities.h>
+
+
+#include <fstream>
+
+template<int dim>
+void print_cells(parallel::distributed::Triangulation<dim> &tr)
+{
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = tr.begin_active();
+ cell != tr.end(); ++cell)
+ if (cell->is_locally_owned())
+ deallog << cell->id() << std::endl;
+}
+
+template<int dim>
+void test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+ if (true)
+ {
+ parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
+ dealii::Triangulation<dim,dim>::none,
+ parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
+
+ GridGenerator::hyper_cube(tr);
+ tr.refine_global(2);
+
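+ // automatic repartitioning is disabled, so at this point all active
+ // cells are still owned by the single process that owned the coarse
+ // cell: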
+ deallog << "*** 1. everything on one core:" << std::endl;
+
+ deallog << "locally owned cells: " << tr.n_locally_owned_active_cells()
+ << " / "
+ << tr.n_global_active_cells()
+ << std::endl;
+ print_cells(tr);
+
+ deallog << "*** 2. repartition:" << std::endl;
+
+ tr.repartition();
+
+ deallog << "locally owned cells: " << tr.n_locally_owned_active_cells()
+ << " / "
+ << tr.n_global_active_cells()
+ << std::endl;
+
+ print_cells(tr);
+
+ deallog << "*** 3. repartition again (noop):" << std::endl;
+ tr.repartition();
+
+ deallog << "locally owned cells: " << tr.n_locally_owned_active_cells()
+ << " / "
+ << tr.n_global_active_cells()
+ << std::endl;
+
+ print_cells(tr);
+
+ const unsigned int checksum = tr.get_checksum ();
+ if (myid == 0)
+ deallog << "Checksum: "
+ << checksum
+ << std::endl;
+ }
+
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+ deallog << "OK" << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll log;
+ test<2>();
+}
--- /dev/null
+
+DEAL:0::*** 1. everything on one core:
+DEAL:0::locally owned cells: 0 / 16
+DEAL:0::*** 2. repartition:
+DEAL:0::locally owned cells: 4 / 16
+DEAL:0::0_2:00
+DEAL:0::0_2:01
+DEAL:0::0_2:02
+DEAL:0::0_2:03
+DEAL:0::*** 3. repartition again (noop):
+DEAL:0::locally owned cells: 4 / 16
+DEAL:0::0_2:00
+DEAL:0::0_2:01
+DEAL:0::0_2:02
+DEAL:0::0_2:03
+DEAL:0::Checksum: 3877634849
+DEAL:0::OK
+
+DEAL:1::*** 1. everything on one core:
+DEAL:1::locally owned cells: 0 / 16
+DEAL:1::*** 2. repartition:
+DEAL:1::locally owned cells: 8 / 16
+DEAL:1::0_2:10
+DEAL:1::0_2:11
+DEAL:1::0_2:12
+DEAL:1::0_2:13
+DEAL:1::0_2:20
+DEAL:1::0_2:21
+DEAL:1::0_2:22
+DEAL:1::0_2:23
+DEAL:1::*** 3. repartition again (noop):
+DEAL:1::locally owned cells: 8 / 16
+DEAL:1::0_2:10
+DEAL:1::0_2:11
+DEAL:1::0_2:12
+DEAL:1::0_2:13
+DEAL:1::0_2:20
+DEAL:1::0_2:21
+DEAL:1::0_2:22
+DEAL:1::0_2:23
+
+
+DEAL:2::*** 1. everything on one core:
+DEAL:2::locally owned cells: 16 / 16
+DEAL:2::0_2:00
+DEAL:2::0_2:01
+DEAL:2::0_2:02
+DEAL:2::0_2:03
+DEAL:2::0_2:10
+DEAL:2::0_2:11
+DEAL:2::0_2:12
+DEAL:2::0_2:13
+DEAL:2::0_2:20
+DEAL:2::0_2:21
+DEAL:2::0_2:22
+DEAL:2::0_2:23
+DEAL:2::0_2:30
+DEAL:2::0_2:31
+DEAL:2::0_2:32
+DEAL:2::0_2:33
+DEAL:2::*** 2. repartition:
+DEAL:2::locally owned cells: 4 / 16
+DEAL:2::0_2:30
+DEAL:2::0_2:31
+DEAL:2::0_2:32
+DEAL:2::0_2:33
+DEAL:2::*** 3. repartition again (noop):
+DEAL:2::locally owned cells: 4 / 16
+DEAL:2::0_2:30
+DEAL:2::0_2:31
+DEAL:2::0_2:32
+DEAL:2::0_2:33
+