<ol>
+ <li> New: The new flag no_automatic_repartitioning in
+ parallel::distributed::Triangulation disables the automatic
+ repartitioning that is otherwise performed when calling
+ execute_coarsening_and_refinement() (or functions like refine_global(),
+ ...), so that all cells stay on the processor they were on before. The
+ new function repartition() performs this repartitioning on demand.
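+ For example (a sketch of the intended use; the constructor arguments are
+ the same as in the new tests added with this change):
+ @code
+ parallel::distributed::Triangulation<dim> tria
+   (MPI_COMM_WORLD,
+    Triangulation<dim>::none,
+    parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
+ GridGenerator::hyper_cube (tria);
+ tria.refine_global (2);   // cells stay on their current owners
+ tria.repartition ();      // now redistribute them explicitly
+ @endcode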
+ <br>
+ (Timo Heister, 2015/03/22)
+ </li>
+
<li> Changed: All (Block)Compressed*SparsityPattern classes got
replaced by DynamicSparsityPattern and
BlockDynamicSparsityPattern, respectively and all examples now
* geometric multigrid functionality. This option requires additional
* computation and communication. Note: geometric multigrid is still a
* work in progress.
+ *
+ * @p no_automatic_repartitioning disables the automatic repartitioning of
+ * the cells after a refinement cycle. Repartitioning can then be triggered
+ * manually by calling repartition().
*/
enum Settings
{
default_setting = 0x0,
mesh_reconstruction_after_repartitioning = 0x1,
- construct_multigrid_hierarchy = 0x2
+ construct_multigrid_hierarchy = 0x2,
+ no_automatic_repartitioning = 0x4
};
*/
virtual void execute_coarsening_and_refinement ();
+ /**
+ * Redistribute the active cells between processors. Normally this will
+ * be executed automatically when calling
+ * execute_coarsening_and_refinement() unless @p
+ * no_automatic_repartitioning is set in the constructor.
+ *
+ * @note When using SolutionTransfer, or manual transfer via
+ * register_data_attach() and notify_ready_to_unpack(), you need to do
+ * this twice: once when calling execute_coarsening_and_refinement(),
+ * which will handle coarsening and refinement but obviously won't ship
+ * any data between processors, and a second time when calling this
+ * function. Here, no coarsening or refinement is done, but the attached
+ * information is packed and shipped to the new owner processors.
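+ *
+ * For illustration only, a sketch of this two-step pattern (pack_cb and
+ * unpack_cb stand for user-provided callbacks as expected by
+ * register_data_attach(); compare the second test added below):
+ * @code
+ * // step 1: attach data, refine/coarsen, unpack on the same owners
+ * unsigned int offset = tria.register_data_attach (sizeof(int), pack_cb);
+ * tria.execute_coarsening_and_refinement ();
+ * tria.notify_ready_to_unpack (offset, unpack_cb);
+ *
+ * // step 2: attach again, repartition, unpack on the new owners
+ * offset = tria.register_data_attach (sizeof(int), pack_cb);
+ * tria.repartition ();
+ * tria.notify_ready_to_unpack (offset, unpack_cb);
+ * @endcode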
+ */
+ void repartition ();
+
/**
* When vertices have been moved locally, for example using code like
* @code
attach_mesh_data();
// partition the new mesh between all processors
- dealii::internal::p4est::functions<dim>::
- partition (parallel_forest,
- /* prepare coarsening */ 1,
- /* weight_callback */ NULL);
+ if (!(settings & no_automatic_repartitioning))
+ dealii::internal::p4est::functions<dim>::
+ partition (parallel_forest,
+ /* prepare coarsening */ 1,
+ /* weight_callback */ NULL);
// finally copy back from local part of tree to deal.II
// triangulation. before doing so, make sure there are no refine or
update_number_cache ();
}
+ template <int dim, int spacedim>
+ void
+ Triangulation<dim,spacedim>::repartition ()
+ {
+ AssertThrow(settings & no_automatic_repartitioning,
+ ExcMessage("You need to set no_automatic_repartition when calling repartition() manually."));
+
+ refinement_in_progress = true;
+
+ // before repartitioning the mesh let others attach mesh related info
+ // (such as SolutionTransfer data) to the p4est
+ attach_mesh_data();
+
+ dealii::internal::p4est::functions<dim>::
+ partition (parallel_forest,
+ /* prepare coarsening */ 1,
+ /* weight_callback */ NULL);
+
+ try
+ {
+ copy_local_forest_to_triangulation ();
+ }
+ catch (const typename Triangulation<dim>::DistortedCellList &)
+ {
+ // the underlying triangulation should not be checking for distorted
+ // cells
+ AssertThrow (false, ExcInternalError());
+ }
+
+ refinement_in_progress = false;
+
+ update_number_cache ();
+ }
template <int dim, int spacedim>
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2014 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// test manual repartitioning
+
+#include "../tests.h"
+#include "coarse_grid_common.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/base/utilities.h>
+
+
+#include <fstream>
+
+
+template<int dim>
+void test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+ if (true)
+ {
+ parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
+ dealii::Triangulation<dim,dim>::none,
+ parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
+
+ GridGenerator::hyper_cube(tr);
+ tr.refine_global(2);
+
+ deallog << "locally owned cells: " << tr.n_locally_owned_active_cells()
+ << " / "
+ << tr.n_global_active_cells()
+ << std::endl;
+
+ //tr.write_mesh_vtk("a");
+
+ tr.repartition();
+
+ //tr.write_mesh_vtk("b");
+
+ deallog << "locally owned cells: " << tr.n_locally_owned_active_cells()
+ << " / "
+ << tr.n_global_active_cells()
+ << std::endl;
+
+ const unsigned int checksum = tr.get_checksum ();
+ if (myid == 0)
+ deallog << "Checksum: "
+ << checksum
+ << std::endl;
+ }
+
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+ deallog << "OK" << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll log;
+ test<2>();
+}
--- /dev/null
+
+DEAL:0::locally owned cells: 0 / 16
+DEAL:0::locally owned cells: 4 / 16
+DEAL:0::Checksum: 3877634849
+DEAL:0::OK
+
+DEAL:1::locally owned cells: 0 / 16
+DEAL:1::locally owned cells: 8 / 16
+
+
+DEAL:2::locally owned cells: 16 / 16
+DEAL:2::locally owned cells: 4 / 16
+
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2014 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// test manual repartitioning and transferring data
+
+#include "../tests.h"
+#include "coarse_grid_common.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/base/utilities.h>
+
+
+#include <fstream>
+
+std::string id_to_string(const CellId &id)
+{
+ std::ostringstream ss;
+ ss << id;
+ return ss.str();
+}
+
+template<int dim>
+void pack_function (const typename parallel::distributed::Triangulation<dim,dim>::cell_iterator &cell,
+ const typename parallel::distributed::Triangulation<dim,dim>::CellStatus status,
+ void *data)
+{
+ static int some_number = cell->index();
+ deallog << "packing cell " << cell->id() << " with data=" << some_number << " status=";
+ if (status==parallel::distributed::Triangulation<dim,dim>::CELL_PERSIST)
+ deallog << "PERSIST";
+ else if (status==parallel::distributed::Triangulation<dim,dim>::CELL_REFINE)
+ deallog << "REFINE";
+ else if (status==parallel::distributed::Triangulation<dim,dim>::CELL_COARSEN)
+ deallog << "COARSEN";
+ deallog << std::endl;
+
+ if (status==parallel::distributed::Triangulation<dim,dim>::CELL_COARSEN)
+ {
+ Assert(cell->has_children(), ExcInternalError());
+ }
+ else
+ {
+ Assert(!cell->has_children(), ExcInternalError());
+ }
+
+ int * intdata = reinterpret_cast<int*>(data);
+ *intdata = some_number;
+
+ ++some_number;
+}
+
+template<int dim>
+void unpack_function (const typename parallel::distributed::Triangulation<dim,dim>::cell_iterator &cell,
+ const typename parallel::distributed::Triangulation<dim,dim>::CellStatus status,
+ const void *data)
+{
+ const int * intdata = reinterpret_cast<const int*>(data);
+
+ deallog << "unpacking cell " << cell->id() << " with data=" << (*intdata) << " status=";
+ if (status==parallel::distributed::Triangulation<dim,dim>::CELL_PERSIST)
+ deallog << "PERSIST";
+ else if (status==parallel::distributed::Triangulation<dim,dim>::CELL_REFINE)
+ deallog << "REFINE";
+ else if (status==parallel::distributed::Triangulation<dim,dim>::CELL_COARSEN)
+ deallog << "COARSEN";
+ deallog << std::endl;
+
+ if (status==parallel::distributed::Triangulation<dim,dim>::CELL_REFINE)
+ {
+ Assert(cell->has_children(), ExcInternalError());
+ }
+ else
+ {
+ Assert(!cell->has_children(), ExcInternalError());
+ }
+}
+
+template<int dim>
+void test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+ if (true)
+ {
+ parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
+ dealii::Triangulation<dim,dim>::none,
+ parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
+
+ GridGenerator::hyper_cube(tr);
+ tr.refine_global(1);
+
+ deallog << "locally owned cells: " << tr.n_locally_owned_active_cells()
+ << " / "
+ << tr.n_global_active_cells()
+ << std::endl;
+
+ deallog << "* global refine:" << std::endl;
+
+ unsigned int offset = tr.register_data_attach(sizeof(int), pack_function<dim>);
+
+ tr.refine_global(1);
+
+ deallog << "locally owned cells: " << tr.n_locally_owned_active_cells()
+ << " / "
+ << tr.n_global_active_cells()
+ << std::endl;
+
+ tr.notify_ready_to_unpack(offset, unpack_function<dim>);
+
+ //tr.write_mesh_vtk("a");
+
+ deallog << "* repartition:" << std::endl;
+
+ offset = tr.register_data_attach(sizeof(int), pack_function<dim>);
+
+ tr.repartition();
+
+ //tr.write_mesh_vtk("b");
+
+ deallog << "locally owned cells: " << tr.n_locally_owned_active_cells()
+ << " / "
+ << tr.n_global_active_cells()
+ << std::endl;
+
+ tr.notify_ready_to_unpack(offset, unpack_function<dim>);
+
+ const unsigned int checksum = tr.get_checksum ();
+ if (myid == 0)
+ deallog << "Checksum: "
+ << checksum
+ << std::endl;
+ }
+
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+ deallog << "OK" << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll log;
+ test<2>();
+}
--- /dev/null
+
+DEAL:0::locally owned cells: 0 / 4
+DEAL:0::* global refine:
+DEAL:0::locally owned cells: 0 / 16
+DEAL:0::* repartition:
+DEAL:0::locally owned cells: 4 / 16
+DEAL:0::unpacking cell 0_2:00 with data=4 status=PERSIST
+DEAL:0::unpacking cell 0_2:01 with data=5 status=PERSIST
+DEAL:0::unpacking cell 0_2:02 with data=6 status=PERSIST
+DEAL:0::unpacking cell 0_2:03 with data=7 status=PERSIST
+DEAL:0::Checksum: 3877634849
+DEAL:0::OK
+
+DEAL:1::locally owned cells: 0 / 4
+DEAL:1::* global refine:
+DEAL:1::locally owned cells: 0 / 16
+DEAL:1::* repartition:
+DEAL:1::locally owned cells: 8 / 16
+DEAL:1::unpacking cell 0_2:10 with data=8 status=PERSIST
+DEAL:1::unpacking cell 0_2:11 with data=9 status=PERSIST
+DEAL:1::unpacking cell 0_2:12 with data=10 status=PERSIST
+DEAL:1::unpacking cell 0_2:13 with data=11 status=PERSIST
+DEAL:1::unpacking cell 0_2:20 with data=12 status=PERSIST
+DEAL:1::unpacking cell 0_2:21 with data=13 status=PERSIST
+DEAL:1::unpacking cell 0_2:22 with data=14 status=PERSIST
+DEAL:1::unpacking cell 0_2:23 with data=15 status=PERSIST
+
+
+DEAL:2::locally owned cells: 4 / 4
+DEAL:2::* global refine:
+DEAL:2::packing cell 0_1:0 with data=0 status=REFINE
+DEAL:2::packing cell 0_1:1 with data=1 status=REFINE
+DEAL:2::packing cell 0_1:2 with data=2 status=REFINE
+DEAL:2::packing cell 0_1:3 with data=3 status=REFINE
+DEAL:2::locally owned cells: 16 / 16
+DEAL:2::unpacking cell 0_1:0 with data=0 status=REFINE
+DEAL:2::unpacking cell 0_1:1 with data=1 status=REFINE
+DEAL:2::unpacking cell 0_1:2 with data=2 status=REFINE
+DEAL:2::unpacking cell 0_1:3 with data=3 status=REFINE
+DEAL:2::* repartition:
+DEAL:2::packing cell 0_2:00 with data=4 status=PERSIST
+DEAL:2::packing cell 0_2:01 with data=5 status=PERSIST
+DEAL:2::packing cell 0_2:02 with data=6 status=PERSIST
+DEAL:2::packing cell 0_2:03 with data=7 status=PERSIST
+DEAL:2::packing cell 0_2:10 with data=8 status=PERSIST
+DEAL:2::packing cell 0_2:11 with data=9 status=PERSIST
+DEAL:2::packing cell 0_2:12 with data=10 status=PERSIST
+DEAL:2::packing cell 0_2:13 with data=11 status=PERSIST
+DEAL:2::packing cell 0_2:20 with data=12 status=PERSIST
+DEAL:2::packing cell 0_2:21 with data=13 status=PERSIST
+DEAL:2::packing cell 0_2:22 with data=14 status=PERSIST
+DEAL:2::packing cell 0_2:23 with data=15 status=PERSIST
+DEAL:2::packing cell 0_2:30 with data=16 status=PERSIST
+DEAL:2::packing cell 0_2:31 with data=17 status=PERSIST
+DEAL:2::packing cell 0_2:32 with data=18 status=PERSIST
+DEAL:2::packing cell 0_2:33 with data=19 status=PERSIST
+DEAL:2::locally owned cells: 4 / 16
+DEAL:2::unpacking cell 0_2:30 with data=16 status=PERSIST
+DEAL:2::unpacking cell 0_2:31 with data=17 status=PERSIST
+DEAL:2::unpacking cell 0_2:32 with data=18 status=PERSIST
+DEAL:2::unpacking cell 0_2:33 with data=19 status=PERSIST
+