https://gitweb.dealii.org/ - dealii.git/commitdiff
address comments (678/head)
author     Timo Heister <timo.heister@gmail.com>
           Sun, 22 Mar 2015 15:35:16 +0000 (11:35 -0400)
committer  Timo Heister <timo.heister@gmail.com>
           Sun, 22 Mar 2015 16:39:34 +0000 (12:39 -0400)
- update documentation
- check that no flags are set when calling repartition()
- add another test

include/deal.II/distributed/tria.h
source/distributed/tria.cc
tests/mpi/repartition_02.cc
tests/mpi/repartition_03.cc [new file with mode: 0644]
tests/mpi/repartition_03.mpirun=3.output [new file with mode: 0644]

index 4c58b2d67ed2f8875886e18f43e2d9e415f74399..4ff5a9e68cb05f18f4e0a35b2cceb7eb57ef5a56 100644 (file)
@@ -475,18 +475,22 @@ namespace parallel
       virtual void execute_coarsening_and_refinement ();
 
       /**
-       * Redistribute the active cells between processors. Normally this will
-       * be executed automatically when calling
-       * execute_coarsening_and_refinement() unless @p
-       * no_automatic_repartitioning is set in the constructor.
+       * Manually repartition the active cells between processors. Normally
+       * this repartitioning will happen automatically when calling
+       * execute_coarsening_and_refinement() (or refine_global()) unless the
+       * @p no_automatic_repartitioning flag is set in the constructor. Setting
+       * this flag and then calling repartition() gives the same result.
        *
-       * @note When using SolutionTransfer or manual transfer using
-       * register_data_attach() and notify_ready_to_unpack() you need to do
-       * this twice: once when calling execute_coarsening_and_refinement(),
+       * If you want to transfer data (using SolutionTransfer or manually with
+       * register_data_attach() and notify_ready_to_unpack()), you need to set
+       * it up twice: once when calling execute_coarsening_and_refinement(),
        * which will handle coarsening and refinement but obviously won't ship
-       * any data between processors, and a second time when calling this
-       * function. Here, no coarsening and refinement will be done but
-       * information will be packed and shipped to different processors.
+       * any data between processors, and a second time when calling
+       * repartition(). Here, no coarsening or refinement will be done but
+       * information will be packed and shipped to different processors. In
+       * other words, you probably want to treat a call to repartition() in
+       * the same way as execute_coarsening_and_refinement() with respect to
+       * dealing with data movement (SolutionTransfer, etc.).
        */
       void repartition ();
 
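
A minimal sketch (not part of this commit) of the two-phase transfer the new documentation describes, assuming a triangulation constructed with the no_automatic_repartitioning flag; the objects "triangulation", "dof_handler", "fe", and "solution" are hypothetical placeholders, and the ghost-entry setup that the input vectors need is omitted for brevity:

      #include <deal.II/distributed/solution_transfer.h>

      parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
        soltrans (dof_handler);

      // phase 1: coarsen and refine; this ships no solution data between
      // processors, so interpolate onto the new mesh locally
      soltrans.prepare_for_coarsening_and_refinement (solution);
      triangulation.execute_coarsening_and_refinement ();
      dof_handler.distribute_dofs (fe);
      TrilinosWrappers::MPI::Vector interpolated (dof_handler.locally_owned_dofs (),
                                                  MPI_COMM_WORLD);
      soltrans.interpolate (interpolated);

      // phase 2: repartition; now the data is packed, shipped to the new
      // owning processors, and unpacked there
      soltrans.prepare_for_coarsening_and_refinement (interpolated);
      triangulation.repartition ();
      dof_handler.distribute_dofs (fe);
      TrilinosWrappers::MPI::Vector repartitioned (dof_handler.locally_owned_dofs (),
                                                   MPI_COMM_WORLD);
      soltrans.interpolate (repartitioned);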
index 64977e198e5f8af211b21225244b7d9bd7f9db35..32d0d9d4a368f1e1e2c946a3357e9ec707a4896a 100644 (file)
@@ -3455,6 +3455,16 @@ namespace parallel
       AssertThrow(settings & no_automatic_repartitioning,
                   ExcMessage("You need to set no_automatic_repartition when calling repartition() manually."));
 
+#ifdef DEBUG
+      for (typename Triangulation<dim,spacedim>::active_cell_iterator
+           cell = this->begin_active();
+           cell != this->end(); ++cell)
+        if (cell->is_locally_owned())
+          Assert (
+            !cell->refine_flag_set() && !cell->coarsen_flag_set(),
+            ExcMessage ("Error: There shouldn't be any cells flagged for coarsening/refinement when calling repartition()."));
+#endif
+
       refinement_in_progress = true;
 
       // before repartitioning the mesh let others attach mesh related info
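
For reference, a hypothetical misuse that this new debug check catches (a sketch, not part of the commit; "tr" stands for any parallel::distributed::Triangulation):

      tr.begin_active ()->set_refine_flag ();
      tr.repartition ();   // aborts in debug mode: execute or clear all
                           // refinement/coarsening flags before repartitioning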
index 36add57f52c8963223b56f1d16a2c0e9d9e7b17b..dbfc980d62efd3736236f6a531d7039552020137 100644 (file)
 
 #include <fstream>
 
-std::string id_to_string(const CellId &id)
-{
-  std::ostringstream ss;
-  ss << id;
-  return ss.str();
-}
-
 template<int dim>
 void pack_function (const typename parallel::distributed::Triangulation<dim,dim>::cell_iterator &cell,
                     const typename parallel::distributed::Triangulation<dim,dim>::CellStatus status,
diff --git a/tests/mpi/repartition_03.cc b/tests/mpi/repartition_03.cc
new file mode 100644 (file)
index 0000000..646e8f5
--- /dev/null
@@ -0,0 +1,105 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2014 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// test that a second call to repartition() doesn't do anything
+
+#include "../tests.h"
+#include "coarse_grid_common.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/base/utilities.h>
+
+
+#include <fstream>
+
+template<int dim>
+void print_cells(parallel::distributed::Triangulation<dim> & tr)
+{
+  for (typename Triangulation<dim>::active_cell_iterator
+        cell = tr.begin_active();
+       cell != tr.end(); ++cell)
+    if (cell->is_locally_owned())
+      deallog << cell->id() << std::endl;
+}
+
+template<int dim>
+void test()
+{
+  unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
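+  // scope block (a common deal.II test pattern) so the triangulation is
+  // destructed before the final output below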
+  if (true)
+    {
+      parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
+                                                  dealii::Triangulation<dim,dim>::none,
+                                                  parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
+
+      GridGenerator::hyper_cube(tr);
+      tr.refine_global(2);
+
+      deallog << "*** 1. everything on one core:" << std::endl;
+
+      deallog << "locally owned cells: " << tr.n_locally_owned_active_cells()
+             << " / "
+             << tr.n_global_active_cells()
+             << std::endl;
+      print_cells(tr);
+
+      deallog << "*** 2. repartition:" << std::endl;
+
+      tr.repartition();
+
+      deallog << "locally owned cells: " << tr.n_locally_owned_active_cells()
+             << " / "
+             << tr.n_global_active_cells()
+             << std::endl;
+
+      print_cells(tr);
+
+      deallog << "*** 3. repartition again (noop):" << std::endl;
+      tr.repartition();
+
+      deallog << "locally owned cells: " << tr.n_locally_owned_active_cells()
+             << " / "
+             << tr.n_global_active_cells()
+             << std::endl;
+
+      print_cells(tr);
+
+      const unsigned int checksum = tr.get_checksum ();
+      if (myid == 0)
+        deallog << "Checksum: "
+                << checksum
+                << std::endl;
+    }
+
+  if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+    deallog << "OK" << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll log;
+  test<2>();
+}
diff --git a/tests/mpi/repartition_03.mpirun=3.output b/tests/mpi/repartition_03.mpirun=3.output
new file mode 100644 (file)
index 0000000..5a0e9b0
--- /dev/null
@@ -0,0 +1,73 @@
+
+DEAL:0::*** 1. everything on one core:
+DEAL:0::locally owned cells: 0 / 16
+DEAL:0::*** 2. repartition:
+DEAL:0::locally owned cells: 4 / 16
+DEAL:0::0_2:00
+DEAL:0::0_2:01
+DEAL:0::0_2:02
+DEAL:0::0_2:03
+DEAL:0::*** 3. repartition again (noop):
+DEAL:0::locally owned cells: 4 / 16
+DEAL:0::0_2:00
+DEAL:0::0_2:01
+DEAL:0::0_2:02
+DEAL:0::0_2:03
+DEAL:0::Checksum: 3877634849
+DEAL:0::OK
+
+DEAL:1::*** 1. everything on one core:
+DEAL:1::locally owned cells: 0 / 16
+DEAL:1::*** 2. repartition:
+DEAL:1::locally owned cells: 8 / 16
+DEAL:1::0_2:10
+DEAL:1::0_2:11
+DEAL:1::0_2:12
+DEAL:1::0_2:13
+DEAL:1::0_2:20
+DEAL:1::0_2:21
+DEAL:1::0_2:22
+DEAL:1::0_2:23
+DEAL:1::*** 3. repartition again (noop):
+DEAL:1::locally owned cells: 8 / 16
+DEAL:1::0_2:10
+DEAL:1::0_2:11
+DEAL:1::0_2:12
+DEAL:1::0_2:13
+DEAL:1::0_2:20
+DEAL:1::0_2:21
+DEAL:1::0_2:22
+DEAL:1::0_2:23
+
+
+DEAL:2::*** 1. everything on one core:
+DEAL:2::locally owned cells: 16 / 16
+DEAL:2::0_2:00
+DEAL:2::0_2:01
+DEAL:2::0_2:02
+DEAL:2::0_2:03
+DEAL:2::0_2:10
+DEAL:2::0_2:11
+DEAL:2::0_2:12
+DEAL:2::0_2:13
+DEAL:2::0_2:20
+DEAL:2::0_2:21
+DEAL:2::0_2:22
+DEAL:2::0_2:23
+DEAL:2::0_2:30
+DEAL:2::0_2:31
+DEAL:2::0_2:32
+DEAL:2::0_2:33
+DEAL:2::*** 2. repartition:
+DEAL:2::locally owned cells: 4 / 16
+DEAL:2::0_2:30
+DEAL:2::0_2:31
+DEAL:2::0_2:32
+DEAL:2::0_2:33
+DEAL:2::*** 3. repartition again (noop):
+DEAL:2::locally owned cells: 4 / 16
+DEAL:2::0_2:30
+DEAL:2::0_2:31
+DEAL:2::0_2:32
+DEAL:2::0_2:33
+
