Add tests for hp::DoFHandler on shared::Triangulation.
author    Wolfgang Bangerth <bangerth@colostate.edu>
Fri, 7 Jul 2017 02:17:43 +0000 (20:17 -0600)
committer Wolfgang Bangerth <bangerth@colostate.edu>
Sun, 9 Jul 2017 16:04:06 +0000 (10:04 -0600)
These are simply the existing tests in that directory, with DoFHandler changed
to hp::DoFHandler.
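
The same mechanical pattern recurs in every file below; as a minimal sketch
(the "before" shape is inferred from the commit message, the diffs themselves
are authoritative):

    // before: a single element attached to a plain DoFHandler
    FESystem<dim>   fe (FE_Q<dim>(3), 2, FE_DGQ<dim>(1), 1);
    DoFHandler<dim> dof_handler (triangulation);
    dof_handler.distribute_dofs (fe);

    // after: the same element wrapped in a one-entry hp::FECollection
    hp::FECollection<dim> fe;
    fe.push_back (FESystem<dim> (FE_Q<dim>(3), 2,
                                 FE_DGQ<dim>(1), 1));
    hp::DoFHandler<dim> dof_handler (triangulation);
    dof_handler.distribute_dofs (fe);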

12 files changed:
tests/sharedtria/hp_dof_01.cc [new file with mode: 0644]
tests/sharedtria/hp_dof_01.with_metis=true.mpirun=3.output [new file with mode: 0644]
tests/sharedtria/hp_dof_02.cc [new file with mode: 0644]
tests/sharedtria/hp_dof_02.with_metis=true.mpirun=3.output [new file with mode: 0644]
tests/sharedtria/hp_dof_03.cc [new file with mode: 0644]
tests/sharedtria/hp_dof_03.with_metis=true.mpirun=3.output [new file with mode: 0644]
tests/sharedtria/hp_dof_04.cc [new file with mode: 0644]
tests/sharedtria/hp_dof_04.with_metis=true.mpirun=3.output [new file with mode: 0644]
tests/sharedtria/hp_no_cells_01.cc [new file with mode: 0644]
tests/sharedtria/hp_no_cells_01.with_metis=true.mpirun=3.output [new file with mode: 0644]
tests/sharedtria/hp_no_cells_01.with_metis=true.mpirun=5.output [new file with mode: 0644]
tests/sharedtria/hp_no_cells_01.with_metis=true.mpirun=9.output [new file with mode: 0644]

diff --git a/tests/sharedtria/hp_dof_01.cc b/tests/sharedtria/hp_dof_01.cc
new file mode 100644 (file)
index 0000000..8f0663f
--- /dev/null
@@ -0,0 +1,153 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2008 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// check number cache for shared_tria
+//
+// this test is just like the one without hp_ but uses an
+// hp::DoFHandler instead of a regular DoFHandler (but with only one
+// element). the output should be, and is, the same
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/intergrid_map.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/hp/dof_handler.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_dgq.h>
+
+#include <fstream>
+#include <cstdlib>
+#include <numeric>
+
+
+template <int dim>
+void test()
+{
+  parallel::shared::Triangulation<dim>
+  triangulation (MPI_COMM_WORLD);
+
+  hp::FECollection<dim> fe;
+  fe.push_back (FESystem<dim> (FE_Q<dim>(3),2,
+                               FE_DGQ<dim>(1),1));
+
+  hp::DoFHandler<dim> dof_handler (triangulation);
+
+  GridGenerator::hyper_cube(triangulation);
+  triangulation.refine_global (2);
+
+  const unsigned int n_refinements[] = { 0, 4, 3, 2 };
+  for (unsigned int i=0; i<n_refinements[dim]; ++i)
+    {
+      // refine one-fifth of cells randomly
+      std::vector<bool> flags (triangulation.n_active_cells(), false);
+      for (unsigned int k=0; k<flags.size()/5 + 1; ++k)
+        flags[Testing::rand() % flags.size()] = true;
+      // make sure there's at least one that
+      // will be refined
+      flags[0] = true;
+
+      // refine triangulation
+      unsigned int index=0;
+      for (typename Triangulation<dim>::active_cell_iterator
+           cell = triangulation.begin_active();
+           cell != triangulation.end(); ++cell)
+        {
+          if (flags[index])
+            cell->set_refine_flag();
+          ++index;
+        }
+
+      Assert (index <= triangulation.n_active_cells(), ExcInternalError());
+
+      // flag all other cells for coarsening
+      // (this should ensure that at least
+      // some of them will actually be
+      // coarsened)
+      index=0;
+      for (typename Triangulation<dim>::active_cell_iterator
+           cell = triangulation.begin_active();
+           cell != triangulation.end(); ++cell)
+        {
+          if (!flags[index])
+            cell->set_coarsen_flag();
+          ++index;
+        }
+
+      triangulation.execute_coarsening_and_refinement ();
+      dof_handler.distribute_dofs (fe);
+
+      // avoid outputting any partitioning info until ParMETIS gives reproducible results
+      deallog
+          << "n_dofs: " << dof_handler.n_dofs() << std::endl;
+//          << "n_locally_owned_dofs: " << dof_handler.n_locally_owned_dofs() << std::endl;
+
+//      deallog << "n_locally_owned_dofs_per_processor: ";
+//      std::vector<types::global_dof_index> v = dof_handler.n_locally_owned_dofs_per_processor();
+//      unsigned int sum = 0;
+//      for (unsigned int i=0; i<v.size(); ++i)
+//        {
+//          deallog << v[i] << " ";
+//          sum += v[i];
+//        }
+//      deallog << " sum: " << sum << std::endl;
+
+      Assert(dof_handler.n_locally_owned_dofs() == dof_handler.n_locally_owned_dofs_per_processor()[triangulation.locally_owned_subdomain()], ExcInternalError());
+      Assert( dof_handler.n_locally_owned_dofs() == dof_handler.locally_owned_dofs().n_elements(), ExcInternalError());
+
+      const unsigned int N = dof_handler.n_dofs();
+
+      Assert (dof_handler.n_locally_owned_dofs() <= N,
+              ExcInternalError());
+      Assert (std::accumulate (dof_handler.n_locally_owned_dofs_per_processor().begin(),
+                               dof_handler.n_locally_owned_dofs_per_processor().end(),
+                               0U) == N,
+              ExcInternalError());
+
+      IndexSet all (N);
+      for (unsigned int i=0;
+           i<dof_handler.locally_owned_dofs_per_processor().size(); ++i)
+        {
+          IndexSet intersect = all & dof_handler.locally_owned_dofs_per_processor()[i];
+          Assert(intersect.n_elements()==0, ExcInternalError());
+          all.add_indices(dof_handler.locally_owned_dofs_per_processor()[i]);
+        }
+
+      Assert(all == complete_index_set(N), ExcInternalError());
+    }
+}
+
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  MPILogInitAll all;
+
+  deallog.push("2d");
+  test<2>();
+  deallog.pop();
+
+  deallog.push("3d");
+  test<3>();
+  deallog.pop();
+}
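
The chain of Assert calls at the end of the loop above encodes a single
invariant: the locally owned index sets of all processors must partition the
complete index range [0, N). A standalone sketch of that check, using only
IndexSet operations already exercised by the test:

    #include <deal.II/base/index_set.h>
    #include <vector>
    using namespace dealii;

    // true iff the sets are pairwise disjoint and their union covers [0, n)
    bool is_partition (const std::vector<IndexSet> &owned,
                       const types::global_dof_index n)
    {
      IndexSet all (n);
      for (const IndexSet &s : owned)
        {
          if ((all & s).n_elements () != 0)   // overlap between two ranks
            return false;
          all.add_indices (s);                // accumulate the union
        }
      return all == complete_index_set (n);   // union must cover everything
    }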
diff --git a/tests/sharedtria/hp_dof_01.with_metis=true.mpirun=3.output b/tests/sharedtria/hp_dof_01.with_metis=true.mpirun=3.output
new file mode 100644 (file)
index 0000000..ae56d84
--- /dev/null
@@ -0,0 +1,20 @@
+
+DEAL:0:2d::n_dofs: 818
+DEAL:0:2d::n_dofs: 1754
+DEAL:0:2d::n_dofs: 3056
+DEAL:0:3d::n_dofs: 13282
+DEAL:0:3d::n_dofs: 41826
+
+DEAL:1:2d::n_dofs: 818
+DEAL:1:2d::n_dofs: 1754
+DEAL:1:2d::n_dofs: 3056
+DEAL:1:3d::n_dofs: 13282
+DEAL:1:3d::n_dofs: 41826
+
+
+DEAL:2:2d::n_dofs: 818
+DEAL:2:2d::n_dofs: 1754
+DEAL:2:2d::n_dofs: 3056
+DEAL:2:3d::n_dofs: 13282
+DEAL:2:3d::n_dofs: 41826
+
diff --git a/tests/sharedtria/hp_dof_02.cc b/tests/sharedtria/hp_dof_02.cc
new file mode 100644 (file)
index 0000000..86a2921
--- /dev/null
@@ -0,0 +1,153 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2008 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// check number cache for shared_tria with renumbering
+//
+// this test is just like the one without hp_ but uses an
+// hp::DoFHandler instead of a regular DoFHandler (but with only one
+// element). the output should be, and is, the same
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/dofs/dof_renumbering.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/intergrid_map.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/hp/dof_handler.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_dgq.h>
+
+#include <fstream>
+#include <cstdlib>
+#include <numeric>
+
+
+template <int dim>
+void test()
+{
+  parallel::shared::Triangulation<dim>
+  triangulation (MPI_COMM_WORLD);
+
+  hp::FECollection<dim> fe;
+  fe.push_back (FESystem<dim> (FE_Q<dim>(3),2,
+                               FE_DGQ<dim>(1),1));
+
+  hp::DoFHandler<dim> dof_handler (triangulation);
+
+  GridGenerator::hyper_cube(triangulation);
+  triangulation.refine_global (2);
+
+  const unsigned int n_refinements[] = { 0, 4, 3, 2 };
+  for (unsigned int i=0; i<n_refinements[dim]; ++i)
+    {
+      // refine one-fifth of cells randomly
+      std::vector<bool> flags (triangulation.n_active_cells(), false);
+      for (unsigned int k=0; k<flags.size()/5 + 1; ++k)
+        flags[Testing::rand() % flags.size()] = true;
+      // make sure there's at least one that
+      // will be refined
+      flags[0] = true;
+
+      // refine triangulation
+      unsigned int index=0;
+      for (typename Triangulation<dim>::active_cell_iterator
+           cell = triangulation.begin_active();
+           cell != triangulation.end(); ++cell)
+        {
+          if (flags[index])
+            cell->set_refine_flag();
+          ++index;
+        }
+
+      Assert (index <= triangulation.n_active_cells(), ExcInternalError());
+
+      // flag all other cells for coarsening
+      // (this should ensure that at least
+      // some of them will actually be
+      // coarsened)
+      index=0;
+      for (typename Triangulation<dim>::active_cell_iterator
+           cell = triangulation.begin_active();
+           cell != triangulation.end(); ++cell)
+        {
+          if (!flags[index])
+            cell->set_coarsen_flag();
+          ++index;
+        }
+
+      triangulation.execute_coarsening_and_refinement ();
+      dof_handler.distribute_dofs (fe);
+      DoFRenumbering::component_wise(dof_handler);
+
+      deallog
+          << "n_dofs: " << dof_handler.n_dofs() << std::endl;
+//          << "n_locally_owned_dofs: " << dof_handler.n_locally_owned_dofs() << std::endl;
+//
+//      deallog << "n_locally_owned_dofs_per_processor: ";
+//      std::vector<types::global_dof_index> v = dof_handler.n_locally_owned_dofs_per_processor();
+//      unsigned int sum = 0;
+//      for (unsigned int i=0; i<v.size(); ++i)
+//        {
+//          deallog << v[i] << " ";
+//          sum += v[i];
+//        }
+//      deallog << " sum: " << sum << std::endl;
+
+      Assert(dof_handler.n_locally_owned_dofs() == dof_handler.n_locally_owned_dofs_per_processor()[triangulation.locally_owned_subdomain()], ExcInternalError());
+      Assert( dof_handler.n_locally_owned_dofs() == dof_handler.locally_owned_dofs().n_elements(), ExcInternalError());
+
+      const unsigned int N = dof_handler.n_dofs();
+
+      Assert (dof_handler.n_locally_owned_dofs() <= N,
+              ExcInternalError());
+      Assert (std::accumulate (dof_handler.n_locally_owned_dofs_per_processor().begin(),
+                               dof_handler.n_locally_owned_dofs_per_processor().end(),
+                               0U) == N,
+              ExcInternalError());
+
+      IndexSet all (N);
+      for (unsigned int i=0;
+           i<dof_handler.locally_owned_dofs_per_processor().size(); ++i)
+        {
+          IndexSet intersect = all & dof_handler.locally_owned_dofs_per_processor()[i];
+          Assert(intersect.n_elements()==0, ExcInternalError());
+          all.add_indices(dof_handler.locally_owned_dofs_per_processor()[i]);
+        }
+      Assert(all == complete_index_set(N), ExcInternalError());
+    }
+}
+
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  MPILogInitAll all;
+
+  deallog.push("2d");
+  test<2>();
+  deallog.pop();
+
+  deallog.push("3d");
+  test<3>();
+  deallog.pop();
+}
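
hp_dof_02 differs from hp_dof_01 only in the DoFRenumbering::component_wise()
call after distribute_dofs(): a renumbering permutes global DoF indices but
must leave every quantity in the number cache unchanged, which is why the same
assertions still hold. A minimal sketch of that property (hypothetical helper,
not part of the commit):

    #include <deal.II/base/exceptions.h>
    #include <deal.II/dofs/dof_renumbering.h>
    #include <deal.II/hp/dof_handler.h>
    using namespace dealii;

    template <int dim>
    void check_counts_survive_renumbering (hp::DoFHandler<dim> &dof_handler)
    {
      const types::global_dof_index n_owned =
        dof_handler.n_locally_owned_dofs ();
      // groups global indices by vector component; ownership counts,
      // n_dofs(), and the partition property may not change
      DoFRenumbering::component_wise (dof_handler);
      Assert (dof_handler.n_locally_owned_dofs () == n_owned,
              ExcInternalError ());
    }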
diff --git a/tests/sharedtria/hp_dof_02.with_metis=true.mpirun=3.output b/tests/sharedtria/hp_dof_02.with_metis=true.mpirun=3.output
new file mode 100644 (file)
index 0000000..ae56d84
--- /dev/null
@@ -0,0 +1,20 @@
+
+DEAL:0:2d::n_dofs: 818
+DEAL:0:2d::n_dofs: 1754
+DEAL:0:2d::n_dofs: 3056
+DEAL:0:3d::n_dofs: 13282
+DEAL:0:3d::n_dofs: 41826
+
+DEAL:1:2d::n_dofs: 818
+DEAL:1:2d::n_dofs: 1754
+DEAL:1:2d::n_dofs: 3056
+DEAL:1:3d::n_dofs: 13282
+DEAL:1:3d::n_dofs: 41826
+
+
+DEAL:2:2d::n_dofs: 818
+DEAL:2:2d::n_dofs: 1754
+DEAL:2:2d::n_dofs: 3056
+DEAL:2:3d::n_dofs: 13282
+DEAL:2:3d::n_dofs: 41826
+
diff --git a/tests/sharedtria/hp_dof_03.cc b/tests/sharedtria/hp_dof_03.cc
new file mode 100644 (file)
index 0000000..c61a082
--- /dev/null
@@ -0,0 +1,154 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2008 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// check number cache for shared_tria with artificial cells
+//
+// this test is just like the one without hp_ but uses an
+// hp::DoFHandler instead of a regular DoFHandler (but with only one
+// element). the output should be, and is, the same
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/intergrid_map.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/hp/dof_handler.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_dgq.h>
+
+#include <fstream>
+#include <cstdlib>
+#include <numeric>
+
+
+template <int dim>
+void test()
+{
+  parallel::shared::Triangulation<dim>
+  triangulation (MPI_COMM_WORLD,
+                 Triangulation<dim>::none,
+                 /*artificial*/true);
+
+  hp::FECollection<dim> fe;
+  fe.push_back(FESystem<dim> (FE_Q<dim>(3),2,
+                              FE_DGQ<dim>(1),1));
+
+  hp::DoFHandler<dim> dof_handler (triangulation);
+
+  GridGenerator::hyper_cube(triangulation);
+  triangulation.refine_global (2);
+
+  const unsigned int n_refinements[] = { 0, 4, 3, 2 };
+  for (unsigned int i=0; i<n_refinements[dim]; ++i)
+    {
+      // refine one-fifth of cells randomly
+      std::vector<bool> flags (triangulation.n_active_cells(), false);
+      for (unsigned int k=0; k<flags.size()/5 + 1; ++k)
+        flags[Testing::rand() % flags.size()] = true;
+      // make sure there's at least one that
+      // will be refined
+      flags[0] = true;
+
+      // refine triangulation
+      unsigned int index=0;
+      for (typename Triangulation<dim>::active_cell_iterator
+           cell = triangulation.begin_active();
+           cell != triangulation.end(); ++cell)
+        {
+          if (flags[index])
+            cell->set_refine_flag();
+          ++index;
+        }
+
+      Assert (index <= triangulation.n_active_cells(), ExcInternalError());
+
+      // flag all other cells for coarsening
+      // (this should ensure that at least
+      // some of them will actually be
+      // coarsened)
+      index=0;
+      for (typename Triangulation<dim>::active_cell_iterator
+           cell = triangulation.begin_active();
+           cell != triangulation.end(); ++cell)
+        {
+          if (!flags[index])
+            cell->set_coarsen_flag();
+          ++index;
+        }
+
+      triangulation.execute_coarsening_and_refinement ();
+      dof_handler.distribute_dofs (fe);
+
+      deallog
+          << "n_dofs: " << dof_handler.n_dofs() << std::endl;
+//          << "n_locally_owned_dofs: " << dof_handler.n_locally_owned_dofs() << std::endl;
+//
+//      deallog << "n_locally_owned_dofs_per_processor: ";
+//      std::vector<types::global_dof_index> v = dof_handler.n_locally_owned_dofs_per_processor();
+//      unsigned int sum = 0;
+//      for (unsigned int i=0; i<v.size(); ++i)
+//        {
+//          deallog << v[i] << " ";
+//          sum += v[i];
+//        }
+//      deallog << " sum: " << sum << std::endl;
+
+      Assert(dof_handler.n_locally_owned_dofs() == dof_handler.n_locally_owned_dofs_per_processor()[triangulation.locally_owned_subdomain()], ExcInternalError());
+      Assert( dof_handler.n_locally_owned_dofs() == dof_handler.locally_owned_dofs().n_elements(), ExcInternalError());
+
+      const unsigned int N = dof_handler.n_dofs();
+
+      Assert (dof_handler.n_locally_owned_dofs() <= N,
+              ExcInternalError());
+      Assert (std::accumulate (dof_handler.n_locally_owned_dofs_per_processor().begin(),
+                               dof_handler.n_locally_owned_dofs_per_processor().end(),
+                               0U) == N,
+              ExcInternalError());
+
+      IndexSet all (N);
+      for (unsigned int i=0;
+           i<dof_handler.locally_owned_dofs_per_processor().size(); ++i)
+        {
+          IndexSet intersect = all & dof_handler.locally_owned_dofs_per_processor()[i];
+          Assert(intersect.n_elements()==0, ExcInternalError());
+          all.add_indices(dof_handler.locally_owned_dofs_per_processor()[i]);
+        }
+
+      Assert(all == complete_index_set(N), ExcInternalError());
+    }
+}
+
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  MPILogInitAll all;
+
+  deallog.push("2d");
+  test<2>();
+  deallog.pop();
+
+  deallog.push("3d");
+  test<3>();
+  deallog.pop();
+}
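
hp_dof_03 constructs the parallel::shared::Triangulation with the
allow_artificial_cells flag set, so each rank keeps subdomain data only for
its own cells plus one layer of ghost cells; everything farther away is marked
artificial. A sketch of how the three kinds of active cells can be told apart
(standard cell predicates, not specific to this test):

    // classify the active cells a single rank sees when artificial
    // cells are enabled on a parallel::shared::Triangulation
    unsigned int n_owned = 0, n_ghost = 0, n_artificial = 0;
    for (const auto &cell : triangulation.active_cell_iterators ())
      {
        if (cell->is_locally_owned ())
          ++n_owned;
        else if (cell->is_ghost ())
          ++n_ghost;
        else
          ++n_artificial;   // neither owned nor ghost
      }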
diff --git a/tests/sharedtria/hp_dof_03.with_metis=true.mpirun=3.output b/tests/sharedtria/hp_dof_03.with_metis=true.mpirun=3.output
new file mode 100644 (file)
index 0000000..ae56d84
--- /dev/null
@@ -0,0 +1,20 @@
+
+DEAL:0:2d::n_dofs: 818
+DEAL:0:2d::n_dofs: 1754
+DEAL:0:2d::n_dofs: 3056
+DEAL:0:3d::n_dofs: 13282
+DEAL:0:3d::n_dofs: 41826
+
+DEAL:1:2d::n_dofs: 818
+DEAL:1:2d::n_dofs: 1754
+DEAL:1:2d::n_dofs: 3056
+DEAL:1:3d::n_dofs: 13282
+DEAL:1:3d::n_dofs: 41826
+
+
+DEAL:2:2d::n_dofs: 818
+DEAL:2:2d::n_dofs: 1754
+DEAL:2:2d::n_dofs: 3056
+DEAL:2:3d::n_dofs: 13282
+DEAL:2:3d::n_dofs: 41826
+
diff --git a/tests/sharedtria/hp_dof_04.cc b/tests/sharedtria/hp_dof_04.cc
new file mode 100644 (file)
index 0000000..5c080ad
--- /dev/null
@@ -0,0 +1,156 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2008 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// check number cache for shared_tria with artificial cells and renumbering
+//
+// this test is just like the one without hp_ but uses an
+// hp::DoFHandler instead of a regular DoFHandler (but with only one
+// element). the output should be, and is, the same
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/dofs/dof_renumbering.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/intergrid_map.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/hp/dof_handler.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_dgq.h>
+
+#include <fstream>
+#include <cstdlib>
+#include <numeric>
+
+
+template <int dim>
+void test()
+{
+  parallel::shared::Triangulation<dim>
+  triangulation (MPI_COMM_WORLD,
+                 Triangulation<dim>::none,
+                 /*artificial*/true);
+
+  hp::FECollection<dim> fe;
+  fe.push_back (FESystem<dim> (FE_Q<dim>(3),2,
+                               FE_DGQ<dim>(1),1));
+
+  hp::DoFHandler<dim> dof_handler (triangulation);
+
+  GridGenerator::hyper_cube(triangulation);
+  triangulation.refine_global (2);
+
+  const unsigned int n_refinements[] = { 0, 4, 3, 2 };
+  for (unsigned int i=0; i<n_refinements[dim]; ++i)
+    {
+      // refine one-fifth of cells randomly
+      std::vector<bool> flags (triangulation.n_active_cells(), false);
+      for (unsigned int k=0; k<flags.size()/5 + 1; ++k)
+        flags[Testing::rand() % flags.size()] = true;
+      // make sure there's at least one that
+      // will be refined
+      flags[0] = true;
+
+      // refine triangulation
+      unsigned int index=0;
+      for (typename Triangulation<dim>::active_cell_iterator
+           cell = triangulation.begin_active();
+           cell != triangulation.end(); ++cell)
+        {
+          if (flags[index])
+            cell->set_refine_flag();
+          ++index;
+        }
+
+      Assert (index <= triangulation.n_active_cells(), ExcInternalError());
+
+      // flag all other cells for coarsening
+      // (this should ensure that at least
+      // some of them will actually be
+      // coarsened)
+      index=0;
+      for (typename Triangulation<dim>::active_cell_iterator
+           cell = triangulation.begin_active();
+           cell != triangulation.end(); ++cell)
+        {
+          if (!flags[index])
+            cell->set_coarsen_flag();
+          ++index;
+        }
+
+      triangulation.execute_coarsening_and_refinement ();
+      dof_handler.distribute_dofs (fe);
+      DoFRenumbering::component_wise(dof_handler);
+
+      deallog
+          << "n_dofs: " << dof_handler.n_dofs() << std::endl;
+//          << "n_locally_owned_dofs: " << dof_handler.n_locally_owned_dofs() << std::endl;
+//
+//      deallog << "n_locally_owned_dofs_per_processor: ";
+//      std::vector<types::global_dof_index> v = dof_handler.n_locally_owned_dofs_per_processor();
+//      unsigned int sum = 0;
+//      for (unsigned int i=0; i<v.size(); ++i)
+//        {
+//          deallog << v[i] << " ";
+//          sum += v[i];
+//        }
+//      deallog << " sum: " << sum << std::endl;
+
+      Assert(dof_handler.n_locally_owned_dofs() == dof_handler.n_locally_owned_dofs_per_processor()[triangulation.locally_owned_subdomain()], ExcInternalError());
+      Assert( dof_handler.n_locally_owned_dofs() == dof_handler.locally_owned_dofs().n_elements(), ExcInternalError());
+
+      const unsigned int N = dof_handler.n_dofs();
+
+      Assert (dof_handler.n_locally_owned_dofs() <= N,
+              ExcInternalError());
+      Assert (std::accumulate (dof_handler.n_locally_owned_dofs_per_processor().begin(),
+                               dof_handler.n_locally_owned_dofs_per_processor().end(),
+                               0U) == N,
+              ExcInternalError());
+
+      IndexSet all (N);
+      for (unsigned int i=0;
+           i<dof_handler.locally_owned_dofs_per_processor().size(); ++i)
+        {
+          IndexSet intersect = all & dof_handler.locally_owned_dofs_per_processor()[i];
+          Assert(intersect.n_elements()==0, ExcInternalError());
+          all.add_indices(dof_handler.locally_owned_dofs_per_processor()[i]);
+        }
+
+      Assert(all == complete_index_set(N), ExcInternalError());
+    }
+}
+
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  MPILogInitAll all;
+
+  deallog.push("2d");
+  test<2>();
+  deallog.pop();
+
+  deallog.push("3d");
+  test<3>();
+  deallog.pop();
+}
diff --git a/tests/sharedtria/hp_dof_04.with_metis=true.mpirun=3.output b/tests/sharedtria/hp_dof_04.with_metis=true.mpirun=3.output
new file mode 100644 (file)
index 0000000..ae56d84
--- /dev/null
@@ -0,0 +1,20 @@
+
+DEAL:0:2d::n_dofs: 818
+DEAL:0:2d::n_dofs: 1754
+DEAL:0:2d::n_dofs: 3056
+DEAL:0:3d::n_dofs: 13282
+DEAL:0:3d::n_dofs: 41826
+
+DEAL:1:2d::n_dofs: 818
+DEAL:1:2d::n_dofs: 1754
+DEAL:1:2d::n_dofs: 3056
+DEAL:1:3d::n_dofs: 13282
+DEAL:1:3d::n_dofs: 41826
+
+
+DEAL:2:2d::n_dofs: 818
+DEAL:2:2d::n_dofs: 1754
+DEAL:2:2d::n_dofs: 3056
+DEAL:2:3d::n_dofs: 13282
+DEAL:2:3d::n_dofs: 41826
+
diff --git a/tests/sharedtria/hp_no_cells_01.cc b/tests/sharedtria/hp_no_cells_01.cc
new file mode 100644 (file)
index 0000000..47acb6c
--- /dev/null
@@ -0,0 +1,130 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// check that everything is ok when we have a triangulation that has
+// fewer cells than there are processors
+//
+// this test is run with sufficiently many processors so that there
+// are idle processors in 1d, 2d, and 3d
+//
+// this test is just like the one without hp_ but uses an
+// hp::DoFHandler instead of a regular DoFHandler (but with only one
+// element). the output should be, and is, the same
+
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/intergrid_map.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/hp/dof_handler.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_dgq.h>
+
+#include <fstream>
+#include <cstdlib>
+#include <numeric>
+
+
+template <int dim>
+void test()
+{
+  parallel::shared::Triangulation<dim>
+  triangulation (MPI_COMM_WORLD);
+
+  GridGenerator::hyper_cube(triangulation);
+  triangulation.refine_global (1);
+
+  // partition the triangulation by hand
+  for (auto cell : triangulation.active_cell_iterators())
+    cell->set_subdomain_id (cell->active_cell_index() %
+                            Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD));
+
+
+  hp::FECollection<dim> fe;
+  fe.push_back(FE_Q<dim>(1));
+
+  hp::DoFHandler<dim> dof_handler (triangulation);
+  dof_handler.distribute_dofs (fe);
+
+  deallog << "n_dofs: " << dof_handler.n_dofs() << std::endl;
+  deallog << "n_locally_owned_dofs: " << dof_handler.n_locally_owned_dofs() << std::endl;
+
+  deallog << "n_locally_owned_dofs_per_processor: ";
+  std::vector<types::global_dof_index> v = dof_handler.n_locally_owned_dofs_per_processor();
+  unsigned int sum = 0;
+  for (unsigned int i=0; i<v.size(); ++i)
+    {
+      deallog << v[i] << " ";
+      sum += v[i];
+    }
+  deallog << " sum: " << sum << std::endl;
+  deallog << " locally_owned_dofs: ";
+  dof_handler.locally_owned_dofs().write(deallog.get_file_stream());
+  deallog << std::endl;
+
+  Assert (dof_handler.n_locally_owned_dofs() ==
+          dof_handler.n_locally_owned_dofs_per_processor()[triangulation.locally_owned_subdomain()],
+          ExcInternalError());
+  Assert (dof_handler.n_locally_owned_dofs() == dof_handler.locally_owned_dofs().n_elements(),
+          ExcInternalError());
+
+  const unsigned int N = dof_handler.n_dofs();
+
+  Assert (dof_handler.n_locally_owned_dofs() <= N,
+          ExcInternalError());
+  Assert (std::accumulate (dof_handler.n_locally_owned_dofs_per_processor().begin(),
+                           dof_handler.n_locally_owned_dofs_per_processor().end(),
+                           0U) == N,
+          ExcInternalError());
+
+  IndexSet all (N);
+  for (unsigned int i=0;
+       i<dof_handler.locally_owned_dofs_per_processor().size(); ++i)
+    {
+      IndexSet intersect = all & dof_handler.locally_owned_dofs_per_processor()[i];
+      Assert(intersect.n_elements()==0, ExcInternalError());
+      all.add_indices(dof_handler.locally_owned_dofs_per_processor()[i]);
+    }
+
+  Assert(all == complete_index_set(N), ExcInternalError());
+}
+
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  MPILogInitAll all;
+
+  deallog.push("1d");
+  test<1>();
+  deallog.pop();
+
+  deallog.push("2d");
+  test<2>();
+  deallog.pop();
+
+  deallog.push("3d");
+  test<3>();
+  deallog.pop();
+}
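
The hand partitioning above assigns cell k to subdomain k % n_procs; after
refine_global(1) the mesh has only 2^dim cells, so any rank whose number is
at least 2^dim owns no cells, and hence no DoFs, at all. That is precisely the
corner case the number cache has to survive here, and it shows up in the
expected output below as the ranks reporting n_locally_owned_dofs: 0. A
hypothetical helper stating the same arithmetic:

    // with 2^dim active cells and round-robin assignment, a rank is idle
    // iff its id is at least 2^dim: ranks >= 2 in 1d, >= 4 in 2d, >= 8 in 3d
    bool rank_is_idle (const unsigned int rank_id, const unsigned int dim)
    {
      const unsigned int n_cells = 1u << dim;   // cells after refine_global(1)
      return rank_id >= n_cells;
    }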
diff --git a/tests/sharedtria/hp_no_cells_01.with_metis=true.mpirun=3.output b/tests/sharedtria/hp_no_cells_01.with_metis=true.mpirun=3.output
new file mode 100644 (file)
index 0000000..81969cc
--- /dev/null
@@ -0,0 +1,58 @@
+
+DEAL:0:1d::n_dofs: 3
+DEAL:0:1d::n_locally_owned_dofs: 1
+DEAL:0:1d::n_locally_owned_dofs_per_processor: 1 2 0  sum: 3
+3 1
+0 1
+DEAL:0:1d:: locally_owned_dofs: 
+DEAL:0:2d::n_dofs: 9
+DEAL:0:2d::n_locally_owned_dofs: 4
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 4 3 2  sum: 9
+9 1
+0 4
+DEAL:0:2d:: locally_owned_dofs: 
+DEAL:0:3d::n_dofs: 27
+DEAL:0:3d::n_locally_owned_dofs: 10
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 10 11 6  sum: 27
+27 1
+0 10
+DEAL:0:3d:: locally_owned_dofs: 
+
+DEAL:1:1d::n_dofs: 3
+DEAL:1:1d::n_locally_owned_dofs: 2
+DEAL:1:1d::n_locally_owned_dofs_per_processor: 1 2 0  sum: 3
+3 1
+1 3
+DEAL:1:1d:: locally_owned_dofs: 
+DEAL:1:2d::n_dofs: 9
+DEAL:1:2d::n_locally_owned_dofs: 3
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 4 3 2  sum: 9
+9 1
+4 7
+DEAL:1:2d:: locally_owned_dofs: 
+DEAL:1:3d::n_dofs: 27
+DEAL:1:3d::n_locally_owned_dofs: 11
+DEAL:1:3d::n_locally_owned_dofs_per_processor: 10 11 6  sum: 27
+27 1
+10 21
+DEAL:1:3d:: locally_owned_dofs: 
+
+
+DEAL:2:1d::n_dofs: 3
+DEAL:2:1d::n_locally_owned_dofs: 0
+DEAL:2:1d::n_locally_owned_dofs_per_processor: 1 2 0  sum: 3
+3 0
+DEAL:2:1d:: locally_owned_dofs: 
+DEAL:2:2d::n_dofs: 9
+DEAL:2:2d::n_locally_owned_dofs: 2
+DEAL:2:2d::n_locally_owned_dofs_per_processor: 4 3 2  sum: 9
+9 1
+7 9
+DEAL:2:2d:: locally_owned_dofs: 
+DEAL:2:3d::n_dofs: 27
+DEAL:2:3d::n_locally_owned_dofs: 6
+DEAL:2:3d::n_locally_owned_dofs_per_processor: 10 11 6  sum: 27
+27 1
+21 27
+DEAL:2:3d:: locally_owned_dofs: 
+
diff --git a/tests/sharedtria/hp_no_cells_01.with_metis=true.mpirun=5.output b/tests/sharedtria/hp_no_cells_01.with_metis=true.mpirun=5.output
new file mode 100644 (file)
index 0000000..648350f
--- /dev/null
@@ -0,0 +1,95 @@
+
+DEAL:0:1d::n_dofs: 3
+DEAL:0:1d::n_locally_owned_dofs: 1
+DEAL:0:1d::n_locally_owned_dofs_per_processor: 1 2 0 0 0  sum: 3
+3 1
+0 1
+DEAL:0:1d:: locally_owned_dofs: 
+DEAL:0:2d::n_dofs: 9
+DEAL:0:2d::n_locally_owned_dofs: 1
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 1 3 2 3 0  sum: 9
+9 1
+0 1
+DEAL:0:2d:: locally_owned_dofs: 
+DEAL:0:3d::n_dofs: 27
+DEAL:0:3d::n_locally_owned_dofs: 5
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 5 8 7 5 2  sum: 27
+27 1
+0 5
+DEAL:0:3d:: locally_owned_dofs: 
+
+DEAL:1:1d::n_dofs: 3
+DEAL:1:1d::n_locally_owned_dofs: 2
+DEAL:1:1d::n_locally_owned_dofs_per_processor: 1 2 0 0 0  sum: 3
+3 1
+1 3
+DEAL:1:1d:: locally_owned_dofs: 
+DEAL:1:2d::n_dofs: 9
+DEAL:1:2d::n_locally_owned_dofs: 3
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 1 3 2 3 0  sum: 9
+9 1
+1 4
+DEAL:1:2d:: locally_owned_dofs: 
+DEAL:1:3d::n_dofs: 27
+DEAL:1:3d::n_locally_owned_dofs: 8
+DEAL:1:3d::n_locally_owned_dofs_per_processor: 5 8 7 5 2  sum: 27
+27 1
+5 13
+DEAL:1:3d:: locally_owned_dofs: 
+
+
+DEAL:2:1d::n_dofs: 3
+DEAL:2:1d::n_locally_owned_dofs: 0
+DEAL:2:1d::n_locally_owned_dofs_per_processor: 1 2 0 0 0  sum: 3
+3 0
+DEAL:2:1d:: locally_owned_dofs: 
+DEAL:2:2d::n_dofs: 9
+DEAL:2:2d::n_locally_owned_dofs: 2
+DEAL:2:2d::n_locally_owned_dofs_per_processor: 1 3 2 3 0  sum: 9
+9 1
+4 6
+DEAL:2:2d:: locally_owned_dofs: 
+DEAL:2:3d::n_dofs: 27
+DEAL:2:3d::n_locally_owned_dofs: 7
+DEAL:2:3d::n_locally_owned_dofs_per_processor: 5 8 7 5 2  sum: 27
+27 1
+13 20
+DEAL:2:3d:: locally_owned_dofs: 
+
+
+DEAL:3:1d::n_dofs: 3
+DEAL:3:1d::n_locally_owned_dofs: 0
+DEAL:3:1d::n_locally_owned_dofs_per_processor: 1 2 0 0 0  sum: 3
+3 0
+DEAL:3:1d:: locally_owned_dofs: 
+DEAL:3:2d::n_dofs: 9
+DEAL:3:2d::n_locally_owned_dofs: 3
+DEAL:3:2d::n_locally_owned_dofs_per_processor: 1 3 2 3 0  sum: 9
+9 1
+6 9
+DEAL:3:2d:: locally_owned_dofs: 
+DEAL:3:3d::n_dofs: 27
+DEAL:3:3d::n_locally_owned_dofs: 5
+DEAL:3:3d::n_locally_owned_dofs_per_processor: 5 8 7 5 2  sum: 27
+27 1
+20 25
+DEAL:3:3d:: locally_owned_dofs: 
+
+
+DEAL:4:1d::n_dofs: 3
+DEAL:4:1d::n_locally_owned_dofs: 0
+DEAL:4:1d::n_locally_owned_dofs_per_processor: 1 2 0 0 0  sum: 3
+3 0
+DEAL:4:1d:: locally_owned_dofs: 
+DEAL:4:2d::n_dofs: 9
+DEAL:4:2d::n_locally_owned_dofs: 0
+DEAL:4:2d::n_locally_owned_dofs_per_processor: 1 3 2 3 0  sum: 9
+9 0
+DEAL:4:2d:: locally_owned_dofs: 
+DEAL:4:3d::n_dofs: 27
+DEAL:4:3d::n_locally_owned_dofs: 2
+DEAL:4:3d::n_locally_owned_dofs_per_processor: 5 8 7 5 2  sum: 27
+27 1
+25 27
+DEAL:4:3d:: locally_owned_dofs: 
+
diff --git a/tests/sharedtria/hp_no_cells_01.with_metis=true.mpirun=9.output b/tests/sharedtria/hp_no_cells_01.with_metis=true.mpirun=9.output
new file mode 100644 (file)
index 0000000..06567b8
--- /dev/null
@@ -0,0 +1,166 @@
+
+DEAL:0:1d::n_dofs: 3
+DEAL:0:1d::n_locally_owned_dofs: 1
+DEAL:0:1d::n_locally_owned_dofs_per_processor: 1 2 0 0 0 0 0 0 0  sum: 3
+3 1
+0 1
+DEAL:0:1d:: locally_owned_dofs: 
+DEAL:0:2d::n_dofs: 9
+DEAL:0:2d::n_locally_owned_dofs: 1
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 1 3 2 3 0 0 0 0 0  sum: 9
+9 1
+0 1
+DEAL:0:2d:: locally_owned_dofs: 
+DEAL:0:3d::n_dofs: 27
+DEAL:0:3d::n_locally_owned_dofs: 1
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 1 4 2 5 2 4 4 5 0  sum: 27
+27 1
+0 1
+DEAL:0:3d:: locally_owned_dofs: 
+
+DEAL:1:1d::n_dofs: 3
+DEAL:1:1d::n_locally_owned_dofs: 2
+DEAL:1:1d::n_locally_owned_dofs_per_processor: 1 2 0 0 0 0 0 0 0  sum: 3
+3 1
+1 3
+DEAL:1:1d:: locally_owned_dofs: 
+DEAL:1:2d::n_dofs: 9
+DEAL:1:2d::n_locally_owned_dofs: 3
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 1 3 2 3 0 0 0 0 0  sum: 9
+9 1
+1 4
+DEAL:1:2d:: locally_owned_dofs: 
+DEAL:1:3d::n_dofs: 27
+DEAL:1:3d::n_locally_owned_dofs: 4
+DEAL:1:3d::n_locally_owned_dofs_per_processor: 1 4 2 5 2 4 4 5 0  sum: 27
+27 1
+1 5
+DEAL:1:3d:: locally_owned_dofs: 
+
+
+DEAL:2:1d::n_dofs: 3
+DEAL:2:1d::n_locally_owned_dofs: 0
+DEAL:2:1d::n_locally_owned_dofs_per_processor: 1 2 0 0 0 0 0 0 0  sum: 3
+3 0
+DEAL:2:1d:: locally_owned_dofs: 
+DEAL:2:2d::n_dofs: 9
+DEAL:2:2d::n_locally_owned_dofs: 2
+DEAL:2:2d::n_locally_owned_dofs_per_processor: 1 3 2 3 0 0 0 0 0  sum: 9
+9 1
+4 6
+DEAL:2:2d:: locally_owned_dofs: 
+DEAL:2:3d::n_dofs: 27
+DEAL:2:3d::n_locally_owned_dofs: 2
+DEAL:2:3d::n_locally_owned_dofs_per_processor: 1 4 2 5 2 4 4 5 0  sum: 27
+27 1
+5 7
+DEAL:2:3d:: locally_owned_dofs: 
+
+
+DEAL:3:1d::n_dofs: 3
+DEAL:3:1d::n_locally_owned_dofs: 0
+DEAL:3:1d::n_locally_owned_dofs_per_processor: 1 2 0 0 0 0 0 0 0  sum: 3
+3 0
+DEAL:3:1d:: locally_owned_dofs: 
+DEAL:3:2d::n_dofs: 9
+DEAL:3:2d::n_locally_owned_dofs: 3
+DEAL:3:2d::n_locally_owned_dofs_per_processor: 1 3 2 3 0 0 0 0 0  sum: 9
+9 1
+6 9
+DEAL:3:2d:: locally_owned_dofs: 
+DEAL:3:3d::n_dofs: 27
+DEAL:3:3d::n_locally_owned_dofs: 5
+DEAL:3:3d::n_locally_owned_dofs_per_processor: 1 4 2 5 2 4 4 5 0  sum: 27
+27 1
+7 12
+DEAL:3:3d:: locally_owned_dofs: 
+
+
+DEAL:4:1d::n_dofs: 3
+DEAL:4:1d::n_locally_owned_dofs: 0
+DEAL:4:1d::n_locally_owned_dofs_per_processor: 1 2 0 0 0 0 0 0 0  sum: 3
+3 0
+DEAL:4:1d:: locally_owned_dofs: 
+DEAL:4:2d::n_dofs: 9
+DEAL:4:2d::n_locally_owned_dofs: 0
+DEAL:4:2d::n_locally_owned_dofs_per_processor: 1 3 2 3 0 0 0 0 0  sum: 9
+9 0
+DEAL:4:2d:: locally_owned_dofs: 
+DEAL:4:3d::n_dofs: 27
+DEAL:4:3d::n_locally_owned_dofs: 2
+DEAL:4:3d::n_locally_owned_dofs_per_processor: 1 4 2 5 2 4 4 5 0  sum: 27
+27 1
+12 14
+DEAL:4:3d:: locally_owned_dofs: 
+
+
+DEAL:5:1d::n_dofs: 3
+DEAL:5:1d::n_locally_owned_dofs: 0
+DEAL:5:1d::n_locally_owned_dofs_per_processor: 1 2 0 0 0 0 0 0 0  sum: 3
+3 0
+DEAL:5:1d:: locally_owned_dofs: 
+DEAL:5:2d::n_dofs: 9
+DEAL:5:2d::n_locally_owned_dofs: 0
+DEAL:5:2d::n_locally_owned_dofs_per_processor: 1 3 2 3 0 0 0 0 0  sum: 9
+9 0
+DEAL:5:2d:: locally_owned_dofs: 
+DEAL:5:3d::n_dofs: 27
+DEAL:5:3d::n_locally_owned_dofs: 4
+DEAL:5:3d::n_locally_owned_dofs_per_processor: 1 4 2 5 2 4 4 5 0  sum: 27
+27 1
+14 18
+DEAL:5:3d:: locally_owned_dofs: 
+
+
+DEAL:6:1d::n_dofs: 3
+DEAL:6:1d::n_locally_owned_dofs: 0
+DEAL:6:1d::n_locally_owned_dofs_per_processor: 1 2 0 0 0 0 0 0 0  sum: 3
+3 0
+DEAL:6:1d:: locally_owned_dofs: 
+DEAL:6:2d::n_dofs: 9
+DEAL:6:2d::n_locally_owned_dofs: 0
+DEAL:6:2d::n_locally_owned_dofs_per_processor: 1 3 2 3 0 0 0 0 0  sum: 9
+9 0
+DEAL:6:2d:: locally_owned_dofs: 
+DEAL:6:3d::n_dofs: 27
+DEAL:6:3d::n_locally_owned_dofs: 4
+DEAL:6:3d::n_locally_owned_dofs_per_processor: 1 4 2 5 2 4 4 5 0  sum: 27
+27 1
+18 22
+DEAL:6:3d:: locally_owned_dofs: 
+
+
+DEAL:7:1d::n_dofs: 3
+DEAL:7:1d::n_locally_owned_dofs: 0
+DEAL:7:1d::n_locally_owned_dofs_per_processor: 1 2 0 0 0 0 0 0 0  sum: 3
+3 0
+DEAL:7:1d:: locally_owned_dofs: 
+DEAL:7:2d::n_dofs: 9
+DEAL:7:2d::n_locally_owned_dofs: 0
+DEAL:7:2d::n_locally_owned_dofs_per_processor: 1 3 2 3 0 0 0 0 0  sum: 9
+9 0
+DEAL:7:2d:: locally_owned_dofs: 
+DEAL:7:3d::n_dofs: 27
+DEAL:7:3d::n_locally_owned_dofs: 5
+DEAL:7:3d::n_locally_owned_dofs_per_processor: 1 4 2 5 2 4 4 5 0  sum: 27
+27 1
+22 27
+DEAL:7:3d:: locally_owned_dofs: 
+
+
+DEAL:8:1d::n_dofs: 3
+DEAL:8:1d::n_locally_owned_dofs: 0
+DEAL:8:1d::n_locally_owned_dofs_per_processor: 1 2 0 0 0 0 0 0 0  sum: 3
+3 0
+DEAL:8:1d:: locally_owned_dofs: 
+DEAL:8:2d::n_dofs: 9
+DEAL:8:2d::n_locally_owned_dofs: 0
+DEAL:8:2d::n_locally_owned_dofs_per_processor: 1 3 2 3 0 0 0 0 0  sum: 9
+9 0
+DEAL:8:2d:: locally_owned_dofs: 
+DEAL:8:3d::n_dofs: 27
+DEAL:8:3d::n_locally_owned_dofs: 0
+DEAL:8:3d::n_locally_owned_dofs_per_processor: 1 4 2 5 2 4 4 5 0  sum: 27
+27 0
+DEAL:8:3d:: locally_owned_dofs: 
+
