--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// create a parallel hp::DoFHandler on a single CPU
+//
+// like the test without the hp_ prefix, but for hp::DoFHandler
+
+#include "../tests.h"
+#include "coarse_grid_common.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/hp/dof_handler.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/dofs/dof_accessor.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/hp/fe_collection.h>
+
+#include <fstream>
+
+
+template <int dim>
+void test(std::ostream & /*out*/)
+{
+ deallog << "hyper_cube" << std::endl;
+
+ parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
+
+ GridGenerator::hyper_cube(tr);
+ tr.refine_global (1);
+
+ hp::FECollection<dim> fe;
+ fe.push_back (FE_Q<dim> (2));
+ hp::DoFHandler<dim> dofh(tr);
+ dofh.distribute_dofs (fe);
+
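+  // with only one MPI process, every active cell is locally owned, so
+  // we can walk over all of them and read their global dof indices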
+ typename
+ hp::DoFHandler<dim>::active_cell_iterator cell
+ = dofh.begin_active();
+
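+  // the collection holds a single FE_Q(2), so every cell uses the same
+  // element and has the same number of dofs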
+ const unsigned int dofs_per_cell = dofh.get_fe()[0].dofs_per_cell;
+ std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
+
+ for (; cell != dofh.end(); ++cell)
+ {
+ cell->get_dof_indices (local_dof_indices);
+
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ deallog << local_dof_indices[i] << " ";
+
+ deallog << std::endl;
+ }
+
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
+
+ std::ofstream logfile("output");
+ deallog.attach(logfile);
+
+ deallog.push("2d");
+ test<2>(logfile);
+ deallog.pop();
+
+
+}
--- /dev/null
+
+DEAL:2d::hyper_cube
+DEAL:2d::0 1 2 3 4 5 6 7 8
+DEAL:2d::1 9 3 10 5 11 12 13 14
+DEAL:2d::2 3 15 16 17 18 7 19 20
+DEAL:2d::3 10 16 21 18 22 13 23 24
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2008 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// Test DoFTools::count_dofs_per_block
+//
+// like the test without the hp_ prefix, but for hp::DoFHandler
+
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/intergrid_map.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/hp/dof_handler.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_dgq.h>
+#include <deal.II/hp/fe_collection.h>
+
+#include <fstream>
+#include <numeric>
+#include <cstdlib>
+
+
+template <int dim>
+void test()
+{
+ parallel::distributed::Triangulation<dim>
+ triangulation (MPI_COMM_WORLD,
+ Triangulation<dim>::limit_level_difference_at_vertices);
+
+ hp::FECollection<dim> fe;
+ fe.push_back (FESystem<dim> (FE_Q<dim>(3),2,
+ FE_DGQ<dim>(1),1));
+
+ hp::DoFHandler<dim> dof_handler (triangulation);
+
+ GridGenerator::hyper_cube(triangulation);
+ triangulation.refine_global (2);
+
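+  // number of refine/coarsen cycles to run, indexed by space dimension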
+ const unsigned int n_refinements[] = { 0, 4, 3, 2 };
+ for (unsigned int i=0; i<n_refinements[dim]; ++i)
+ {
+      // randomly flag roughly one-fifth of the cells for refinement
+      // (random picks may hit the same cell twice)
+ std::vector<bool> flags (triangulation.n_active_cells(), false);
+ for (unsigned int k=0; k<flags.size()/5 + 1; ++k)
+ flags[Testing::rand() % flags.size()] = true;
+ // make sure there's at least one that
+ // will be refined
+ flags[0] = true;
+
+ // refine triangulation
+ unsigned int index=0;
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = triangulation.begin_active();
+ cell != triangulation.end(); ++cell, ++index)
+ if (flags[index])
+ cell->set_refine_flag();
+ AssertThrow (index == triangulation.n_active_cells(), ExcInternalError());
+
+ // flag all other cells for coarsening
+ // (this should ensure that at least
+ // some of them will actually be
+ // coarsened)
+ index=0;
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = triangulation.begin_active();
+ cell != triangulation.end(); ++cell, ++index)
+ if (!flags[index])
+ cell->set_coarsen_flag();
+
+ triangulation.execute_coarsening_and_refinement ();
+ dof_handler.distribute_dofs (fe);
+
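+      // the element is FESystem(FE_Q(3)^2, FE_DGQ(1)), for which the
+      // number of blocks happens to equal the number of vector
+      // components, namely three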
+ std::vector<types::global_dof_index> dofs_per_block (fe.n_components());
+ DoFTools::count_dofs_per_block (dof_handler, dofs_per_block);
+
+      AssertThrow (std::accumulate (dofs_per_block.begin(), dofs_per_block.end(),
+                                    types::global_dof_index(0))
+ == dof_handler.n_dofs(),
+ ExcInternalError());
+
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ if (myid == 0)
+ {
+ deallog << "Total number of dofs: " << dof_handler.n_dofs() << std::endl;
+ for (unsigned int i=0; i<dofs_per_block.size(); ++i)
+ deallog << "Block " << i << " has " << dofs_per_block[i] << " global dofs"
+ << std::endl;
+ }
+ }
+}
+
+
+int main(int argc, char *argv[])
+{
+#ifdef DEAL_II_WITH_MPI
+ Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
+#else
+ (void)argc;
+ (void)argv;
+#endif
+
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ if (myid == 0)
+ {
+ initlog();
+
+ deallog.push("2d");
+ test<2>();
+ deallog.pop();
+
+ deallog.push("3d");
+ test<3>();
+ deallog.pop();
+ }
+ else
+ {
+ test<2>();
+ test<3>();
+ }
+
+
+}
--- /dev/null
+
+DEAL:2d::Total number of dofs: 818
+DEAL:2d::Block 0 has 347 global dofs
+DEAL:2d::Block 1 has 347 global dofs
+DEAL:2d::Block 2 has 124 global dofs
+DEAL:2d::Total number of dofs: 1874
+DEAL:2d::Block 0 has 791 global dofs
+DEAL:2d::Block 1 has 791 global dofs
+DEAL:2d::Block 2 has 292 global dofs
+DEAL:2d::Total number of dofs: 2800
+DEAL:2d::Block 0 has 1182 global dofs
+DEAL:2d::Block 1 has 1182 global dofs
+DEAL:2d::Block 2 has 436 global dofs
+DEAL:3d::Total number of dofs: 13282
+DEAL:3d::Block 0 has 6021 global dofs
+DEAL:3d::Block 1 has 6021 global dofs
+DEAL:3d::Block 2 has 1240 global dofs
+DEAL:3d::Total number of dofs: 42960
+DEAL:3d::Block 0 has 19292 global dofs
+DEAL:3d::Block 1 has 19292 global dofs
+DEAL:3d::Block 2 has 4376 global dofs
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2008 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// Test DoFTools::count_dofs_per_component
+//
+// like the test without the hp_ prefix, but for hp::DoFHandler
+
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/intergrid_map.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/hp/dof_handler.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_dgq.h>
+#include <deal.II/hp/fe_collection.h>
+
+#include <fstream>
+#include <numeric>
+#include <cstdlib>
+
+
+template <int dim>
+void test()
+{
+ parallel::distributed::Triangulation<dim>
+ triangulation (MPI_COMM_WORLD,
+ Triangulation<dim>::limit_level_difference_at_vertices);
+
+ hp::FECollection<dim> fe;
+ fe.push_back (FESystem<dim> (FE_Q<dim>(3),2,
+ FE_DGQ<dim>(1),1));
+
+ hp::DoFHandler<dim> dof_handler (triangulation);
+
+ GridGenerator::hyper_cube(triangulation);
+ triangulation.refine_global (2);
+ dof_handler.distribute_dofs (fe);
+
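+  // count the dofs in each of the three vector components (two FE_Q(3)
+  // components plus one FE_DGQ(1) component)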
+ std::vector<types::global_dof_index> dofs_per_component (fe.n_components());
+ DoFTools::count_dofs_per_component (dof_handler, dofs_per_component);
+
+  AssertThrow (std::accumulate (dofs_per_component.begin(), dofs_per_component.end(),
+                                types::global_dof_index(0))
+               == dof_handler.n_dofs(),
+               ExcInternalError());
+
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ if (myid == 0)
+ {
+ deallog << "Total number of dofs: " << dof_handler.n_dofs() << std::endl;
+      for (unsigned int i=0; i<dofs_per_component.size(); ++i)
+        deallog << "Component " << i << " has " << dofs_per_component[i]
+                << " global dofs" << std::endl;
+ }
+}
+
+
+int main(int argc, char *argv[])
+{
+#ifdef DEAL_II_WITH_MPI
+ Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
+#else
+ (void)argc;
+ (void)argv;
+#endif
+
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ if (myid == 0)
+ {
+ initlog();
+
+ deallog.push("2d");
+ test<2>();
+ deallog.pop();
+
+ deallog.push("3d");
+ test<3>();
+ deallog.pop();
+ }
+ else
+ {
+ test<2>();
+ test<3>();
+ }
+
+
+}
--- /dev/null
+
+DEAL:2d::Total number of dofs: 402
+DEAL:2d::Component 0 has 169 global dofs
+DEAL:2d::Component 1 has 169 global dofs
+DEAL:2d::Component 2 has 64 global dofs
+DEAL:3d::Total number of dofs: 4906
+DEAL:3d::Component 0 has 2197 global dofs
+DEAL:3d::Component 1 has 2197 global dofs
+DEAL:3d::Component 2 has 512 global dofs
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2008 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// like the same test in deal.II but this time use a
+// parallel::distributed::Triangulation object. We still use only a
+// single processor so the end result should be the same but we use
+// entirely different code paths
+//
+// like the test without the hp_ prefix, but for hp::DoFHandler
+
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/intergrid_map.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/hp/dof_handler.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_dgq.h>
+#include <deal.II/hp/fe_collection.h>
+
+#include <fstream>
+#include <cstdlib>
+
+
+template <int dim>
+void test()
+{
+ parallel::distributed::Triangulation<dim>
+ triangulation (MPI_COMM_WORLD,
+ Triangulation<dim>::limit_level_difference_at_vertices);
+
+ hp::FECollection<dim> fe;
+ fe.push_back (FESystem<dim> (FE_Q<dim>(3),2,
+ FE_DGQ<dim>(1),1));
+
+ hp::DoFHandler<dim> dof_handler (triangulation);
+
+ GridGenerator::hyper_cube(triangulation);
+ triangulation.refine_global (2);
+
+ const unsigned int n_refinements[] = { 0, 4, 3, 2 };
+ for (unsigned int i=0; i<n_refinements[dim]; ++i)
+ {
+      // randomly flag roughly one-fifth of the cells for refinement
+      // (random picks may hit the same cell twice)
+ std::vector<bool> flags (triangulation.n_active_cells(), false);
+ for (unsigned int k=0; k<flags.size()/5 + 1; ++k)
+ flags[Testing::rand() % flags.size()] = true;
+ // make sure there's at least one that
+ // will be refined
+ flags[0] = true;
+
+ // refine triangulation
+ unsigned int index=0;
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = triangulation.begin_active();
+ cell != triangulation.end(); ++cell, ++index)
+ if (flags[index])
+ cell->set_refine_flag();
+ AssertThrow (index == triangulation.n_active_cells(), ExcInternalError());
+
+ // flag all other cells for coarsening
+ // (this should ensure that at least
+ // some of them will actually be
+ // coarsened)
+ index=0;
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = triangulation.begin_active();
+ cell != triangulation.end(); ++cell, ++index)
+ if (!flags[index])
+ cell->set_coarsen_flag();
+
+ triangulation.execute_coarsening_and_refinement ();
+ dof_handler.distribute_dofs (fe);
+
+ const unsigned int N = dof_handler.n_dofs();
+ deallog << N << std::endl;
+
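+      // with a single processor, the locally owned index set must be
+      // the complete range [0,N)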
+ IndexSet all (N);
+ all.add_range (0, N);
+
+ AssertThrow (dof_handler.n_locally_owned_dofs() == N,
+ ExcInternalError());
+ AssertThrow (dof_handler.locally_owned_dofs() == all,
+ ExcInternalError());
+ AssertThrow (dof_handler.n_locally_owned_dofs_per_processor() ==
+ std::vector<types::global_dof_index> (1,N),
+ ExcInternalError());
+ AssertThrow (dof_handler.locally_owned_dofs_per_processor() ==
+ std::vector<IndexSet>(1,all),
+ ExcInternalError());
+ }
+}
+
+
+int main(int argc, char *argv[])
+{
+#ifdef DEAL_II_WITH_MPI
+ Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
+#else
+ (void)argc;
+ (void)argv;
+#endif
+
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ if (myid == 0)
+ {
+ initlog();
+
+ deallog.push("2d");
+ test<2>();
+ deallog.pop();
+
+ deallog.push("3d");
+ test<3>();
+ deallog.pop();
+ }
+ else
+ {
+ test<2>();
+ test<3>();
+ }
+
+
+}
--- /dev/null
+
+DEAL:2d::818
+DEAL:2d::1874
+DEAL:2d::2800
+DEAL:3d::13282
+DEAL:3d::42960