--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2013 - 2014 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+/*
+ * Purpose: checks compute_intergrid_transfer_representation
+ * on a distributed (p4est) triangulation
+ *
+ * Author: Alexander Grayver, 2015
+ */
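+
+/*
+ * Idea of the test: compute_intergrid_transfer_representation expresses
+ * every degree of freedom of the coarse grid as a weighted combination
+ * of degrees of freedom of the fine grid. With piecewise constant
+ * elements (FE_DGQ(0), one DoF per cell), each fine cell lies inside
+ * exactly one coarse cell and should enter with weight 1, so the sum of
+ * all weights over all ranks must equal the number of DoFs on the fine
+ * grid. This is the invariant checked at the end of test().
+ */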
+
+#include "../tests.h"
+
+#include <deal.II/base/logstream.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/distributed/grid_refinement.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/dofs/dof_renumbering.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/intergrid_map.h>
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/fe/fe_dgq.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/timer.h>
+
+#include <fstream>
+
+template <int dim>
+void test (unsigned int n_refinements)
+{
+  unsigned int rank = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+  if (rank == 0)
+    {
+      deallog << "Checking in " << dim << " space dimensions"
+              << std::endl
+              << "---------------------------------------" << std::endl;
+    }
+
+  // create the coarse grid on a single rank and refine it once globally
+  parallel::distributed::Triangulation<dim> tria1(MPI_COMM_SELF);
+  GridGenerator::hyper_cube (tria1, -1, 1);
+  tria1.refine_global(1);
+
+  // create the distributed fine grid and refine it n_refinements times
+  // globally
+  parallel::distributed::Triangulation<dim> tria2(MPI_COMM_WORLD);
+  GridGenerator::hyper_cube (tria2, -1, 1);
+  tria2.refine_global(n_refinements);
+
+  // adaptively refine the fine grid in shrinking balls around the origin
+  const Point<dim> p0;
+  for (unsigned int i = 0; i < n_refinements; ++i)
+    {
+      typename Triangulation<dim>::active_cell_iterator cell;
+      for (cell = tria2.begin_active(); cell != tria2.end(); ++cell)
+        {
+          if (cell->is_locally_owned() &&
+              (cell->center().distance(p0) < 0.71 / double(i + 1)))
+            cell->set_refine_flag();
+        }
+
+      // let the triangulation fix up the refinement flags (mesh
+      // smoothing) before the mesh is actually changed
+      tria2.prepare_coarsening_and_refinement ();
+      tria2.execute_coarsening_and_refinement ();
+    }
+
+  // piecewise constant elements: one DoF per cell
+  FE_DGQ<dim> fe_dg(0);
+  DoFHandler<dim> dof_handler1(tria1);
+  dof_handler1.distribute_dofs(fe_dg);
+
+  DoFHandler<dim> dof_handler2(tria2);
+  dof_handler2.distribute_dofs(fe_dg);
+
+  InterGridMap<DoFHandler<dim> > grid_1_to_2_map;
+  grid_1_to_2_map.make_mapping (dof_handler1, dof_handler2);
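+
+  // grid_1_to_2_map now associates each cell of the coarse dof_handler1
+  // with the corresponding cell of the finer dof_handler2;
+  // compute_intergrid_transfer_representation uses this coarse-to-fine
+  // map to locate the fine cells that cover each coarse cell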
+
+  typedef std::vector<std::map<types::global_dof_index, float> > TransferRep;
+  TransferRep transfer_representation;
+  DoFTools::compute_intergrid_transfer_representation (dof_handler1, 0,
+                                                       dof_handler2, 0,
+                                                       grid_1_to_2_map,
+                                                       transfer_representation);
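+
+  // each entry of transfer_representation maps global DoF indices to
+  // interpolation weights (see the documentation of
+  // DoFTools::compute_intergrid_transfer_representation for the exact
+  // layout); this test only needs the sum of all weights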
+
+  // For this test case all weights are one: interpolating a piecewise
+  // constant coarse shape function onto the fine grid yields the value 1
+  // on every fine cell it covers. The sum of the weights over all ranks
+  // must therefore equal the number of degrees of freedom on the fine
+  // grid.
+  unsigned int local_sum = 0;
+  for (size_t i = 0; i < transfer_representation.size(); ++i)
+    {
+      const TransferRep::value_type &m = transfer_representation[i];
+      for (TransferRep::value_type::const_iterator it = m.begin(); it != m.end(); ++it)
+        local_sum += it->second;
+    }
+
+  // collective operation: every rank has to get here
+  unsigned int global_sum = Utilities::MPI::sum(local_sum, MPI_COMM_WORLD);
+
+  if (rank == 0)
+    {
+      deallog << "# dofs = " << dof_handler2.n_dofs() << std::endl;
+      deallog << "sum(weights) = " << global_sum << std::endl;
+      if (dof_handler2.n_dofs() == global_sum)
+        deallog << "OK" << std::endl;
+      else
+        deallog << "Failed" << std::endl;
+    }
+}
+
+int main (int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
+
+  unsigned int rank = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+  deallog.push(Utilities::int_to_string(rank));
+
+  unsigned int n_refinements = 2;
+
+  // only rank 0 writes the log file, but test() performs collective MPI
+  // operations, so every rank has to run it
+  if (rank == 0)
+    {
+      std::ofstream logfile("output");
+      deallog.attach(logfile);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      deallog.push("2d");
+      test<2>(n_refinements);
+      deallog.pop();
+      deallog.push("3d");
+      test<3>(n_refinements);
+      deallog.pop();
+    }
+  else
+    {
+      test<2>(n_refinements);
+      test<3>(n_refinements);
+    }
+}