From 30f769dcc2406db8504bc14b6f87e41130feb320 Mon Sep 17 00:00:00 2001
From: Alexander Grayver
Date: Thu, 8 Oct 2015 18:47:54 +0200
Subject: [PATCH] Add test

---
 ...ergrid_transfer_representation_parallel.cc | 150 ++++++++++++++++++
 ...id_transfer_representation_parallel.output |  10 ++
 2 files changed, 160 insertions(+)
 create mode 100644 tests/distributed_grids/intergrid_transfer_representation_parallel.cc
 create mode 100644 tests/distributed_grids/intergrid_transfer_representation_parallel.output

diff --git a/tests/distributed_grids/intergrid_transfer_representation_parallel.cc b/tests/distributed_grids/intergrid_transfer_representation_parallel.cc
new file mode 100644
index 0000000000..3b4a7c8c38
--- /dev/null
+++ b/tests/distributed_grids/intergrid_transfer_representation_parallel.cc
@@ -0,0 +1,150 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2013 - 2014 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+/*
+ * Purpose: check DoFTools::compute_intergrid_transfer_representation
+ * on a distributed (p4est) triangulation
+ *
+ * Author: Alexander Grayver, 2015
+ */
+
+#include "../tests.h"
+
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/point.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/intergrid_map.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/fe/fe_dgq.h>
+
+#include <fstream>
+
+std::ofstream logfile("output");
+
+template <int dim>
+void test (unsigned int n_refinements)
+{
+  unsigned int rank = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+  if (rank == 0)
+    {
+      deallog << "Checking in " << dim << " space dimensions"
+              << std::endl
+              << "---------------------------------------" << std::endl;
+    }
+
+  // create the coarse grid (fully replicated on each process via
+  // MPI_COMM_SELF) and refine it once
+  parallel::distributed::Triangulation<dim> tria1(MPI_COMM_SELF);
+  GridGenerator::hyper_cube (tria1, -1, 1);
+  tria1.refine_global(1);
+
+  // create the distributed grid and refine it n_refinements times globally
+  parallel::distributed::Triangulation<dim> tria2(MPI_COMM_WORLD);
+  GridGenerator::hyper_cube (tria2, -1, 1);
+  tria2.refine_global(n_refinements);
+
+  // do some local refinement around the origin
+  Point<dim> p0;
+  p0 *= 0.;
+  for (unsigned int i = 0; i < n_refinements; ++i)
+    {
+      typename Triangulation<dim>::active_cell_iterator cell;
+      for (cell = tria2.begin_active(); cell != tria2.end(); ++cell)
+        {
+          if (cell->is_locally_owned() &&
+              (cell->center().distance(p0) < 0.71 / double(i + 1)))
+            cell->set_refine_flag();
+        }
+
+      tria2.prepare_coarsening_and_refinement ();
+      tria2.execute_coarsening_and_refinement ();
+    }
+
+  FE_DGQ<dim> fe_dg(0);
+  DoFHandler<dim> dof_handler1(tria1);
+  dof_handler1.distribute_dofs(fe_dg);
+
+  DoFHandler<dim> dof_handler2(tria2);
+  dof_handler2.distribute_dofs(fe_dg);
+
+  InterGridMap<DoFHandler<dim> > grid_1_to_2_map;
+  grid_1_to_2_map.make_mapping (dof_handler1, dof_handler2);
+
+  typedef std::vector<std::map<types::global_dof_index, float> > TransferRep;
+  TransferRep transfer_representation;
+  DoFTools::compute_intergrid_transfer_representation (dof_handler1, 0, dof_handler2,
+                                                       0, grid_1_to_2_map,
+                                                       transfer_representation);
+
+  // For this test case, all weights are one and their sum
+  // should be equal to the number of degrees of freedom
+  unsigned int local_sum = 0;
+  for (size_t i = 0; i < transfer_representation.size(); ++i)
+    {
+      TransferRep::value_type m = transfer_representation[i];
+      for (TransferRep::value_type::const_iterator it = m.begin(); it != m.end(); ++it)
+        local_sum += it->second;
+    }
+
+  unsigned int global_sum = Utilities::MPI::sum(local_sum, MPI_COMM_WORLD);
+
+  if (rank == 0)
+    {
+      deallog << "# dofs = " << dof_handler2.n_dofs() << std::endl;
+      deallog << "sum(weights) = " << global_sum << std::endl;
+      if (dof_handler2.n_dofs() == global_sum)
+        deallog << "OK" << std::endl;
+      else
+        deallog << "Failed" << std::endl;
+    }
+}
+
+int main (int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
+
+  unsigned int rank = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+  deallog.push(Utilities::int_to_string(rank));
+
+  unsigned int n_refinements = 2;
+
+  if (rank == 0)
+    {
+      std::ofstream logfile("output");
+      deallog.attach(logfile);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      deallog.push("2d");
+      test<2>(n_refinements);
+      deallog.pop();
+      deallog.push("3d");
+      test<3>(n_refinements);
+      deallog.pop();
+    }
+  else
+    {
+      test<2>(n_refinements);
+      test<3>(n_refinements);
+    }
+}
diff --git a/tests/distributed_grids/intergrid_transfer_representation_parallel.output b/tests/distributed_grids/intergrid_transfer_representation_parallel.output
new file mode 100644
index 0000000000..dcc23d9cbe
--- /dev/null
+++ b/tests/distributed_grids/intergrid_transfer_representation_parallel.output
@@ -0,0 +1,10 @@
+DEAL:0:2d::Checking in 2 space dimensions
+DEAL:0:2d::---------------------------------------
+DEAL:0:2d::# dofs = 40
+DEAL:0:2d::sum(weights) = 40
+DEAL:0:2d::OK
+DEAL:0:3d::Checking in 3 space dimensions
+DEAL:0:3d::---------------------------------------
+DEAL:0:3d::# dofs = 176
+DEAL:0:3d::sum(weights) = 176
+DEAL:0:3d::OK
-- 
2.39.5