From 351b257286c0faa6b80b40590ccafec4eacd4b8b Mon Sep 17 00:00:00 2001
From: Timo Heister
Date: Sun, 19 Jul 2020 12:37:35 -0400
Subject: [PATCH] add test

---
 tests/mpi/periodicity_08.cc                   | 182 ++++++++++++++++++
 ...odicity_08.mpirun=1.with_p4est=true.output |   5 +
 ...odicity_08.mpirun=3.with_p4est=true.output |  17 ++
 3 files changed, 204 insertions(+)
 create mode 100644 tests/mpi/periodicity_08.cc
 create mode 100644 tests/mpi/periodicity_08.mpirun=1.with_p4est=true.output
 create mode 100644 tests/mpi/periodicity_08.mpirun=3.with_p4est=true.output

diff --git a/tests/mpi/periodicity_08.cc b/tests/mpi/periodicity_08.cc
new file mode 100644
index 0000000000..cc95ad5b41
--- /dev/null
+++ b/tests/mpi/periodicity_08.cc
@@ -0,0 +1,182 @@
+/* ---------------------------------------------------------------------
+ *
+ * Copyright (C) 2018 - 2020 by the deal.II authors
+ *
+ * This file is part of the deal.II library.
+ *
+ * The deal.II library is free software; you can use it, redistribute
+ * it, and/or modify it under the terms of the GNU Lesser General
+ * Public License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ * The full text of the license can be found in the file LICENSE.md at
+ * the top level directory of deal.II.
+ *
+ * ---------------------------------------------------------------------
+ */
+
+// Document a hang in Triangulation::prepare_coarsening_and_refinement() when
+// periodic boundaries and mesh smoothing are used.
+
+// The (now fixed) bug was caused by eliminate_refined_boundary_islands also
+// incorrectly acting on periodic boundaries. This test originally comes from
+// ASPECT.
+
+#include <deal.II/base/quadrature_lib.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/fe/mapping_q1.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_tools.h>
+
+#include <deal.II/numerics/data_out.h>
+
+#include "../tests.h"
+
+template <int dim>
+void
+test()
+{
+  MPI_Comm mpi_communicator = MPI_COMM_WORLD;
+
+  const unsigned int n_mpi_processes =
+    Utilities::MPI::n_mpi_processes(mpi_communicator);
+  const unsigned int this_mpi_process =
+    Utilities::MPI::this_mpi_process(mpi_communicator);
+
+  const double L = 20;
+
+  dealii::parallel::distributed::Triangulation<dim> triangulation(
+    mpi_communicator,
+    typename Triangulation<dim>::MeshSmoothing(
+      Triangulation<dim>::limit_level_difference_at_vertices |
+      Triangulation<dim>::eliminate_unrefined_islands |
+      Triangulation<dim>::eliminate_refined_inner_islands |
+      Triangulation<dim>::eliminate_refined_boundary_islands |
+      Triangulation<dim>::do_not_produce_unrefined_islands));
+  GridGenerator::hyper_cube(triangulation, 0, 1, /*colorize*/ true);
+
+
+
+  std::vector<
+    GridTools::PeriodicFacePair<typename Triangulation<dim>::cell_iterator>>
+    periodicity_vector;
+  if (true)
+    for (int d = 0; d < dim; ++d)
+      GridTools::collect_periodic_faces(triangulation,
+                                        /*b_id1*/ 2 * d,
+                                        /*b_id2*/ 2 * d + 1,
+                                        /*direction*/ d,
+                                        periodicity_vector);
+
+  triangulation.add_periodicity(periodicity_vector);
+
+  // refine mesh
+  triangulation.refine_global(3);
+
+  // mark all cells except these for coarsening:
+  std::vector<Point<dim>> points = {{312500, 93750},
+                                    {312500, 156250},
+                                    {62500, 343750},
+                                    {312500, 281250},
+                                    {312500, 343750},
+                                    {62500, 406250},
+                                    {62500, 468750}};
+
+  // rescale to [0,1]^2 for this to work:
+  for (auto &p : points)
+    {
+      p[0] /= 1e6;
+      p[1] /= 5e5;
+    }
+
+  for (auto &cell : triangulation.active_cell_iterators())
+    {
+      cell->set_coarsen_flag();
+      for (auto &p : points)
+        if (cell->point_inside(p))
+          {
+            cell->clear_coarsen_flag();
+            cell->set_refine_flag();
+          }
+    }
+  triangulation.execute_coarsening_and_refinement();
+
+  deallog << "number of elements: " << triangulation.n_global_active_cells()
+          << std::endl;
+
+  // create dof_handler
+  FESystem<dim> FE(FE_Q<dim>(QGaussLobatto<1>(2)), 1);
+  DoFHandler<dim> dof_handler(triangulation);
+  dof_handler.distribute_dofs(FE);
+
+  // write mesh for visualization
+  DataOut<dim> data_out;
+  data_out.attach_dof_handler(dof_handler);
+  Vector<float> subdomain(triangulation.n_active_cells());
+  for (unsigned int i = 0; i < subdomain.size(); ++i)
+    subdomain(i) = triangulation.locally_owned_subdomain();
+  data_out.add_data_vector(subdomain, "subdomain");
+  data_out.build_patches();
+  data_out.write_vtu_in_parallel(std::string("mesh.vtu").c_str(),
+                                 mpi_communicator);
+
+  IndexSet locally_relevant_dofs;
+  DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);
+
+  IndexSet locally_active_dofs;
+  DoFTools::extract_locally_active_dofs(dof_handler, locally_active_dofs);
+
+  const std::vector<IndexSet> locally_owned_dofs =
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.locally_owned_dofs());
+
+  std::map<types::global_dof_index, Point<dim>> supportPoints;
+  DoFTools::map_dofs_to_support_points(MappingQ1<dim>(),
+                                       dof_handler,
+                                       supportPoints);
+
+  // create the combined hanging node and periodicity constraints
+  AffineConstraints<double> constraints;
+  constraints.clear();
+  constraints.reinit(locally_relevant_dofs);
+
+  for (int d = 0; d < dim; ++d)
+    DoFTools::make_periodicity_constraints(
+      dof_handler, 2 * d, 2 * d + 1, d, constraints);
+
+  const bool consistent =
+    constraints.is_consistent_in_parallel(locally_owned_dofs,
+                                          locally_active_dofs,
+                                          mpi_communicator,
+                                          /*verbose*/ true);
+
+  deallog << "Periodicity constraints are consistent in parallel: "
+          << consistent << std::endl;
+
+  DoFTools::make_hanging_node_constraints(dof_handler, constraints);
+
+  const bool hanging_consistent =
+    constraints.is_consistent_in_parallel(locally_owned_dofs,
+                                          locally_active_dofs,
+                                          mpi_communicator);
+
+  deallog << "Hanging nodes constraints are consistent in parallel: "
+          << hanging_consistent << std::endl;
+
+  constraints.close();
+  deallog << "OK" << std::endl;
+}
+
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv);
+  MPILogInitAll log;
+  test<2>();
+}
diff --git a/tests/mpi/periodicity_08.mpirun=1.with_p4est=true.output b/tests/mpi/periodicity_08.mpirun=1.with_p4est=true.output
new file mode 100644
index 0000000000..fe521e4a25
--- /dev/null
+++ b/tests/mpi/periodicity_08.mpirun=1.with_p4est=true.output
@@ -0,0 +1,5 @@
+
+DEAL:0::number of elements: 70
+DEAL:0::Periodicity constraints are consistent in parallel: 1
+DEAL:0::Hanging nodes constraints are consistent in parallel: 1
+DEAL:0::OK
diff --git a/tests/mpi/periodicity_08.mpirun=3.with_p4est=true.output b/tests/mpi/periodicity_08.mpirun=3.with_p4est=true.output
new file mode 100644
index 0000000000..22a66f2b10
--- /dev/null
+++ b/tests/mpi/periodicity_08.mpirun=3.with_p4est=true.output
@@ -0,0 +1,17 @@
+
+DEAL:0::number of elements: 70
+DEAL:0::Periodicity constraints are consistent in parallel: 1
+DEAL:0::Hanging nodes constraints are consistent in parallel: 1
+DEAL:0::OK
+
+DEAL:1::number of elements: 70
+DEAL:1::Periodicity constraints are consistent in parallel: 1
+DEAL:1::Hanging nodes constraints are consistent in parallel: 1
+DEAL:1::OK
+
+
+DEAL:2::number of elements: 70
+DEAL:2::Periodicity constraints are consistent in parallel: 1
+DEAL:2::Hanging nodes constraints are consistent in parallel: 1
+DEAL:2::OK
+
-- 
2.39.5