From: Timo Heister
Date: Mon, 18 Oct 2021 20:44:22 +0000 (-0400)
Subject: add test
X-Git-Tag: v9.4.0-rc1~889^2~3
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=d67773522d7f806450737fbaf3392909faae650a;p=dealii.git

add test
---

diff --git a/tests/distributed_grids/checkpointing_03.cc b/tests/distributed_grids/checkpointing_03.cc
new file mode 100644
index 0000000000..c0c36ba865
--- /dev/null
+++ b/tests/distributed_grids/checkpointing_03.cc
@@ -0,0 +1,253 @@
+/* ---------------------------------------------------------------------
+ *
+ * Copyright (C) 2021 by the deal.II authors
+ *
+ * This file is part of the deal.II library.
+ *
+ * The deal.II library is free software; you can use it, redistribute
+ * it, and/or modify it under the terms of the GNU Lesser General
+ * Public License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ * The full text of the license can be found in the file LICENSE.md at
+ * the top level directory of deal.II.
+ *
+ * ---------------------------------------------------------------------
+ */
+
+// Test (de)serialization of Trilinos vectors with checkpointing files
+// >4GB. The test here is of course simplified to run quickly. This is
+// the hp (variable transfer) version of checkpointing_02.cc
+
+// set to true to run a test that generates a 5GB file:
+const bool big = false;
+
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/distributed/grid_refinement.h>
+#include <deal.II/distributed/solution_transfer.h>
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+
+#include <deal.II/hp/fe_collection.h>
+
+#include <deal.II/lac/trilinos_vector.h>
+#include <deal.II/lac/vector.h>
+
+#include <fstream>
+#include <iostream>
+
+#include "../tests.h"
+
+
+template <int dim>
+class LaplaceProblem
+{
+public:
+  LaplaceProblem();
+
+  void
+  run(unsigned int n_cycles_global, unsigned int n_cycles_adaptive);
+
+private:
+  void
+  setup_system();
+  void
+  refine_grid();
+  void
+  output_results(const unsigned int cycle) const;
+
+  MPI_Comm mpi_communicator;
+
+  parallel::distributed::Triangulation<dim> triangulation;
+
+  hp::FECollection<dim> fe;
+  DoFHandler<dim>       dof_handler;
+
+  IndexSet locally_owned_dofs;
+  IndexSet locally_relevant_dofs;
+};
+
+template <int dim>
+LaplaceProblem<dim>::LaplaceProblem()
+  : mpi_communicator(MPI_COMM_WORLD)
+  , triangulation(
+      mpi_communicator,
+      typename Triangulation<dim>::MeshSmoothing(
+        Triangulation<dim>::smoothing_on_refinement |
+        Triangulation<dim>::smoothing_on_coarsening),
+      parallel::distributed::Triangulation<dim>::construct_multigrid_hierarchy)
+  , dof_handler(triangulation)
+{
+  fe.push_back(FE_Q<dim>(2));
+  fe.push_back(FE_Q<dim>(1));
+}
+
+
+template <int dim>
+void
+LaplaceProblem<dim>::setup_system()
+{
+  dof_handler.distribute_dofs(fe);
+  locally_owned_dofs = dof_handler.locally_owned_dofs();
+  DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);
+}
+
+template <int dim>
+void
+LaplaceProblem<dim>::refine_grid()
+{
+  // refine into a corner
+  Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
+  for (auto &cell : triangulation.active_cell_iterators())
+    {
+      if (cell->is_locally_owned())
+        estimated_error_per_cell(cell->active_cell_index()) =
+          cell->center().norm() * std::pow(cell->diameter(), 0.5);
+    }
+
+  parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
+    triangulation, estimated_error_per_cell, 0.20, 0.0);
+
+  triangulation.execute_coarsening_and_refinement();
+}
+
+
+
+template <int dim>
+void
+LaplaceProblem<dim>::run(unsigned int n_cycles_global,
+                         unsigned int n_cycles_adaptive)
+{
+  using VectorType = TrilinosWrappers::MPI::Vector;
+
+  for (unsigned int cycle = 0; cycle < n_cycles_adaptive; ++cycle)
+    {
+      deallog << "Cycle " << 1 + cycle << " / " << n_cycles_adaptive << ':'
+              << std::endl;
+
+      if (cycle == 0)
+        {
+          GridGenerator::subdivided_hyper_cube(triangulation, 10);
+          triangulation.refine_global(n_cycles_global);
+        }
+      else
+        refine_grid();
+
+      deallog << "n_global_active_cells: "
+              << triangulation.n_global_active_cells()
+              << " n_global_levels: " << triangulation.n_global_levels()
+              << " ghost_owners.size: " << triangulation.ghost_owners().size()
+              << " level_ghost_owners.size: "
+              << triangulation.level_ghost_owners().size() << std::endl;
+
+      setup_system();
+
+      const unsigned int n_vectors = (big) ? 50 : 2;
+      {
+        deallog << "checkpointing..." << std::endl;
+        std::vector<VectorType> vectors(n_vectors);
+        VectorType              x(locally_owned_dofs, mpi_communicator);
+        for (unsigned int i = 0; i < n_vectors; ++i)
+          {
+            vectors[i].reinit(locally_owned_dofs,
+                              locally_relevant_dofs,
+                              mpi_communicator);
+            vectors[i] = x;
+            x.add(1.0);
+          }
+
+        std::vector<const VectorType *> x_system(n_vectors);
+        int                             i = 0;
+        for (auto &v : x_system)
+          {
+            v = &vectors[i];
+            ++i;
+          }
+
+        VectorType y(locally_owned_dofs,
+                     locally_relevant_dofs,
+                     mpi_communicator);
+        y = x;
+
+        // To be sure, use two SolutionTransfer objects, because the second one
+        // will get a large offset
+        parallel::distributed::SolutionTransfer<dim, VectorType> system_trans(
+          dof_handler);
+        parallel::distributed::SolutionTransfer<dim, VectorType> trans2(
+          dof_handler);
+
+
+        dof_handler.prepare_for_serialization_of_active_fe_indices();
+        system_trans.prepare_for_serialization(x_system);
+        trans2.prepare_for_serialization(y);
+        triangulation.save("restart.mesh");
+      }
+
+      {
+        deallog << "resume..." << std::endl;
+        std::vector<VectorType> vectors(n_vectors);
+        for (unsigned int i = 0; i < n_vectors; ++i)
+          vectors[i].reinit(locally_owned_dofs, mpi_communicator);
+
+        std::vector<VectorType *> x_system(n_vectors);
+        int                       i = 0;
+        for (auto &v : x_system)
+          {
+            v = &vectors[i];
+            ++i;
+          }
+
+        VectorType y(locally_owned_dofs, mpi_communicator);
+
+        triangulation.coarsen_global(99);
+        triangulation.load("restart.mesh");
+
+        parallel::distributed::SolutionTransfer<dim, VectorType> system_trans(
+          dof_handler);
+        parallel::distributed::SolutionTransfer<dim, VectorType> trans2(
+          dof_handler);
+
+        dof_handler.deserialize_active_fe_indices();
+        system_trans.deserialize(x_system);
+        trans2.deserialize(y);
+
+        for (unsigned int i = 0; i < n_vectors; ++i)
+          deallog << "vec " << i << ": " << vectors[i].linfty_norm()
+                  << std::endl;
+        deallog << "vec y: " << y.linfty_norm() << std::endl;
+      }
+
+      deallog << std::endl;
+    }
+}
+
+
+int
+main(int argc, char *argv[])
+{
+  unsigned int n_cycles_global   = (big) ? 3 : 1;
+  unsigned int n_cycles_adaptive = 1;
+
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll                    all;
+
+  LaplaceProblem<3> laplace_problem;
+  laplace_problem.run(n_cycles_global, n_cycles_adaptive);
+}
diff --git a/tests/distributed_grids/checkpointing_03.with_trilinos=true.mpirun=2.output b/tests/distributed_grids/checkpointing_03.with_trilinos=true.mpirun=2.output
new file mode 100644
index 0000000000..42abe5c2a7
--- /dev/null
+++ b/tests/distributed_grids/checkpointing_03.with_trilinos=true.mpirun=2.output
@@ -0,0 +1,19 @@
+
+DEAL:0::Cycle 1 / 1:
+DEAL:0::n_global_active_cells: 8000 n_global_levels: 2 ghost_owners.size: 1 level_ghost_owners.size: 1
+DEAL:0::checkpointing...
+DEAL:0::resume...
+DEAL:0::vec 0: 0.00000 +DEAL:0::vec 1: 1.00000 +DEAL:0::vec y: 2.00000 +DEAL:0:: + +DEAL:1::Cycle 1 / 1: +DEAL:1::n_global_active_cells: 8000 n_global_levels: 2 ghost_owners.size: 1 level_ghost_owners.size: 1 +DEAL:1::checkpointing... +DEAL:1::resume... +DEAL:1::vec 0: 0.00000 +DEAL:1::vec 1: 1.00000 +DEAL:1::vec y: 2.00000 +DEAL:1:: +
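
Condensed reference (not part of the commit): the checkpoint/restore round trip the test exercises, reduced to a single vector and stripped of the test harness. This is a sketch only; the 2d mesh, refinement depth, and file name are placeholder choices, it assumes deal.II configured with p4est and Trilinos, and, like the test, it relies on the DoFHandler having been set up on the very mesh that is saved.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/distributed/solution_transfer.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/hp/fe_collection.h>
#include <deal.II/lac/trilinos_vector.h>

int
main(int argc, char *argv[])
{
  using namespace dealii;
  using VectorType  = TrilinosWrappers::MPI::Vector;
  constexpr int dim = 2;

  Utilities::MPI::MPI_InitFinalize mpi(argc, argv, 1);

  parallel::distributed::Triangulation<dim> triangulation(MPI_COMM_WORLD);
  GridGenerator::hyper_cube(triangulation);
  triangulation.refine_global(3);

  // two-element hp collection, as in the test above
  hp::FECollection<dim> fe;
  fe.push_back(FE_Q<dim>(2));
  fe.push_back(FE_Q<dim>(1));

  DoFHandler<dim> dof_handler(triangulation);
  dof_handler.distribute_dofs(fe);

  const IndexSet owned = dof_handler.locally_owned_dofs();
  IndexSet       relevant;
  DoFTools::extract_locally_relevant_dofs(dof_handler, relevant);

  {
    // checkpoint: attach a ghosted vector, then save mesh + attached data
    VectorType x(owned, relevant, MPI_COMM_WORLD);
    parallel::distributed::SolutionTransfer<dim, VectorType> trans(
      dof_handler);
    dof_handler.prepare_for_serialization_of_active_fe_indices();
    trans.prepare_for_serialization(x);
    triangulation.save("restart.mesh");
  }

  {
    // resume: restore the mesh, then pull fe indices and data back out
    triangulation.coarsen_global(99);
    triangulation.load("restart.mesh");
    parallel::distributed::SolutionTransfer<dim, VectorType> trans(
      dof_handler);
    dof_handler.deserialize_active_fe_indices();
    VectorType y(owned, MPI_COMM_WORLD);
    trans.deserialize(y);
  }
}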