From: Wolfgang Bangerth
Date: Wed, 5 Sep 2018 20:31:27 +0000 (-0600)
Subject: Add a test for parallel hp handling.
X-Git-Tag: v9.1.0-rc1~740^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F7150%2Fhead;p=dealii.git

Add a test for parallel hp handling.
---

diff --git a/tests/mpi/hp_step-40_variable_01.cc b/tests/mpi/hp_step-40_variable_01.cc
new file mode 100644
index 0000000000..68909e613d
--- /dev/null
+++ b/tests/mpi/hp_step-40_variable_01.cc
@@ -0,0 +1,425 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// A lightly adapted version of the step-40 tutorial program. Compared
+// to the plain mpi/step-40 test, this test uses hp::DoFHandler, and
+// compared to the hp_step-40 test, it uses different finite element
+// objects (though all of the same kind).
+
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/function.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/quadrature_lib.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/distributed/grid_refinement.h>
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_values.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+
+#include <deal.II/hp/dof_handler.h>
+#include <deal.II/hp/fe_collection.h>
+#include <deal.II/hp/fe_values.h>
+
+#include <deal.II/lac/affine_constraints.h>
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
+#include <deal.II/lac/full_matrix.h>
+#include <deal.II/lac/petsc_precondition.h>
+#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
+#include <deal.II/lac/sparsity_tools.h>
+#include <deal.II/lac/vector.h>
+
+#include <deal.II/numerics/data_out.h>
+#include <deal.II/numerics/error_estimator.h>
+#include <deal.II/numerics/vector_tools.h>
+
+#include <iostream>
+
+#include "../tests.h"
+
+namespace Step40
+{
+  using namespace dealii;
+
+
+  template <int dim>
+  class LaplaceProblem
+  {
+  public:
+    LaplaceProblem();
+    ~LaplaceProblem();
+
+    void
+    run();
+
+  private:
+    void
+    setup_system();
+    void
+    assemble_system();
+    void
+    solve();
+    void
+    refine_grid();
+
+    MPI_Comm mpi_communicator;
+
+    parallel::distributed::Triangulation<dim> triangulation;
+
+    hp::DoFHandler<dim>   dof_handler;
+    hp::FECollection<dim> fe;
+
+    IndexSet locally_owned_dofs;
+    IndexSet locally_relevant_dofs;
+
+    AffineConstraints<PetscScalar> constraints;
+
+    PETScWrappers::MPI::SparseMatrix system_matrix;
+    PETScWrappers::MPI::Vector       locally_relevant_solution;
+    PETScWrappers::MPI::Vector       system_rhs;
+
+    ConditionalOStream pcout;
+  };
+
+
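+  // The constructor below deliberately fills the collection with three
+  // copies of the same FE_Q(2) element. Every cell therefore carries a
+  // plain Q2 discretization, but because the copies are distinct members
+  // of the hp::FECollection, the parallel hp machinery (per-cell
+  // active_fe_index handling, hp constraints, DoF exchange between
+  // processors) is still fully exercised.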
+  template <int dim>
+  LaplaceProblem<dim>::LaplaceProblem()
+    : mpi_communicator(MPI_COMM_WORLD)
+    , triangulation(mpi_communicator,
+                    typename Triangulation<dim>::MeshSmoothing(
+                      Triangulation<dim>::smoothing_on_refinement |
+                      Triangulation<dim>::smoothing_on_coarsening))
+    , dof_handler(triangulation)
+    , pcout(Utilities::MPI::this_mpi_process(mpi_communicator) == 0 ?
+              deallog.get_file_stream() :
+              std::cout,
+            (Utilities::MPI::this_mpi_process(mpi_communicator) == 0))
+  {
+    fe.push_back(FE_Q<dim>(2));
+    fe.push_back(FE_Q<dim>(2));
+    fe.push_back(FE_Q<dim>(2));
+  }
+
+
+
+  template <int dim>
+  LaplaceProblem<dim>::~LaplaceProblem()
+  {
+    dof_handler.clear();
+  }
+
+
+
+  template <int dim>
+  void
+  LaplaceProblem<dim>::setup_system()
+  {
+    // set the active_fe_index on all locally owned cells in a varied
+    // (but deterministic) way, cycling through the elements of the
+    // collection based on the active cell index
+    for (const auto &cell : dof_handler.active_cell_iterators())
+      if (cell->is_locally_owned())
+        cell->set_active_fe_index(cell->active_cell_index() % fe.size());
+
+    dof_handler.distribute_dofs(fe);
+
+    locally_owned_dofs = dof_handler.locally_owned_dofs();
+    DoFTools::extract_locally_relevant_dofs(dof_handler,
+                                            locally_relevant_dofs);
+
+    locally_relevant_solution.reinit(locally_owned_dofs,
+                                     locally_relevant_dofs,
+                                     mpi_communicator);
+    system_rhs.reinit(locally_owned_dofs, mpi_communicator);
+    system_rhs = PetscScalar();
+
+    constraints.clear();
+    constraints.reinit(locally_relevant_dofs);
+    DoFTools::make_hanging_node_constraints(dof_handler, constraints);
+    VectorTools::interpolate_boundary_values(
+      dof_handler,
+      0,
+      Functions::ZeroFunction<dim, PetscScalar>(),
+      constraints);
+    constraints.close();
+
+    DynamicSparsityPattern csp(dof_handler.n_dofs(),
+                               dof_handler.n_dofs(),
+                               locally_relevant_dofs);
+    DoFTools::make_sparsity_pattern(dof_handler, csp, constraints, false);
+    SparsityTools::distribute_sparsity_pattern(
+      csp,
+      dof_handler.n_locally_owned_dofs_per_processor(),
+      mpi_communicator,
+      locally_relevant_dofs);
+    system_matrix.reinit(mpi_communicator,
+                         csp,
+                         dof_handler.n_locally_owned_dofs_per_processor(),
+                         dof_handler.n_locally_owned_dofs_per_processor(),
+                         Utilities::MPI::this_mpi_process(mpi_communicator));
+  }
+
+
+
+  template <int dim>
+  void
+  LaplaceProblem<dim>::assemble_system()
+  {
+    const QGauss<dim>    quadrature_formula(3);
+    hp::QCollection<dim> q_collection;
+    q_collection.push_back(quadrature_formula);
+
+    hp::FEValues<dim> x_fe_values(fe,
+                                  q_collection,
+                                  update_values | update_gradients |
+                                    update_quadrature_points |
+                                    update_JxW_values);
+
+    FullMatrix<PetscScalar> cell_matrix;
+    Vector<PetscScalar>     cell_rhs;
+
+    std::vector<types::global_dof_index> local_dof_indices;
+
+    typename hp::DoFHandler<dim>::active_cell_iterator
+      cell = dof_handler.begin_active(),
+      endc = dof_handler.end();
+    for (; cell != endc; ++cell)
+      if (cell->is_locally_owned())
+        {
+          x_fe_values.reinit(cell);
+          const FEValues<dim> &fe_values =
+            x_fe_values.get_present_fe_values();
+
+          const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell;
+          const unsigned int n_q_points = fe_values.get_quadrature().size();
+
+          cell_matrix.reinit(dofs_per_cell, dofs_per_cell);
+          cell_rhs.reinit(dofs_per_cell);
+
+          local_dof_indices.resize(dofs_per_cell);
+
+          cell_matrix = PetscScalar();
+          cell_rhs    = PetscScalar();
+
+          for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
+            {
+              const double rhs_value =
+                (fe_values.quadrature_point(q_point)[1] >
+                     0.5 +
+                       0.25 *
+                         std::sin(4.0 * numbers::PI *
+                                  fe_values.quadrature_point(q_point)[0]) ?
+                   1 :
+                   -1);
+
+              for (unsigned int i = 0; i < dofs_per_cell; ++i)
+                {
+                  for (unsigned int j = 0; j < dofs_per_cell; ++j)
+                    cell_matrix(i, j) += (fe_values.shape_grad(i, q_point) *
+                                          fe_values.shape_grad(j, q_point) *
+                                          fe_values.JxW(q_point));
+
+                  cell_rhs(i) +=
+                    (rhs_value * fe_values.shape_value(i, q_point) *
+                     fe_values.JxW(q_point));
+                }
+            }
+
+          cell->get_dof_indices(local_dof_indices);
+          constraints.distribute_local_to_global(cell_matrix,
+                                                 cell_rhs,
+                                                 local_dof_indices,
+                                                 system_matrix,
+                                                 system_rhs);
+        }
+
+    system_matrix.compress(VectorOperation::add);
+    system_rhs.compress(VectorOperation::add);
+  }
+
+
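+  // The solver below picks its preconditioner based on how PETSc was
+  // configured. For real scalars, BoomerAMG (hypre's algebraic
+  // multigrid, here given the hint that the operator is symmetric)
+  // keeps the CG iteration count essentially independent of the mesh
+  // size, so the test checks for a narrow window around 10 iterations.
+  // hypre does not support complex scalars, which is why the complex
+  // branch falls back to a simple Jacobi preconditioner and accepts a
+  // much wider iteration range.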
+  template <int dim>
+  void
+  LaplaceProblem<dim>::solve()
+  {
+    PETScWrappers::MPI::Vector completely_distributed_solution(
+      mpi_communicator,
+      dof_handler.n_dofs(),
+      dof_handler.n_locally_owned_dofs());
+
+    SolverControl solver_control(dof_handler.n_dofs(), 1e-12);
+
+    PETScWrappers::SolverCG solver(solver_control, mpi_communicator);
+
+#ifndef PETSC_USE_COMPLEX
+    PETScWrappers::PreconditionBoomerAMG preconditioner(
+      system_matrix,
+      PETScWrappers::PreconditionBoomerAMG::AdditionalData(true));
+
+    check_solver_within_range(solver.solve(system_matrix,
+                                           completely_distributed_solution,
+                                           system_rhs,
+                                           preconditioner),
+                              solver_control.last_step(),
+                              10,
+                              10);
+#else
+    check_solver_within_range(solver.solve(system_matrix,
+                                           completely_distributed_solution,
+                                           system_rhs,
+                                           PETScWrappers::PreconditionJacobi(
+                                             system_matrix)),
+                              solver_control.last_step(),
+                              120,
+                              260);
+#endif
+
+    pcout << " Solved in " << solver_control.last_step() << " iterations."
+          << std::endl;
+
+    constraints.distribute(completely_distributed_solution);
+
+    locally_relevant_solution = completely_distributed_solution;
+  }
+
+
+
+  template <int dim>
+  void
+  LaplaceProblem<dim>::refine_grid()
+  {
+    triangulation.refine_global(1);
+  }
+
+
+
+  template <int dim>
+  void
+  LaplaceProblem<dim>::run()
+  {
+    const unsigned int n_cycles = 2;
+    for (unsigned int cycle = 0; cycle < n_cycles; ++cycle)
+      {
+        pcout << "Cycle " << cycle << ':' << std::endl;
+
+        if (cycle == 0)
+          {
+            GridGenerator::hyper_cube(triangulation);
+            triangulation.refine_global(5);
+          }
+        else
+          refine_grid();
+
+        setup_system();
+
+        pcout << " Number of active cells: "
+              << triangulation.n_global_active_cells() << std::endl
+              << " ";
+        for (unsigned int i = 0;
+             i < Utilities::MPI::n_mpi_processes(mpi_communicator);
+             ++i)
+          pcout
+            << triangulation.n_locally_owned_active_cells_per_processor()[i]
+            << '+';
+        pcout << std::endl;
+
+        pcout << " Number of degrees of freedom: " << dof_handler.n_dofs()
+              << std::endl
+              << " ";
+        for (unsigned int i = 0;
+             i < Utilities::MPI::n_mpi_processes(mpi_communicator);
+             ++i)
+          pcout << dof_handler.n_locally_owned_dofs_per_processor()[i]
+                << '+';
+        pcout << std::endl;
+
+        assemble_system();
+        solve();
+
+        pcout << std::endl;
+      }
+  }
+} // namespace Step40
+
+
+int
+test_mpi()
+{
+  try
+    {
+      using namespace dealii;
+      using namespace Step40;
+
+
+      {
+        LaplaceProblem<2> laplace_problem_2d;
+        laplace_problem_2d.run();
+      }
+    }
+  catch (std::exception &exc)
+    {
+      std::cerr << std::endl
+                << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      std::cerr << "Exception on processing: " << std::endl
+                << exc.what() << std::endl
+                << "Aborting!" << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+
+      return 1;
+    }
+  catch (...)
+    {
+      std::cerr << std::endl
+                << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      std::cerr << "Unknown exception!" << std::endl
+                << "Aborting!" << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      return 1;
+    }
+
+  return 0;
+}
+
+
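+// Only the first MPI process opens a log file through initlog() and
+// pushes the "mpi" prefix; the ConditionalOStream set up in the
+// constructor above is active on that process alone. All other ranks
+// still execute test_mpi(), since the refinement, assembly, and solve
+// steps are collective operations, but they produce no output of their
+// own.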
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
+    {
+      initlog();
+
+      deallog.push("mpi");
+      test_mpi();
+      deallog.pop();
+    }
+  else
+    test_mpi();
+}
diff --git a/tests/mpi/hp_step-40_variable_01.with_petsc=true.mpirun=1.output b/tests/mpi/hp_step-40_variable_01.with_petsc=true.mpirun=1.output
new file mode 100644
index 0000000000..3c45af8ce2
--- /dev/null
+++ b/tests/mpi/hp_step-40_variable_01.with_petsc=true.mpirun=1.output
@@ -0,0 +1,17 @@
+
+Cycle 0:
+ Number of active cells: 1024
+ 1024+
+ Number of degrees of freedom: 4225
+ 4225+
+DEAL:mpi::Solver stopped within 10 - 10 iterations
+ Solved in 10 iterations.
+
+Cycle 1:
+ Number of active cells: 4096
+ 4096+
+ Number of degrees of freedom: 16641
+ 16641+
+DEAL:mpi::Solver stopped within 10 - 10 iterations
+ Solved in 10 iterations.
+
diff --git a/tests/mpi/hp_step-40_variable_01.with_petsc=true.mpirun=10.output b/tests/mpi/hp_step-40_variable_01.with_petsc=true.mpirun=10.output
new file mode 100644
index 0000000000..d0d35dd90c
--- /dev/null
+++ b/tests/mpi/hp_step-40_variable_01.with_petsc=true.mpirun=10.output
@@ -0,0 +1,17 @@
+
+Cycle 0:
+ Number of active cells: 1024
+ 104+100+104+100+104+104+100+104+100+104+
+ Number of degrees of freedom: 4225
+ 457+412+433+416+417+429+417+418+404+422+
+DEAL:mpi::Solver stopped within 10 - 10 iterations
+ Solved in 10 iterations.
+
+Cycle 1:
+ Number of active cells: 4096
+ 408+412+408+412+408+408+412+408+412+408+
+ Number of degrees of freedom: 16649
+ 1721+1669+1670+1677+1626+1662+1676+1640+1658+1650+
+DEAL:mpi::Solver stopped within 10 - 10 iterations
+ Solved in 10 iterations.
+
diff --git a/tests/mpi/hp_step-40_variable_01.with_petsc=true.mpirun=3.output b/tests/mpi/hp_step-40_variable_01.with_petsc=true.mpirun=3.output
new file mode 100644
index 0000000000..3da8cd787b
--- /dev/null
+++ b/tests/mpi/hp_step-40_variable_01.with_petsc=true.mpirun=3.output
@@ -0,0 +1,17 @@
+
+Cycle 0:
+ Number of active cells: 1024
+ 340+344+340+
+ Number of degrees of freedom: 4225
+ 1446+1410+1369+
+DEAL:mpi::Solver stopped within 10 - 10 iterations
+ Solved in 10 iterations.
+
+Cycle 1:
+ Number of active cells: 4096
+ 1364+1368+1364+
+ Number of degrees of freedom: 16641
+ 5622+5538+5481+
+DEAL:mpi::Solver stopped within 10 - 10 iterations
+ Solved in 10 iterations.
+
diff --git a/tests/mpi/hp_step-40_variable_01.with_petsc=true.mpirun=4.output b/tests/mpi/hp_step-40_variable_01.with_petsc=true.mpirun=4.output
new file mode 100644
index 0000000000..b59d1f66a0
--- /dev/null
+++ b/tests/mpi/hp_step-40_variable_01.with_petsc=true.mpirun=4.output
@@ -0,0 +1,17 @@
+
+Cycle 0:
+ Number of active cells: 1024
+ 256+256+256+256+
+ Number of degrees of freedom: 4225
+ 1077+1056+1058+1034+
+DEAL:mpi::Solver stopped within 10 - 10 iterations
+ Solved in 10 iterations.
+
+Cycle 1:
+ Number of active cells: 4096
+ 1024+1024+1024+1024+
+ Number of degrees of freedom: 16641
+ 4201+4163+4155+4122+
+DEAL:mpi::Solver stopped after 11 iterations
+ Solved in 11 iterations.
+