From: Wolfgang Bangerth
Date: Wed, 13 Sep 2017 21:13:24 +0000 (-0600)
Subject: Add an hp-ified version of tests/mpi/step-40.
X-Git-Tag: v9.0.0-rc1~1051^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F5101%2Fhead;p=dealii.git

Add an hp-ified version of tests/mpi/step-40.

The output is identical to the original, non-hp version.
---

diff --git a/tests/mpi/hp_step-40.cc b/tests/mpi/hp_step-40.cc
new file mode 100644
index 0000000000..03a293e2cf
--- /dev/null
+++ b/tests/mpi/hp_step-40.cc
@@ -0,0 +1,404 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2016 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// A lightly adapted version of the step-40 tutorial program. Compared
+// to the plain mpi/step-40 test, this test uses hp::DoFHandler, but
+// it does not actually use different active_fe_indices on different
+// cells.
+
+#include "../tests.h"
+#include <deal.II/base/quadrature_lib.h>
+#include <deal.II/base/function.h>
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/lac/vector.h>
+#include <deal.II/lac/full_matrix.h>
+
+#include <deal.II/hp/dof_handler.h>
+#include <deal.II/hp/fe_collection.h>
+#include <deal.II/hp/fe_values.h>
+#include <deal.II/hp/q_collection.h>
+
+#include <deal.II/lac/solver_cg.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
+#include <deal.II/lac/sparsity_tools.h>
+#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
+#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_precondition.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_values.h>
+#include <deal.II/numerics/vector_tools.h>
+#include <deal.II/numerics/data_out.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/distributed/grid_refinement.h>
+
+#include <iostream>
+
+namespace Step40
+{
+  using namespace dealii;
+
+
+  template <int dim>
+  class LaplaceProblem
+  {
+  public:
+    LaplaceProblem ();
+    ~LaplaceProblem ();
+
+    void run ();
+
+  private:
+    void setup_system ();
+    void assemble_system ();
+    void solve ();
+    void refine_grid ();
+
+    MPI_Comm mpi_communicator;
+
+    parallel::distributed::Triangulation<dim> triangulation;
+
+    hp::DoFHandler<dim>   dof_handler;
+    hp::FECollection<dim> fe;
+
+    IndexSet locally_owned_dofs;
+    IndexSet locally_relevant_dofs;
+
+    ConstraintMatrix constraints;
+
+    PETScWrappers::MPI::SparseMatrix system_matrix;
+    PETScWrappers::MPI::Vector       locally_relevant_solution;
+    PETScWrappers::MPI::Vector       system_rhs;
+
+    ConditionalOStream pcout;
+  };
+
+
+
+
+  template <int dim>
+  LaplaceProblem<dim>::LaplaceProblem ()
+    :
+    mpi_communicator (MPI_COMM_WORLD),
+    triangulation (mpi_communicator,
+                   typename Triangulation<dim>::MeshSmoothing
+                   (Triangulation<dim>::smoothing_on_refinement |
+                    Triangulation<dim>::smoothing_on_coarsening)),
+    dof_handler (triangulation),
+    fe (FE_Q<dim>(2)),
+    pcout (Utilities::MPI::this_mpi_process(mpi_communicator)
+           == 0
+           ?
+           deallog.get_file_stream()
+           :
+           std::cout,
+           (Utilities::MPI::this_mpi_process(mpi_communicator)
+            == 0))
+  {}
+
+
+
+  template <int dim>
+  LaplaceProblem<dim>::~LaplaceProblem ()
+  {
+    dof_handler.clear ();
+  }
+
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::setup_system ()
+  {
+    dof_handler.distribute_dofs (fe);
+
+    locally_owned_dofs = dof_handler.locally_owned_dofs ();
+    DoFTools::extract_locally_relevant_dofs (dof_handler,
+                                             locally_relevant_dofs);
+
+    locally_relevant_solution.reinit (locally_owned_dofs,
+                                      locally_relevant_dofs,
+                                      mpi_communicator);
+    system_rhs.reinit (locally_owned_dofs, mpi_communicator);
+    system_rhs = PetscScalar();
+
+    constraints.clear ();
+    constraints.reinit (locally_relevant_dofs);
+    DoFTools::make_hanging_node_constraints (dof_handler, constraints);
+    VectorTools::interpolate_boundary_values (dof_handler,
+                                              0,
+                                              Functions::ZeroFunction<dim>(),
+                                              constraints);
+    constraints.close ();
+
+    DynamicSparsityPattern csp (dof_handler.n_dofs(),
+                                dof_handler.n_dofs(),
+                                locally_relevant_dofs);
+    DoFTools::make_sparsity_pattern (dof_handler,
+                                     csp,
+                                     constraints, false);
+    SparsityTools::distribute_sparsity_pattern (csp,
+                                                dof_handler.n_locally_owned_dofs_per_processor(),
+                                                mpi_communicator,
+                                                locally_relevant_dofs);
+    system_matrix.reinit (mpi_communicator,
+                          csp,
+                          dof_handler.n_locally_owned_dofs_per_processor(),
+                          dof_handler.n_locally_owned_dofs_per_processor(),
+                          Utilities::MPI::this_mpi_process(mpi_communicator));
+  }
+
+
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::assemble_system ()
+  {
+    const QGauss<dim> quadrature_formula(3);
+    hp::QCollection<dim> q_collection;
+    q_collection.push_back (quadrature_formula);
+
+    hp::FEValues<dim> x_fe_values (fe, q_collection,
+                                   update_values | update_gradients |
+                                   update_quadrature_points |
+                                   update_JxW_values);
+
+    FullMatrix<PetscScalar> cell_matrix;
+    Vector<PetscScalar>     cell_rhs;
+
+    std::vector<types::global_dof_index> local_dof_indices;
+
+    typename hp::DoFHandler<dim>::active_cell_iterator
+    cell = dof_handler.begin_active(),
+    endc = dof_handler.end();
+    for (; cell!=endc; ++cell)
+      if (cell->is_locally_owned())
+        {
+          x_fe_values.reinit (cell);
+          const FEValues<dim> &fe_values
+            = x_fe_values.get_present_fe_values ();
+
+          const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell;
+          const unsigned int n_q_points    = fe_values.get_quadrature().size();
+
+          cell_matrix.reinit (dofs_per_cell, dofs_per_cell);
+          cell_rhs.reinit (dofs_per_cell);
+
+          local_dof_indices.resize (dofs_per_cell);
+
+          cell_matrix = PetscScalar();
+          cell_rhs = PetscScalar();
+
+          for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
+            {
+              const double rhs_value
+                = (fe_values.quadrature_point(q_point)[1]
+                   >
+                   0.5+0.25*std::sin(4.0 * numbers::PI *
+                                     fe_values.quadrature_point(q_point)[0])
+                   ? 1 : -1);
+
+              for (unsigned int i=0; i<dofs_per_cell; ++i)
+                {
+                  for (unsigned int j=0; j<dofs_per_cell; ++j)
+                    cell_matrix(i,j) += (fe_values.shape_grad(i,q_point) *
+                                         fe_values.shape_grad(j,q_point) *
+                                         fe_values.JxW(q_point));
+
+                  cell_rhs(i) += (rhs_value *
+                                  fe_values.shape_value(i,q_point) *
+                                  fe_values.JxW(q_point));
+                }
+            }
+
+          cell->get_dof_indices (local_dof_indices);
+          constraints.distribute_local_to_global (cell_matrix,
+                                                  cell_rhs,
+                                                  local_dof_indices,
+                                                  system_matrix,
+                                                  system_rhs);
+        }
+
+    system_matrix.compress (VectorOperation::add);
+    system_rhs.compress (VectorOperation::add);
+  }
+
+
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::solve ()
+  {
+    PETScWrappers::MPI::Vector
+    completely_distributed_solution (mpi_communicator,
+                                     dof_handler.n_dofs(),
+                                     dof_handler.n_locally_owned_dofs());
+
+    SolverControl solver_control (dof_handler.n_dofs(), 1e-12);
+
+    PETScWrappers::SolverCG solver(solver_control, mpi_communicator);
+
+#ifndef PETSC_USE_COMPLEX
+    PETScWrappers::PreconditionBoomerAMG
+    preconditioner(system_matrix,
+                   PETScWrappers::PreconditionBoomerAMG::AdditionalData(true));
+
+    check_solver_within_range(
+      solver.solve (system_matrix, completely_distributed_solution, system_rhs,
+                    preconditioner),
+      solver_control.last_step(), 10, 10);
+#else
+    check_solver_within_range(
+      solver.solve (system_matrix, completely_distributed_solution, system_rhs,
+                    PETScWrappers::PreconditionJacobi(system_matrix)),
+      solver_control.last_step(), 120, 260);
+#endif
+
+    pcout << "   Solved in " << solver_control.last_step()
+          << " iterations." << std::endl;
+
+    constraints.distribute (completely_distributed_solution);
+
+    locally_relevant_solution = completely_distributed_solution;
+  }
+
+
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::refine_grid ()
+  {
+    triangulation.refine_global (1);
+  }
+
+
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::run ()
+  {
+    const unsigned int n_cycles = 2;
+    for (unsigned int cycle=0; cycle<n_cycles; ++cycle)
+      {
+        pcout << "Cycle " << cycle << ':' << std::endl;
+
+        if (cycle == 0)
+          {
+            GridGenerator::hyper_cube (triangulation);
+            triangulation.refine_global (5);
+          }
+        else
+          refine_grid ();
+
+        setup_system ();
+
+        pcout << "   Number of active cells:       "
+              << triangulation.n_global_active_cells()
+              << std::endl
+              << "      ";
+        for (unsigned int i=0; i<Utilities::MPI::n_mpi_processes(mpi_communicator); ++i)
+          pcout << triangulation.n_locally_owned_active_cells_per_processor()[i]
+                << '+';
+        pcout << std::endl;
+
+        pcout << "   Number of degrees of freedom: "
+              << dof_handler.n_dofs()
+              << std::endl
+              << "      ";
+        for (unsigned int i=0; i<dof_handler.n_locally_owned_dofs_per_processor().size(); ++i)
+          pcout << dof_handler.n_locally_owned_dofs_per_processor()[i]
+                << '+';
+        pcout << std::endl;
+
+        assemble_system ();
+        solve ();
+
+        pcout << std::endl;
+      }
+  }
+}
+
+
+
+
+int test_mpi ()
+{
+  try
+    {
+      {
+        Step40::LaplaceProblem<2> laplace_problem_2d;
+        laplace_problem_2d.run ();
+      }
+    }
+  catch (std::exception &exc)
+    {
+      std::cerr << std::endl << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      std::cerr << "Exception on processing: " << std::endl
+                << exc.what() << std::endl
+                << "Aborting!" << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+
+      return 1;
+    }
+  catch (...)
+    {
+      std::cerr << std::endl << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      std::cerr << "Unknown exception!" << std::endl
+                << "Aborting!" << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      return 1;
+    }
+
+  return 0;
+}
+
+
+
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
+
+  if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+    {
+      initlog();
+
+      deallog.push("mpi");
+      test_mpi();
+      deallog.pop();
+    }
+  else
+    test_mpi();
+}
diff --git a/tests/mpi/hp_step-40.with_petsc=true.mpirun=10.output b/tests/mpi/hp_step-40.with_petsc=true.mpirun=10.output
new file mode 100644
index 0000000000..908c37c096
--- /dev/null
+++ b/tests/mpi/hp_step-40.with_petsc=true.mpirun=10.output
@@ -0,0 +1,17 @@
+
+Cycle 0:
+   Number of active cells:       1024
+      104+100+104+100+104+104+100+104+100+104+
+   Number of degrees of freedom: 4225
+      465+416+432+416+416+432+416+416+400+416+
+DEAL:mpi::Solver stopped within 10 - 10 iterations
+   Solved in 10 iterations.
+
+Cycle 1:
+   Number of active cells:       4096
+      408+412+408+412+408+408+412+408+412+408+
+   Number of degrees of freedom: 16641
+      1729+1680+1664+1680+1632+1664+1680+1632+1648+1632+
+DEAL:mpi::Solver stopped within 10 - 10 iterations
+   Solved in 10 iterations.
+
diff --git a/tests/mpi/hp_step-40.with_petsc=true.mpirun=10.output.2 b/tests/mpi/hp_step-40.with_petsc=true.mpirun=10.output.2
new file mode 100644
index 0000000000..c71dce0a8a
--- /dev/null
+++ b/tests/mpi/hp_step-40.with_petsc=true.mpirun=10.output.2
@@ -0,0 +1,17 @@
+
+Cycle 0:
+   Number of active cells:       1024
+      104+100+104+100+104+104+100+104+100+104+
+   Number of degrees of freedom: 4225
+      465+416+432+416+416+432+416+416+400+416+
+DEAL:mpi::Solver stopped within 120 - 260 iterations
+   Solved in 126 iterations.
+
+Cycle 1:
+   Number of active cells:       4096
+      408+412+408+412+408+408+412+408+412+408+
+   Number of degrees of freedom: 16641
+      1729+1680+1664+1680+1632+1664+1680+1632+1648+1632+
+DEAL:mpi::Solver stopped within 120 - 260 iterations
+   Solved in 251 iterations.
+
diff --git a/tests/mpi/hp_step-40.with_petsc=true.mpirun=3.output b/tests/mpi/hp_step-40.with_petsc=true.mpirun=3.output
new file mode 100644
index 0000000000..bdd3598997
--- /dev/null
+++ b/tests/mpi/hp_step-40.with_petsc=true.mpirun=3.output
@@ -0,0 +1,17 @@
+
+Cycle 0:
+   Number of active cells:       1024
+      340+344+340+
+   Number of degrees of freedom: 4225
+      1453+1412+1360+
+DEAL:mpi::Solver stopped within 10 - 10 iterations
+   Solved in 10 iterations.
+
+Cycle 1:
+   Number of active cells:       4096
+      1364+1368+1364+
+   Number of degrees of freedom: 16641
+      5645+5540+5456+
+DEAL:mpi::Solver stopped within 10 - 10 iterations
+   Solved in 10 iterations.
+
diff --git a/tests/mpi/hp_step-40.with_petsc=true.mpirun=3.output.2 b/tests/mpi/hp_step-40.with_petsc=true.mpirun=3.output.2
new file mode 100644
index 0000000000..ea0c6a3ad8
--- /dev/null
+++ b/tests/mpi/hp_step-40.with_petsc=true.mpirun=3.output.2
@@ -0,0 +1,17 @@
+
+Cycle 0:
+   Number of active cells:       1024
+      340+344+340+
+   Number of degrees of freedom: 4225
+      1453+1412+1360+
+DEAL:mpi::Solver stopped within 120 - 260 iterations
+   Solved in 126 iterations.
+
+Cycle 1:
+   Number of active cells:       4096
+      1364+1368+1364+
+   Number of degrees of freedom: 16641
+      5645+5540+5456+
+DEAL:mpi::Solver stopped within 120 - 260 iterations
+   Solved in 251 iterations.
+
diff --git a/tests/mpi/hp_step-40.with_petsc=true.mpirun=4.output b/tests/mpi/hp_step-40.with_petsc=true.mpirun=4.output
new file mode 100644
index 0000000000..776ec99ef4
--- /dev/null
+++ b/tests/mpi/hp_step-40.with_petsc=true.mpirun=4.output
@@ -0,0 +1,17 @@
+
+Cycle 0:
+   Number of active cells:       1024
+      256+256+256+256+
+   Number of degrees of freedom: 4225
+      1089+1056+1056+1024+
+DEAL:mpi::Solver stopped within 10 - 10 iterations
+   Solved in 10 iterations.
+
+Cycle 1:
+   Number of active cells:       4096
+      1024+1024+1024+1024+
+   Number of degrees of freedom: 16641
+      4225+4160+4160+4096+
+DEAL:mpi::Solver stopped within 10 - 10 iterations
+   Solved in 10 iterations.
+
diff --git a/tests/mpi/hp_step-40.with_petsc=true.mpirun=4.output.2 b/tests/mpi/hp_step-40.with_petsc=true.mpirun=4.output.2
new file mode 100644
index 0000000000..41f14322bf
--- /dev/null
+++ b/tests/mpi/hp_step-40.with_petsc=true.mpirun=4.output.2
@@ -0,0 +1,17 @@
+
+Cycle 0:
+   Number of active cells:       1024
+      256+256+256+256+
+   Number of degrees of freedom: 4225
+      1089+1056+1056+1024+
+DEAL:mpi::Solver stopped within 120 - 260 iterations
+   Solved in 126 iterations.
+
+Cycle 1:
+   Number of active cells:       4096
+      1024+1024+1024+1024+
+   Number of degrees of freedom: 16641
+      4225+4160+4160+4096+
+DEAL:mpi::Solver stopped within 120 - 260 iterations
+   Solved in 251 iterations.
+
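
For reference, the "hp-ification" the patch performs amounts to wrapping the single FE_Q(2) element in an hp::FECollection, attaching an hp::DoFHandler, and querying hp::FEValues for the per-cell FEValues object. The following standalone, serial sketch illustrates that pattern outside the test harness; it is not part of the commit, and the dim=2 setup, main() scaffold, and volume computation are illustrative assumptions using the pre-9.3 hp::DoFHandler interface that the patch itself relies on.

```cpp
// Minimal serial sketch (not from the patch): one-element hp collections,
// so every cell keeps active_fe_index == 0, as in the hp_step-40 test.
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>
#include <deal.II/hp/dof_handler.h>
#include <deal.II/hp/fe_collection.h>
#include <deal.II/hp/fe_values.h>
#include <deal.II/hp/q_collection.h>

#include <iostream>

int main()
{
  using namespace dealii;

  Triangulation<2> triangulation;
  GridGenerator::hyper_cube(triangulation);
  triangulation.refine_global(3);

  // Collections with a single entry: the hp machinery is exercised,
  // but all cells use the same FE_Q(2)/QGauss(3) pair.
  hp::FECollection<2> fe;
  fe.push_back(FE_Q<2>(2));

  hp::QCollection<2> quadrature;
  quadrature.push_back(QGauss<2>(3));

  hp::DoFHandler<2> dof_handler(triangulation);
  dof_handler.distribute_dofs(fe);

  hp::FEValues<2> x_fe_values(fe, quadrature,
                              update_values | update_JxW_values);

  // Ask hp::FEValues for the FEValues object of the current cell,
  // exactly as the test's assemble_system() does.
  double volume = 0;
  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      x_fe_values.reinit(cell);
      const FEValues<2> &fe_values = x_fe_values.get_present_fe_values();
      for (unsigned int q = 0; q < fe_values.get_quadrature().size(); ++q)
        volume += fe_values.JxW(q);
    }

  std::cout << "dofs: " << dof_handler.n_dofs()
            << ", volume: " << volume << std::endl; // expect volume == 1
}
```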