From ca558c99c8a2d38e49ecd7ce1318cfbc6556f133 Mon Sep 17 00:00:00 2001
From: Wolfgang Bangerth
Date: Tue, 20 Nov 2012 17:55:04 +0000
Subject: [PATCH] New test. It turns out that there was no test previously
 that actually ran a solver. This is one.

git-svn-id: https://svn.dealii.org/trunk@27608 0785d39b-7218-0410-832d-ea1e28bc413d
---
 tests/mpi/step-40.cc                  | 397 ++++++++++++++++++++++++++
 tests/mpi/step-40/ncpu_10/cmp/generic |  28 ++
 tests/mpi/step-40/ncpu_3/cmp/generic  |  28 ++
 tests/mpi/step-40/ncpu_4/cmp/generic  |  28 ++
 tests/mpi/step_40/ncpu_10/cmp/generic |  12 +
 tests/mpi/step_40/ncpu_3/cmp/generic  |   5 +
 tests/mpi/step_40/ncpu_4/cmp/generic  |   6 +
 7 files changed, 504 insertions(+)
 create mode 100644 tests/mpi/step-40.cc
 create mode 100644 tests/mpi/step-40/ncpu_10/cmp/generic
 create mode 100644 tests/mpi/step-40/ncpu_3/cmp/generic
 create mode 100644 tests/mpi/step-40/ncpu_4/cmp/generic
 create mode 100644 tests/mpi/step_40/ncpu_10/cmp/generic
 create mode 100644 tests/mpi/step_40/ncpu_3/cmp/generic
 create mode 100644 tests/mpi/step_40/ncpu_4/cmp/generic

diff --git a/tests/mpi/step-40.cc b/tests/mpi/step-40.cc
new file mode 100644
index 0000000000..caef596fea
--- /dev/null
+++ b/tests/mpi/step-40.cc
@@ -0,0 +1,397 @@
+//---------------------------------------------------------------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2009, 2012 by the deal.II authors
+//
+//    This file is subject to QPL and may not be distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the text and
+//    further information on this license.
+//
+//---------------------------------------------------------------------------
+
+// A lightly adapted version of the step-40 tutorial program
+
+#include "../tests.h"
+#include <deal.II/base/quadrature_lib.h>
+#include <deal.II/base/function.h>
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/lac/sparsity_tools.h>
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
+#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_precondition.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/fe/fe_values.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/numerics/vector_tools.h>
+#include <deal.II/numerics/data_out.h>
+#include <deal.II/numerics/error_estimator.h>
+
+#include <deal.II/lac/vector.h>
+#include <deal.II/lac/full_matrix.h>
+#include <deal.II/lac/solver_cg.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/lac/compressed_simple_sparsity_pattern.h>
+#include <deal.II/distributed/grid_refinement.h>
+
+#include <fstream>
+#include <iostream>
+
+namespace Step40
+{
+  using namespace dealii;
+
+
+  template <int dim>
+  class LaplaceProblem
+  {
+  public:
+    LaplaceProblem ();
+    ~LaplaceProblem ();
+
+    void run ();
+
+  private:
+    void setup_system ();
+    void assemble_system ();
+    void solve ();
+    void refine_grid ();
+
+    MPI_Comm mpi_communicator;
+
+    parallel::distributed::Triangulation<dim> triangulation;
+
+    DoFHandler<dim> dof_handler;
+    FE_Q<dim>       fe;
+
+    IndexSet locally_owned_dofs;
+    IndexSet locally_relevant_dofs;
+
+    ConstraintMatrix constraints;
+
+    PETScWrappers::MPI::SparseMatrix system_matrix;
+    PETScWrappers::MPI::Vector locally_relevant_solution;
+    PETScWrappers::MPI::Vector system_rhs;
+
+    ConditionalOStream pcout;
+  };
+
+
+
+
+  template <int dim>
+  LaplaceProblem<dim>::LaplaceProblem ()
+    :
+    mpi_communicator (MPI_COMM_WORLD),
+    triangulation (mpi_communicator,
+                   typename Triangulation<dim>::MeshSmoothing
+                   (Triangulation<dim>::smoothing_on_refinement |
+                    Triangulation<dim>::smoothing_on_coarsening)),
+    dof_handler (triangulation),
+    fe (2),
+    pcout (Utilities::MPI::this_mpi_process(mpi_communicator)
+           == 0
+           ?
+           deallog.get_file_stream()
+           :
+           std::cout,
+           (Utilities::MPI::this_mpi_process(mpi_communicator)
+            == 0))
+  {}
+
+
+
+  template <int dim>
+  LaplaceProblem<dim>::~LaplaceProblem ()
+  {
+    dof_handler.clear ();
+  }
+
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::setup_system ()
+  {
+    dof_handler.distribute_dofs (fe);
+
+    locally_owned_dofs = dof_handler.locally_owned_dofs ();
+    DoFTools::extract_locally_relevant_dofs (dof_handler,
+                                             locally_relevant_dofs);
+
+    locally_relevant_solution.reinit (mpi_communicator,
+                                      locally_owned_dofs,
+                                      locally_relevant_dofs);
+    locally_relevant_solution = 0;
+    system_rhs.reinit (mpi_communicator,
+                       dof_handler.n_dofs(),
+                       dof_handler.n_locally_owned_dofs());
+    system_rhs = 0;
+
+    constraints.clear ();
+    constraints.reinit (locally_relevant_dofs);
+    DoFTools::make_hanging_node_constraints (dof_handler, constraints);
+    VectorTools::interpolate_boundary_values (dof_handler,
+                                              0,
+                                              ZeroFunction<dim>(),
+                                              constraints);
+    constraints.close ();
+
+    CompressedSimpleSparsityPattern csp (dof_handler.n_dofs(),
+                                         dof_handler.n_dofs(),
+                                         locally_relevant_dofs);
+    DoFTools::make_sparsity_pattern (dof_handler,
+                                     csp,
+                                     constraints, false);
+    SparsityTools::distribute_sparsity_pattern (csp,
+                                                dof_handler.n_locally_owned_dofs_per_processor(),
+                                                mpi_communicator,
+                                                locally_relevant_dofs);
+    system_matrix.reinit (mpi_communicator,
+                          csp,
+                          dof_handler.n_locally_owned_dofs_per_processor(),
+                          dof_handler.n_locally_owned_dofs_per_processor(),
+                          Utilities::MPI::this_mpi_process(mpi_communicator));
+  }
+
+
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::assemble_system ()
+  {
+    const QGauss<dim> quadrature_formula(3);
+
+    FEValues<dim> fe_values (fe, quadrature_formula,
+                             update_values | update_gradients |
+                             update_quadrature_points |
+                             update_JxW_values);
+
+    const unsigned int dofs_per_cell = fe.dofs_per_cell;
+    const unsigned int n_q_points    = quadrature_formula.size();
+
+    FullMatrix<double> cell_matrix (dofs_per_cell, dofs_per_cell);
+    Vector<double>     cell_rhs (dofs_per_cell);
+
+    std::vector<unsigned int> local_dof_indices (dofs_per_cell);
+
+    typename DoFHandler<dim>::active_cell_iterator
+    cell = dof_handler.begin_active(),
+    endc = dof_handler.end();
+    for (; cell!=endc; ++cell)
+      if (cell->is_locally_owned())
+        {
+          cell_matrix = 0;
+          cell_rhs = 0;
+
+          fe_values.reinit (cell);
+
+          for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
+            {
+              const double
+              rhs_value
+                = (fe_values.quadrature_point(q_point)[1]
+                   >
+                   0.5+0.25*std::sin(4.0 * numbers::PI *
+                                     fe_values.quadrature_point(q_point)[0])
+                   ? 1 : -1);
+
+              for (unsigned int i=0; i<dofs_per_cell; ++i)
+                {
+                  for (unsigned int j=0; j<dofs_per_cell; ++j)
+                    cell_matrix(i,j) += (fe_values.shape_grad(i,q_point) *
+                                         fe_values.shape_grad(j,q_point) *
+                                         fe_values.JxW(q_point));
+
+                  cell_rhs(i) += (rhs_value *
+                                  fe_values.shape_value(i,q_point) *
+                                  fe_values.JxW(q_point));
+                }
+            }
+
+          cell->get_dof_indices (local_dof_indices);
+          constraints.distribute_local_to_global (cell_matrix,
+                                                  cell_rhs,
+                                                  local_dof_indices,
+                                                  system_matrix,
+                                                  system_rhs);
+        }
+
+    system_matrix.compress ();
+    system_rhs.compress ();
+  }
+
+
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::solve ()
+  {
+    PETScWrappers::MPI::Vector
+    completely_distributed_solution (mpi_communicator,
+                                     dof_handler.n_dofs(),
+                                     dof_handler.n_locally_owned_dofs());
+
+    SolverControl solver_control (dof_handler.n_dofs(), 1e-12);
+
+    PETScWrappers::SolverCG solver(solver_control, mpi_communicator);
+
+    PETScWrappers::PreconditionBoomerAMG
+    preconditioner(system_matrix,
+                   PETScWrappers::PreconditionBoomerAMG::AdditionalData(true));
+
+    solver.solve (system_matrix, completely_distributed_solution, system_rhs,
+                  preconditioner);
+
+    pcout << " Solved in " << solver_control.last_step()
+          << " iterations."
+          << std::endl;
+
+    constraints.distribute (completely_distributed_solution);
+
+    locally_relevant_solution = completely_distributed_solution;
+    locally_relevant_solution.update_ghost_values();
+  }
+
+
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::refine_grid ()
+  {
+    Vector<float> estimated_error_per_cell (triangulation.n_active_cells());
+    KellyErrorEstimator<dim>::estimate (dof_handler,
+                                        QGauss<dim-1>(3),
+                                        typename FunctionMap<dim>::type(),
+                                        locally_relevant_solution,
+                                        estimated_error_per_cell);
+    parallel::distributed::GridRefinement::
+    refine_and_coarsen_fixed_number (triangulation,
+                                     estimated_error_per_cell,
+                                     0.3, 0.03);
+    triangulation.execute_coarsening_and_refinement ();
+  }
+
+
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::run ()
+  {
+    const unsigned int n_cycles = 3;
+    for (unsigned int cycle=0; cycle<n_cycles; ++cycle)
+      {
+        pcout << "Cycle " << cycle << ':' << std::endl;
+
+        if (cycle == 0)
+          {
+            GridGenerator::hyper_cube (triangulation);
+            triangulation.refine_global (5);
+          }
+        else
+          refine_grid ();
+
+        setup_system ();
+
+        pcout << " Number of active cells: "
+              << triangulation.n_global_active_cells()
+              << std::endl
+              << " ";
+        for (unsigned int i=0; i<Utilities::MPI::n_mpi_processes(mpi_communicator); ++i)
+          pcout << triangulation.n_locally_owned_active_cells_per_processor()[i]
+                << '+';
+        pcout << std::endl;
+
+        pcout << " Number of degrees of freedom: "
+              << dof_handler.n_dofs()
+              << std::endl
+              << " ";
+        for (unsigned int i=0; i<Utilities::MPI::n_mpi_processes(mpi_communicator); ++i)
+          pcout << dof_handler.n_locally_owned_dofs_per_processor()[i]
+                << '+';
+        pcout << std::endl;
+
+        assemble_system ();
+        solve ();
+
+        pcout << std::endl;
+      }
+  }
+}
+
+
+
+
+int test_mpi ()
+{
+  try
+    {
+      using namespace dealii;
+      using namespace Step40;
+
+      {
+        LaplaceProblem<2> laplace_problem_2d;
+        laplace_problem_2d.run ();
+      }
+    }
+  catch (std::exception &exc)
+    {
+      std::cerr << std::endl << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      std::cerr << "Exception on processing: " << std::endl
+                << exc.what() << std::endl
+                << "Aborting!" << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+
+      return 1;
+    }
+  catch (...)
+    {
+      std::cerr << std::endl << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      std::cerr << "Unknown exception!" << std::endl
+                << "Aborting!" << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      return 1;
+    }
+
+  return 0;
+}
+
+
+
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi (argc, argv);
+
+  if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+    {
+      std::ofstream logfile(output_file_for_mpi("step-40").c_str());
+      deallog.attach(logfile);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      deallog.push("mpi");
+      test_mpi();
+      deallog.pop();
+    }
+  else
+    test_mpi();
+}
diff --git a/tests/mpi/step-40/ncpu_10/cmp/generic b/tests/mpi/step-40/ncpu_10/cmp/generic
new file mode 100644
index 0000000000..b5ba2f8c17
--- /dev/null
+++ b/tests/mpi/step-40/ncpu_10/cmp/generic
@@ -0,0 +1,28 @@
+
+Cycle 0:
+ Number of active cells: 1024
+ 104+100+104+100+104+104+100+104+100+104+
+ Number of degrees of freedom: 4225
+ 465+416+432+416+416+432+416+416+400+416+
+DEAL:mpi::Starting value 0.695309
+DEAL:mpi::Convergence step 10 value 0
+ Solved in 10 iterations.
+
+Cycle 1:
+ Number of active cells: 1948
+ 196+195+195+194+194+194+194+195+195+196+
+ Number of degrees of freedom: 8375
+ 885+845+845+849+812+840+843+817+819+820+
+DEAL:mpi::Starting value 0.832002
+DEAL:mpi::Convergence step 10 value 0
+ Solved in 10 iterations.
+
+Cycle 2:
+ Number of active cells: 3664
+ 366+366+367+366+367+366+367+367+366+366+
+ Number of degrees of freedom: 16165
+ 1665+1610+1622+1641+1594+1640+1642+1578+1611+1562+
+DEAL:mpi::Starting value 1.03315
+DEAL:mpi::Convergence step 11 value 0
+ Solved in 11 iterations.
+
diff --git a/tests/mpi/step-40/ncpu_3/cmp/generic b/tests/mpi/step-40/ncpu_3/cmp/generic
new file mode 100644
index 0000000000..d7d7c5ed90
--- /dev/null
+++ b/tests/mpi/step-40/ncpu_3/cmp/generic
@@ -0,0 +1,28 @@
+
+Cycle 0:
+ Number of active cells: 1024
+ 340+344+340+
+ Number of degrees of freedom: 4225
+ 1453+1412+1360+
+DEAL:mpi::Starting value 0.694820
+DEAL:mpi::Convergence step 10 value 0
+ Solved in 10 iterations.
+ +Cycle 1: + Number of active cells: 1960 + 652+656+652+ + Number of degrees of freedom: 8421 + 2868+2803+2750+ +DEAL:mpi::Starting value 0.843064 +DEAL:mpi::Convergence step 10 value 0 + Solved in 10 iterations. + +Cycle 2: + Number of active cells: 3652 + 1217+1218+1217+ + Number of degrees of freedom: 16095 + 5425+5378+5292+ +DEAL:mpi::Starting value 1.03054 +DEAL:mpi::Convergence step 11 value 0 + Solved in 11 iterations. + diff --git a/tests/mpi/step-40/ncpu_4/cmp/generic b/tests/mpi/step-40/ncpu_4/cmp/generic new file mode 100644 index 0000000000..c210d8c297 --- /dev/null +++ b/tests/mpi/step-40/ncpu_4/cmp/generic @@ -0,0 +1,28 @@ + +Cycle 0: + Number of active cells: 1024 + 256+256+256+256+ + Number of degrees of freedom: 4225 + 1089+1056+1056+1024+ +DEAL:mpi::Starting value 0.694817 +DEAL:mpi::Convergence step 10 value 0 + Solved in 10 iterations. + +Cycle 1: + Number of active cells: 1960 + 490+490+490+490+ + Number of degrees of freedom: 8421 + 2152+2107+2105+2057+ +DEAL:mpi::Starting value 0.835969 +DEAL:mpi::Convergence step 10 value 0 + Solved in 10 iterations. + +Cycle 2: + Number of active cells: 3658 + 916+913+913+916+ + Number of degrees of freedom: 16109 + 4079+4025+4032+3973+ +DEAL:mpi::Starting value 1.04003 +DEAL:mpi::Convergence step 10 value 0 + Solved in 10 iterations. + diff --git a/tests/mpi/step_40/ncpu_10/cmp/generic b/tests/mpi/step_40/ncpu_10/cmp/generic new file mode 100644 index 0000000000..30b5ea49cd --- /dev/null +++ b/tests/mpi/step_40/ncpu_10/cmp/generic @@ -0,0 +1,12 @@ + +DEAL:mpi::Running on 10 CPU(s). +DEAL:mpi::got message '1' from CPU 2! +DEAL:mpi::got message '2' from CPU 3! +DEAL:mpi::got message '3' from CPU 4! +DEAL:mpi::got message '4' from CPU 5! +DEAL:mpi::got message '5' from CPU 6! +DEAL:mpi::got message '6' from CPU 7! +DEAL:mpi::got message '7' from CPU 8! +DEAL:mpi::got message '8' from CPU 9! +DEAL:mpi::got message '9' from CPU 10! +DEAL:mpi::done diff --git a/tests/mpi/step_40/ncpu_3/cmp/generic b/tests/mpi/step_40/ncpu_3/cmp/generic new file mode 100644 index 0000000000..588f4ae9eb --- /dev/null +++ b/tests/mpi/step_40/ncpu_3/cmp/generic @@ -0,0 +1,5 @@ + +DEAL:mpi::Running on 3 CPU(s). +DEAL:mpi::got message '1' from CPU 2! +DEAL:mpi::got message '2' from CPU 3! +DEAL:mpi::done diff --git a/tests/mpi/step_40/ncpu_4/cmp/generic b/tests/mpi/step_40/ncpu_4/cmp/generic new file mode 100644 index 0000000000..8dcacbe8b3 --- /dev/null +++ b/tests/mpi/step_40/ncpu_4/cmp/generic @@ -0,0 +1,6 @@ + +DEAL:mpi::Running on 4 CPU(s). +DEAL:mpi::got message '1' from CPU 2! +DEAL:mpi::got message '2' from CPU 3! +DEAL:mpi::got message '3' from CPU 4! +DEAL:mpi::done -- 2.39.5