From f9f8531ef0da53cdc538df0a9b464163d50c0c85 Mon Sep 17 00:00:00 2001
From: Wolfgang Bangerth
Date: Sun, 11 Mar 2018 00:26:47 -0700
Subject: [PATCH] Add a test that works on only a subset of MPI processes.

The test is intended to verify that we don't have stray references to
MPI_COMM_WORLD.
---
 tests/mpi/step-40_cuthill_mckee_MPI-subset.cc | 476 ++++++++++++++++++
 ...with_petsc_with_hypre=true.mpirun=7.output |  17 +
 2 files changed, 493 insertions(+)
 create mode 100644 tests/mpi/step-40_cuthill_mckee_MPI-subset.cc
 create mode 100644 tests/mpi/step-40_cuthill_mckee_MPI-subset.with_petsc_with_hypre=true.mpirun=7.output

diff --git a/tests/mpi/step-40_cuthill_mckee_MPI-subset.cc b/tests/mpi/step-40_cuthill_mckee_MPI-subset.cc
new file mode 100644
index 0000000000..f7ecd106f9
--- /dev/null
+++ b/tests/mpi/step-40_cuthill_mckee_MPI-subset.cc
@@ -0,0 +1,476 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// This is a variation of the step-40_cuthill_mckee test, but instead
+// of working on MPI_COMM_WORLD, it works on only a subset of
+// processes. The idea of the test is to verify that we can do that,
+// i.e., that we don't have any accidental references to
+// MPI_COMM_WORLD that would lead to deadlocks if we were working on a
+// different communicator.
+
+#include "../tests.h"
+#include <deal.II/base/quadrature_lib.h>
+#include <deal.II/base/function.h>
+#include <deal.II/lac/vector.h>
+#include <deal.II/lac/full_matrix.h>
+#include <deal.II/lac/solver_cg.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
+#include <deal.II/lac/petsc_parallel_vector.h>
+
+#include <deal.II/lac/petsc_solver.h>
+#include <deal.II/lac/petsc_precondition.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria_accessor.h>
+
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/dofs/dof_renumbering.h>
+#include <deal.II/fe/fe_values.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/numerics/vector_tools.h>
+#include <deal.II/numerics/data_out.h>
+#include <deal.II/numerics/error_estimator.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/conditional_ostream.h>
+
+#include <deal.II/base/index_set.h>
+#include <deal.II/lac/sparsity_tools.h>
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/distributed/grid_refinement.h>
+#include <fstream>
+
+#include <iostream>
+
+namespace Step40
+{
+  using namespace dealii;
+
+
+  template <int dim>
+  class LaplaceProblem
+  {
+  public:
+    LaplaceProblem (MPI_Comm comm);
+    ~LaplaceProblem ();
+
+    void run ();
+
+  private:
+    void setup_system ();
+    void assemble_system ();
+    void solve ();
+    void refine_grid ();
+
+    MPI_Comm mpi_communicator;
+
+    parallel::distributed::Triangulation<dim> triangulation;
+
+    DoFHandler<dim> dof_handler;
+    FE_Q<dim>       fe;
+
+    IndexSet locally_owned_dofs;
+    IndexSet locally_relevant_dofs;
+
+    ConstraintMatrix constraints;
+
+    PETScWrappers::MPI::SparseMatrix system_matrix;
+    PETScWrappers::MPI::Vector       locally_relevant_solution;
+    PETScWrappers::MPI::Vector       system_rhs;
+
+    ConditionalOStream pcout;
+  };
+
+
+
+
+  template <int dim>
+  LaplaceProblem<dim>::LaplaceProblem (MPI_Comm comm)
+    :
+    mpi_communicator (comm),
+    triangulation (mpi_communicator,
+                   typename Triangulation<dim>::MeshSmoothing
+                   (Triangulation<dim>::smoothing_on_refinement |
+                    Triangulation<dim>::smoothing_on_coarsening)),
+    dof_handler (triangulation),
+    fe (2),
+    pcout (Utilities::MPI::this_mpi_process(mpi_communicator)
+           == 0
+           ?
+           deallog.get_file_stream()
+           :
+           std::cout,
+           (Utilities::MPI::this_mpi_process(mpi_communicator)
+            == 0))
+  {}
+
+
+
+  template <int dim>
+  LaplaceProblem<dim>::~LaplaceProblem ()
+  {
+    dof_handler.clear ();
+  }
+
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::setup_system ()
+  {
+    dof_handler.distribute_dofs (fe);
+    locally_owned_dofs = dof_handler.locally_owned_dofs();
+    DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);
+
+    {
+      std::vector<types::global_dof_index> starting_indices;
+
+
+      const QGauss<dim-1> face_quadrature_formula(fe.degree + 1);
+      FEFaceValues<dim> fe_face_values(fe, face_quadrature_formula,
+                                       update_values | update_quadrature_points |
+                                       update_normal_vectors | update_JxW_values);
+
+      Tensor<1, dim> u;
+      Point<dim> down {0,-1};
+      std::vector< types::global_dof_index > dof_indices (fe.n_dofs_per_face(), 0);
+
+
+      for (const auto &cell: dof_handler.active_cell_iterators())
+        {
+          if (cell->is_locally_owned())
+            {
+
+              for (unsigned int face = 0; face < GeometryInfo<dim>::faces_per_cell; ++face)
+                {
+                  if ((cell->face(face)->at_boundary())
+                      || (cell->neighbor(face)->active() && cell->neighbor(face)->is_ghost()))
+                    {
+                      fe_face_values.reinit(cell, face);
+                      //for Q_2 this is in middle of face, dim=2 or what quadrature point to give?
+                      u = fe_face_values.normal_vector(1);
+                      if (u*down < 0)
+                        {
+                          cell->face(face)->get_dof_indices(dof_indices);
+                          starting_indices.insert(std::end(starting_indices),
+                                                  std::begin(dof_indices), std::end(dof_indices));
+                        }
+                    }
+                }
+            }
+        }
+
+      // remove duplicates by creating a set
+      std::set<types::global_dof_index> no_duplicates_please (starting_indices.begin(),
+                                                              starting_indices.end());
+      // back to vector for the DoFRenumbering function
+      starting_indices.clear();
+      starting_indices.assign(no_duplicates_please.begin(), no_duplicates_please.end());
+
+      DoFRenumbering::Cuthill_McKee(dof_handler, false, true, starting_indices);
+
+      locally_owned_dofs = dof_handler.locally_owned_dofs();
+      DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);
+    }
+
+    locally_relevant_solution.reinit (locally_owned_dofs,
+                                      locally_relevant_dofs,
+                                      mpi_communicator);
+    system_rhs.reinit (locally_owned_dofs, mpi_communicator);
+    system_rhs = PetscScalar();
+
+    constraints.clear ();
+    constraints.reinit (locally_relevant_dofs);
+    DoFTools::make_hanging_node_constraints (dof_handler, constraints);
+    VectorTools::interpolate_boundary_values (dof_handler,
+                                              0,
+                                              Functions::ZeroFunction<dim>(),
+                                              constraints);
+    constraints.close ();
+
+    DynamicSparsityPattern csp (dof_handler.n_dofs(),
+                                dof_handler.n_dofs(),
+                                locally_relevant_dofs);
+    DoFTools::make_sparsity_pattern (dof_handler,
+                                     csp,
+                                     constraints, false);
+    SparsityTools::distribute_sparsity_pattern (csp,
+                                                dof_handler.n_locally_owned_dofs_per_processor(),
+                                                mpi_communicator,
+                                                locally_relevant_dofs);
+    system_matrix.reinit (mpi_communicator,
+                          csp,
+                          dof_handler.n_locally_owned_dofs_per_processor(),
+                          dof_handler.n_locally_owned_dofs_per_processor(),
+                          Utilities::MPI::this_mpi_process(mpi_communicator));
+  }
+
+
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::assemble_system ()
+  {
+    const QGauss<dim> quadrature_formula(3);
+
+    FEValues<dim> fe_values (fe, quadrature_formula,
+                             update_values | update_gradients |
+                             update_quadrature_points |
+                             update_JxW_values);
+
+    const unsigned int dofs_per_cell = fe.dofs_per_cell;
+    const unsigned int n_q_points    = quadrature_formula.size();
+
+    FullMatrix<PetscScalar> cell_matrix (dofs_per_cell, dofs_per_cell);
+    Vector<PetscScalar>     cell_rhs (dofs_per_cell);
+
+    std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
+
+    typename DoFHandler<dim>::active_cell_iterator
+    cell = dof_handler.begin_active(),
+    endc = dof_handler.end();
+    for (; cell!=endc; ++cell)
+      if (cell->is_locally_owned())
+        {
+          cell_matrix = PetscScalar();
+          cell_rhs = PetscScalar();
+
+          fe_values.reinit (cell);
+
+          for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
+            {
+              const double rhs_value
+                = (fe_values.quadrature_point(q_point)[1]
+                   >
+                   0.5+0.25*std::sin(4.0 * numbers::PI *
+                                     fe_values.quadrature_point(q_point)[0])
+                   ? 1 : -1);
+
+              for (unsigned int i=0; i<dofs_per_cell; ++i)
+                {
+                  for (unsigned int j=0; j<dofs_per_cell; ++j)
+                    cell_matrix(i,j) += (fe_values.shape_grad(i,q_point) *
+                                         fe_values.shape_grad(j,q_point) *
+                                         fe_values.JxW(q_point));
+
+                  cell_rhs(i) += (rhs_value *
+                                  fe_values.shape_value(i,q_point) *
+                                  fe_values.JxW(q_point));
+                }
+            }
+
+          cell->get_dof_indices (local_dof_indices);
+          constraints.distribute_local_to_global (cell_matrix,
+                                                  cell_rhs,
+                                                  local_dof_indices,
+                                                  system_matrix,
+                                                  system_rhs);
+        }
+
+    system_matrix.compress (VectorOperation::add);
+    system_rhs.compress (VectorOperation::add);
+  }
+
+
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::solve ()
+  {
+    PETScWrappers::MPI::Vector
+    completely_distributed_solution (mpi_communicator,
+                                     dof_handler.n_dofs(),
+                                     dof_handler.n_locally_owned_dofs());
+
+    SolverControl solver_control (dof_handler.n_dofs(), 1e-12);
+
+    PETScWrappers::SolverCG solver(solver_control, mpi_communicator);
+
+#ifndef PETSC_USE_COMPLEX
+    PETScWrappers::PreconditionBoomerAMG
+    preconditioner(system_matrix,
+                   PETScWrappers::PreconditionBoomerAMG::AdditionalData(true));
+
+    check_solver_within_range(
+      solver.solve (system_matrix, completely_distributed_solution, system_rhs,
+                    preconditioner),
+      solver_control.last_step(), 11, 11);
+#else
+    check_solver_within_range(
+      solver.solve (system_matrix, completely_distributed_solution, system_rhs,
+                    PETScWrappers::PreconditionJacobi(system_matrix)),
+      solver_control.last_step(), 120, 260);
+#endif
+
+    pcout << "   Solved in " << solver_control.last_step()
+          << " iterations." << std::endl;
+
+    constraints.distribute (completely_distributed_solution);
+
+    locally_relevant_solution = completely_distributed_solution;
+  }
+
+
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::refine_grid ()
+  {
+    triangulation.refine_global (1);
+  }
+
+
+
+
+  template <int dim>
+  void LaplaceProblem<dim>::run ()
+  {
+    const unsigned int n_cycles = 2;
+    for (unsigned int cycle=0; cycle<n_cycles; ++cycle)
+      {
+        pcout << "Cycle " << cycle << ':' << std::endl;
+
+        if (cycle == 0)
+          {
+            GridGenerator::hyper_cube (triangulation);
+            triangulation.refine_global (5);
+          }
+        else
+          refine_grid ();
+
+        setup_system ();
+
+        pcout << "   Number of active cells:       "
+              << triangulation.n_global_active_cells()
+              << std::endl
+              << "      ";
+        for (unsigned int i=0; i<Utilities::MPI::n_mpi_processes(mpi_communicator); ++i)
+          pcout << triangulation.n_locally_owned_active_cells_per_processor()[i]
+                << '+';
+        pcout << std::endl;
+
+        pcout << "   Number of degrees of freedom: "
+              << dof_handler.n_dofs()
+              << std::endl
+              << "      ";
+        for (unsigned int i=0; i<Utilities::MPI::n_mpi_processes(mpi_communicator); ++i)
+          pcout << dof_handler.n_locally_owned_dofs_per_processor()[i]
+                << '+';
+        pcout << std::endl;
+
+        assemble_system ();
+        solve ();
+
+        pcout << std::endl;
+      }
+  }
+}
+
+
+
+
+int test_mpi (MPI_Comm comm)
+{
+  try
+    {
+      using namespace dealii;
+      using namespace Step40;
+
+      {
+        LaplaceProblem<2> laplace_problem_2d (comm);
+        laplace_problem_2d.run ();
+      }
+    }
+  catch (std::exception &exc)
+    {
+      std::cerr << std::endl << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      std::cerr << "Exception on processing: " << std::endl
+                << exc.what() << std::endl
+                << "Aborting!" << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+
+      return 1;
+    }
+  catch (...)
+    {
+      std::cerr << std::endl << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      std::cerr << "Unknown exception!" << std::endl
+                << "Aborting!" << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      return 1;
+    }
+
+  return 0;
+}
+
+
+
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
+
+  // create a group of 4 out of the 7 processes that we want to run
+  // this program with
+  const unsigned int n_procs = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+  Assert (n_procs == 7, ExcInternalError());
+
+  MPI_Group whole_group;
+  MPI_Comm_group (MPI_COMM_WORLD, &whole_group);
+
+  MPI_Group subset_group;
+  const int subset_ranks[] = { 6, 0, 2, 3 };
+  MPI_Group_incl (whole_group, 4, subset_ranks, &subset_group);
+
+  MPI_Comm subset_comm;
+  MPI_Comm_create (MPI_COMM_WORLD, subset_group, &subset_comm);
+
+  // now only run the program on the subset of processors identified
+  // in 'subset_comm'. All of the other processes simply do nothing and will
+  // wait for termination in the destructor of MPI_InitFinalize until
+  // the worker processes are ready to join them
+  if (std::find (std::begin(subset_ranks),
+                 std::end(subset_ranks),
+                 Utilities::MPI::this_mpi_process (MPI_COMM_WORLD))
+      != std::end(subset_ranks))
+    {
+      if (Utilities::MPI::this_mpi_process (subset_comm) == 0)
+        {
+          initlog();
+
+          // check that creation above worked correctly
+          Assert (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 6,
+                  ExcInternalError());
+          Assert (Utilities::MPI::n_mpi_processes (subset_comm) == 4,
+                  ExcInternalError());
+
+          deallog.push("mpi");
+          test_mpi(subset_comm);
+          deallog.pop();
+        }
+      else
+        test_mpi(subset_comm);
+    }
+}
diff --git a/tests/mpi/step-40_cuthill_mckee_MPI-subset.with_petsc_with_hypre=true.mpirun=7.output b/tests/mpi/step-40_cuthill_mckee_MPI-subset.with_petsc_with_hypre=true.mpirun=7.output
new file mode 100644
index 0000000000..3a2fb97a8c
--- /dev/null
+++ b/tests/mpi/step-40_cuthill_mckee_MPI-subset.with_petsc_with_hypre=true.mpirun=7.output
@@ -0,0 +1,17 @@
+
+Cycle 0:
+   Number of active cells:       1024
+      256+256+256+256+
+   Number of degrees of freedom: 4225
+      1089+1056+1056+1024+
+DEAL:mpi::Solver stopped after 10 iterations
+   Solved in 10 iterations.
+
+Cycle 1:
+   Number of active cells:       4096
+      1024+1024+1024+1024+
+   Number of degrees of freedom: 16641
+      4225+4160+4160+4096+
+DEAL:mpi::Solver stopped within 11 - 11 iterations
+   Solved in 11 iterations.
+
-- 
2.39.5
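The communicator handling that main() exercises above can be tried out in isolation. The following is a minimal, deal.II-free sketch of the same MPI_Comm_group / MPI_Group_incl / MPI_Comm_create pattern; it is not part of the patch, the variable names are chosen for illustration, and it assumes the program is launched with at least 7 ranks so that world rank 6 exists, matching the test's mpirun=7 setup and its subset_ranks = {6, 0, 2, 3}.

    // subset_comm_sketch.cc -- illustrative only, not part of the patch
    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char *argv[])
    {
      MPI_Init(&argc, &argv);

      int world_rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

      // Build a group containing only the chosen world ranks. The order of
      // the list defines the ranks in the new group: world rank 6 becomes
      // rank 0 of the subset, just as the test's Assert expects.
      MPI_Group whole_group;
      MPI_Comm_group(MPI_COMM_WORLD, &whole_group);

      const int subset_ranks[] = {6, 0, 2, 3};
      MPI_Group subset_group;
      MPI_Group_incl(whole_group, 4, subset_ranks, &subset_group);

      // Collective over MPI_COMM_WORLD; ranks not in the group receive
      // MPI_COMM_NULL and simply skip the work below.
      MPI_Comm subset_comm;
      MPI_Comm_create(MPI_COMM_WORLD, subset_group, &subset_comm);

      if (subset_comm != MPI_COMM_NULL)
        {
          int subset_rank;
          MPI_Comm_rank(subset_comm, &subset_rank);
          std::printf("world rank %d is subset rank %d\n",
                      world_rank, subset_rank);

          // ...run the solver (or any other code) on subset_comm only,
          // which is what the test does via test_mpi(subset_comm)...

          MPI_Comm_free(&subset_comm);
        }

      MPI_Group_free(&subset_group);
      MPI_Group_free(&whole_group);
      MPI_Finalize();
    }

Any code that is handed subset_comm and still deadlocks here most likely contains a hidden collective over MPI_COMM_WORLD, which is exactly the failure mode the test is designed to catch.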