From 1cd8ee98aad8cc314fecc467a1afcbdfb76fac92 Mon Sep 17 00:00:00 2001
From: Stefano Zampini
Date: Tue, 29 Nov 2022 00:11:29 +0300
Subject: [PATCH] PetscWrappers: remove limitation on multithreading

the test passes for me
---
 source/lac/petsc_vector_base.cc               |  15 +-
 tests/petsc/assemble_matrix_parallel_01.cc    | 493 ++++++++++++++++++
 .../petsc/assemble_matrix_parallel_01.output  |  13 +
 3 files changed, 507 insertions(+), 14 deletions(-)
 create mode 100644 tests/petsc/assemble_matrix_parallel_01.cc
 create mode 100644 tests/petsc/assemble_matrix_parallel_01.output

diff --git a/source/lac/petsc_vector_base.cc b/source/lac/petsc_vector_base.cc
index 309fb0e361..5852be90d9 100644
--- a/source/lac/petsc_vector_base.cc
+++ b/source/lac/petsc_vector_base.cc
@@ -18,7 +18,6 @@
 #ifdef DEAL_II_WITH_PETSC
 
 #  include <deal.II/base/memory_consumption.h>
-#  include <deal.II/base/multithread_info.h>
 
 #  include <deal.II/lac/exceptions.h>
 #  include <deal.II/lac/petsc_compatibility.h>
@@ -125,11 +124,7 @@ namespace PETScWrappers
     : vector(nullptr)
     , ghosted(false)
     , last_action(::dealii::VectorOperation::unknown)
-  {
-    Assert(MultithreadInfo::is_running_single_threaded(),
-           ExcMessage("PETSc does not support multi-threaded access, set "
-                      "the thread limit to 1 in MPI_InitFinalize()."));
-  }
+  {}
 
 
 
@@ -139,10 +134,6 @@ namespace PETScWrappers
     , ghost_indices(v.ghost_indices)
     , last_action(::dealii::VectorOperation::unknown)
   {
-    Assert(MultithreadInfo::is_running_single_threaded(),
-           ExcMessage("PETSc does not support multi-threaded access, set "
-                      "the thread limit to 1 in MPI_InitFinalize()."));
-
     PetscErrorCode ierr = VecDuplicate(v.vector, &vector);
     AssertThrow(ierr == 0, ExcPETScError(ierr));
 
@@ -159,10 +150,6 @@ namespace PETScWrappers
     , last_action(::dealii::VectorOperation::unknown)
   {
     /* TODO GHOSTED */
-    Assert(MultithreadInfo::is_running_single_threaded(),
-           ExcMessage("PETSc does not support multi-threaded access, set "
-                      "the thread limit to 1 in MPI_InitFinalize()."));
-
     const PetscErrorCode ierr =
       PetscObjectReference(reinterpret_cast<PetscObject>(vector));
     AssertNothrow(ierr == 0, ExcPETScError(ierr));
diff --git a/tests/petsc/assemble_matrix_parallel_01.cc b/tests/petsc/assemble_matrix_parallel_01.cc
new file mode 100644
index 0000000000..3f79a650f4
--- /dev/null
+++ b/tests/petsc/assemble_matrix_parallel_01.cc
@@ -0,0 +1,493 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2020 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+// same as deal.II/assemble_matrix_parallel_01, but for PETSc matrices
+// and vectors
+//
+// This test requires PETSc to be configured with the option
+// "--with-threadsafety" in case of debug builds of PETSc.
+// For optimized builds, the above option is needed only in case
+// users want PETSc to produce useful logs with the "-log_view"
+// runtime option.
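+//
+// The test assembles the same Laplace matrix and right hand side twice:
+// once sequentially in assemble_reference(), and once via the
+// multithreaded WorkStream path in assemble_test(). The two results are
+// then compared and must agree without even a roundoff difference.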
+
+#include <deal.II/base/function.h>
+#include <deal.II/base/graph_coloring.h>
+#include <deal.II/base/quadrature_lib.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/work_stream.h>
+
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_refinement.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+
+#include <deal.II/hp/fe_values.h>
+
+#include <deal.II/lac/affine_constraints.h>
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
+#include <deal.II/lac/full_matrix.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_vector.h>
+
+#include <deal.II/numerics/error_estimator.h>
+#include <deal.II/numerics/matrix_tools.h>
+#include <deal.II/numerics/vector_tools.h>
+
+#include <fstream>
+#include <iostream>
+
+#include "../tests.h"
+
+std::ofstream logfile("output");
+
+
+
+namespace Assembly
+{
+  namespace Scratch
+  {
+    template <int dim>
+    struct Data
+    {
+      Data(const hp::FECollection<dim> &fe,
+           const hp::QCollection<dim> & quadrature)
+        : hp_fe_values(fe,
+                       quadrature,
+                       update_values | update_gradients |
+                         update_quadrature_points | update_JxW_values)
+      {}
+
+      Data(const Data &data)
+        : hp_fe_values(data.hp_fe_values.get_mapping_collection(),
+                       data.hp_fe_values.get_fe_collection(),
+                       data.hp_fe_values.get_quadrature_collection(),
+                       data.hp_fe_values.get_update_flags())
+      {}
+
+      hp::FEValues<dim> hp_fe_values;
+    };
+  } // namespace Scratch
+
+  namespace Copy
+  {
+    struct Data
+    {
+      std::vector<types::global_dof_index> local_dof_indices;
+      FullMatrix<double>                   local_matrix;
+      Vector<double>                       local_rhs;
+    };
+  } // namespace Copy
+} // namespace Assembly
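+
+// WorkStream hands each worker thread its own Scratch::Data object
+// (holding the hp::FEValues object, which is expensive to construct),
+// while a Copy::Data object carries each cell's local matrix and right
+// hand side to copy_local_to_global().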
+
+template <int dim>
+class LaplaceProblem
+{
+public:
+  LaplaceProblem();
+  ~LaplaceProblem();
+
+  void
+  run();
+
+private:
+  void
+  setup_system();
+  void
+  test_equality();
+  void
+  assemble_reference();
+  void
+  assemble_test();
+  void
+  solve();
+  void
+  create_coarse_grid();
+  void
+  postprocess();
+
+  void
+  local_assemble(const typename DoFHandler<dim>::active_cell_iterator &cell,
+                 Assembly::Scratch::Data<dim> &                        scratch,
+                 Assembly::Copy::Data &                                data);
+  void
+  copy_local_to_global(const Assembly::Copy::Data &data);
+
+  std::vector<types::global_dof_index>
+  get_conflict_indices(
+    typename DoFHandler<dim>::active_cell_iterator const &cell) const;
+
+  Triangulation<dim> triangulation;
+
+  DoFHandler<dim>          dof_handler;
+  hp::FECollection<dim>    fe_collection;
+  hp::QCollection<dim>     quadrature_collection;
+  hp::QCollection<dim - 1> face_quadrature_collection;
+
+  AffineConstraints<double> constraints;
+
+  PETScWrappers::MPI::SparseMatrix reference_matrix;
+  PETScWrappers::MPI::SparseMatrix test_matrix;
+
+  PETScWrappers::MPI::Vector reference_rhs;
+  PETScWrappers::MPI::Vector test_rhs;
+
+  std::vector<std::vector<typename DoFHandler<dim>::active_cell_iterator>>
+    graph;
+
+  const unsigned int max_degree;
+};
+
+
+
+template <int dim>
+class BoundaryValues : public Function<dim>
+{
+public:
+  BoundaryValues()
+    : Function<dim>()
+  {}
+
+  virtual double
+  value(const Point<dim> &p, const unsigned int component) const;
+};
+
+
+template <int dim>
+double
+BoundaryValues<dim>::value(const Point<dim> &p,
+                           const unsigned int /*component*/) const
+{
+  double sum = 0;
+  for (unsigned int d = 0; d < dim; ++d)
+    sum += std::sin(numbers::PI * p[d]);
+  return sum;
+}
+
+
+template <int dim>
+class RightHandSide : public Function<dim>
+{
+public:
+  RightHandSide()
+    : Function<dim>()
+  {}
+
+  virtual double
+  value(const Point<dim> &p, const unsigned int component) const;
+};
+
+
+template <int dim>
+double
+RightHandSide<dim>::value(const Point<dim> &p,
+                          const unsigned int /*component*/) const
+{
+  double product = 1;
+  for (unsigned int d = 0; d < dim; ++d)
+    product *= (p[d] + 1);
+  return product;
+}
+
+
+template <int dim>
+LaplaceProblem<dim>::LaplaceProblem()
+  : dof_handler(triangulation)
+  , max_degree(5)
+{
+  if (dim == 2)
+    for (unsigned int degree = 2; degree <= max_degree; ++degree)
+      {
+        fe_collection.push_back(FE_Q<dim>(degree));
+        quadrature_collection.push_back(QGauss<dim>(degree + 1));
+        face_quadrature_collection.push_back(QGauss<dim - 1>(degree + 1));
+      }
+  else
+    for (unsigned int degree = 1; degree < max_degree - 1; ++degree)
+      {
+        fe_collection.push_back(FE_Q<dim>(degree));
+        quadrature_collection.push_back(QGauss<dim>(degree + 1));
+        face_quadrature_collection.push_back(QGauss<dim - 1>(degree + 1));
+      }
+}
+
+
+template <int dim>
+LaplaceProblem<dim>::~LaplaceProblem()
+{
+  dof_handler.clear();
+}
+
+
+
+template <int dim>
+std::vector<types::global_dof_index>
+LaplaceProblem<dim>::get_conflict_indices(
+  typename DoFHandler<dim>::active_cell_iterator const &cell) const
+{
+  std::vector<types::global_dof_index> local_dof_indices(
+    cell->get_fe().dofs_per_cell);
+  cell->get_dof_indices(local_dof_indices);
+
+  constraints.resolve_indices(local_dof_indices);
+  return local_dof_indices;
+}
+
+template <int dim>
+void
+LaplaceProblem<dim>::setup_system()
+{
+  reference_matrix.clear();
+  test_matrix.clear();
+  dof_handler.distribute_dofs(fe_collection);
+
+  constraints.clear();
+
+  DoFTools::make_hanging_node_constraints(dof_handler, constraints);
+
+  // add boundary conditions as inhomogeneous constraints here; do it after
+  // having added the hanging node constraints in order to be consistent and
+  // skip dofs that are already constrained (i.e., are hanging nodes on the
+  // boundary in 3D). In contrast to step-27, we choose a sine function.
+  VectorTools::interpolate_boundary_values(dof_handler,
+                                           0,
+                                           BoundaryValues<dim>(),
+                                           constraints);
+  constraints.close();
+
+  graph = GraphColoring::make_graph_coloring(
+    dof_handler.begin_active(),
+    dof_handler.end(),
+    static_cast<std::function<std::vector<types::global_dof_index>(
+      typename DoFHandler<dim>::active_cell_iterator const &)>>(
+      std::bind(&LaplaceProblem<dim>::get_conflict_indices,
+                this,
+                std::placeholders::_1)));
+
+
+  DynamicSparsityPattern csp(dof_handler.n_dofs(), dof_handler.n_dofs());
+  DoFTools::make_sparsity_pattern(dof_handler, csp, constraints, false);
+  reference_matrix.reinit(dof_handler.locally_owned_dofs(),
+                          csp,
+                          MPI_COMM_WORLD);
+  test_matrix.reinit(reference_matrix);
+  reference_rhs.reinit(reference_matrix.locally_owned_range_indices(),
+                       reference_matrix.get_mpi_communicator());
+  test_rhs.reinit(reference_rhs);
+}
+
+
+
+template <int dim>
+void
+LaplaceProblem<dim>::local_assemble(
+  const typename DoFHandler<dim>::active_cell_iterator &cell,
+  Assembly::Scratch::Data<dim> &                        scratch,
+  Assembly::Copy::Data &                                data)
+{
+  const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell;
+
+  data.local_matrix.reinit(dofs_per_cell, dofs_per_cell);
+  data.local_matrix = 0;
+
+  data.local_rhs.reinit(dofs_per_cell);
+  data.local_rhs = 0;
+
+  scratch.hp_fe_values.reinit(cell);
+
+  const FEValues<dim> &fe_values =
+    scratch.hp_fe_values.get_present_fe_values();
+
+  const RightHandSide<dim> rhs_function;
+
+  for (unsigned int q_point = 0; q_point < fe_values.n_quadrature_points;
+       ++q_point)
+    {
+      const double rhs_value =
+        rhs_function.value(fe_values.quadrature_point(q_point), 0);
+      for (unsigned int i = 0; i < dofs_per_cell; ++i)
+        {
+          for (unsigned int j = 0; j < dofs_per_cell; ++j)
+            data.local_matrix(i, j) +=
+              (fe_values.shape_grad(i, q_point) *
+               fe_values.shape_grad(j, q_point) * fe_values.JxW(q_point));
+
+          data.local_rhs(i) += (fe_values.shape_value(i, q_point) * rhs_value *
+                                fe_values.JxW(q_point));
+        }
+    }
+
+  data.local_dof_indices.resize(dofs_per_cell);
+  cell->get_dof_indices(data.local_dof_indices);
+}
+
+
+
+template <int dim>
+void
+LaplaceProblem<dim>::copy_local_to_global(const Assembly::Copy::Data &data)
+{
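+  // Because cells of the same color share no (possibly constrained)
+  // degrees of freedom, concurrent calls to this function write into
+  // disjoint entries of the global PETSc matrix and vector; this is
+  // exactly the kind of access the removed assertions used to forbid.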
+  constraints.distribute_local_to_global(data.local_matrix,
+                                         data.local_rhs,
+                                         data.local_dof_indices,
+                                         test_matrix,
+                                         test_rhs);
+}
+
+
+
+template <int dim>
+void
+LaplaceProblem<dim>::assemble_reference()
+{
+  test_matrix = 0;
+  test_rhs    = 0;
+
+  Assembly::Copy::Data         copy_data;
+  Assembly::Scratch::Data<dim> assembly_data(fe_collection,
+                                             quadrature_collection);
+
+  for (unsigned int color = 0; color < graph.size(); ++color)
+    for (typename std::vector<
+           typename DoFHandler<dim>::active_cell_iterator>::const_iterator p =
+           graph[color].begin();
+         p != graph[color].end();
+         ++p)
+      {
+        local_assemble(*p, assembly_data, copy_data);
+        copy_local_to_global(copy_data);
+      }
+  test_matrix.compress(VectorOperation::add);
+  test_rhs.compress(VectorOperation::add);
+
+  reference_matrix.add(1., test_matrix);
+  reference_rhs = test_rhs;
+}
+
+
+
+template <int dim>
+void
+LaplaceProblem<dim>::assemble_test()
+{
+  test_matrix = 0;
+  test_rhs    = 0;
+
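+  // WorkStream::run() is given the colored cell graph: all cells of one
+  // color are assembled and their contributions copied into the global
+  // objects in parallel, so PETSc has to tolerate MatSetValues() and
+  // VecSetValues() being called from several threads at once (hence the
+  // "--with-threadsafety" remark at the top of this file).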
+  WorkStream::run(graph,
+                  std::bind(&LaplaceProblem<dim>::local_assemble,
+                            this,
+                            std::placeholders::_1,
+                            std::placeholders::_2,
+                            std::placeholders::_3),
+                  std::bind(&LaplaceProblem<dim>::copy_local_to_global,
+                            this,
+                            std::placeholders::_1),
+                  Assembly::Scratch::Data<dim>(fe_collection,
+                                               quadrature_collection),
+                  Assembly::Copy::Data(),
+                  2 * MultithreadInfo::n_threads(),
+                  1);
+  test_matrix.compress(VectorOperation::add);
+  test_rhs.compress(VectorOperation::add);
+
+  test_matrix.add(-1, reference_matrix);
+
+  // there should not even be roundoff difference between matrices
+  deallog << "error in matrix: " << test_matrix.frobenius_norm() << std::endl;
+  test_rhs.add(-1., reference_rhs);
+  deallog << "error in vector: " << test_rhs.l2_norm() << std::endl;
+}
+
+
+
+template <int dim>
+void
+LaplaceProblem<dim>::postprocess()
+{
+  Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
+  for (unsigned int i = 0; i < estimated_error_per_cell.size(); ++i)
+    estimated_error_per_cell(i) = i;
+
+  GridRefinement::refine_and_coarsen_fixed_number(triangulation,
+                                                  estimated_error_per_cell,
+                                                  0.3,
+                                                  0.03);
+  triangulation.execute_coarsening_and_refinement();
+
+  for (typename DoFHandler<dim>::active_cell_iterator cell =
+         dof_handler.begin_active();
+       cell != dof_handler.end();
+       ++cell)
+    cell->set_active_fe_index(rand() % fe_collection.size());
+}
+
+
+
+template <int dim>
+void
+LaplaceProblem<dim>::run()
+{
+  for (unsigned int cycle = 0; cycle < 3; ++cycle)
+    {
+      if (cycle == 0)
+        {
+          GridGenerator::hyper_cube(triangulation, 0, 1/* ,
+                                    Point<dim>(),
+                                    0.5, 1., (dim==3) ? 96 : 12, false*/);
+          triangulation.refine_global(2);
+        }
+
+      setup_system();
+
+      assemble_reference();
+      assemble_test();
+
+      if (cycle < 2)
+        postprocess();
+    }
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+  deallog << std::setprecision(2);
+  logfile << std::setprecision(2);
+  deallog.attach(logfile);
+
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(
+    argc, argv, testing_max_num_threads());
+
+  {
+    deallog.push("2d");
+    LaplaceProblem<2> laplace_problem;
+    laplace_problem.run();
+    deallog.pop();
+  }
+
+  {
+    deallog.push("3d");
+    LaplaceProblem<3> laplace_problem;
+    laplace_problem.run();
+    deallog.pop();
+  }
+}
diff --git a/tests/petsc/assemble_matrix_parallel_01.output b/tests/petsc/assemble_matrix_parallel_01.output
new file mode 100644
index 0000000000..3b8bf4746d
--- /dev/null
+++ b/tests/petsc/assemble_matrix_parallel_01.output
@@ -0,0 +1,13 @@
+
+DEAL:2d::error in matrix: 0
+DEAL:2d::error in vector: 0
+DEAL:2d::error in matrix: 0
+DEAL:2d::error in vector: 0
+DEAL:2d::error in matrix: 0
+DEAL:2d::error in vector: 0
+DEAL:3d::error in matrix: 0
+DEAL:3d::error in vector: 0
+DEAL:3d::error in matrix: 0
+DEAL:3d::error in vector: 0
+DEAL:3d::error in matrix: 0
+DEAL:3d::error in vector: 0
-- 
2.39.5