From 8dd9b653a8fdb650c4063aa3bd97185b470c18ba Mon Sep 17 00:00:00 2001 From: Martin Kronbichler Date: Wed, 16 Feb 2022 20:35:38 +0100 Subject: [PATCH] Add matrix-free benchmark based on step-37 --- tests/performance/timing_step_37.cc | 681 ++++++++++++++++++ ...p_37.threads=1.mpirun=max.release.run_only | 0 2 files changed, 681 insertions(+) create mode 100644 tests/performance/timing_step_37.cc create mode 100644 tests/performance/timing_step_37.threads=1.mpirun=max.release.run_only diff --git a/tests/performance/timing_step_37.cc b/tests/performance/timing_step_37.cc new file mode 100644 index 0000000000..9fb9d8c376 --- /dev/null +++ b/tests/performance/timing_step_37.cc @@ -0,0 +1,681 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2022 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + +// +// Description: +// +// A performance benchmark based on step 37 (without variable coefficient) +// that measures timings for grid creation, setup of unknowns, multigrid +// levels and solve for a Poisson problem with the performance-oriented +// matrix-free framework. +// +// Status: experimental +// + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#define ENABLE_MPI + +#include "performance_test_driver.h" + +using namespace dealii; + +dealii::ConditionalOStream debug_output(std::cout, false); + +const unsigned int degree_finite_element = 3; + + + +template +class LaplaceOperator + : public MatrixFreeOperators::Base> +{ +public: + using value_type = number; + + LaplaceOperator(); + + void + vmult(LinearAlgebra::distributed::Vector & dst, + const LinearAlgebra::distributed::Vector &src) const; + + void + vmult(LinearAlgebra::distributed::Vector & dst, + const LinearAlgebra::distributed::Vector &src, + const std::function + &operation_before_loop, + const std::function + &operation_after_loop) const; + + virtual void + compute_diagonal() override; + +private: + virtual void + apply_add( + LinearAlgebra::distributed::Vector & dst, + const LinearAlgebra::distributed::Vector &src) const override; + + void + local_apply(const MatrixFree & data, + LinearAlgebra::distributed::Vector & dst, + const LinearAlgebra::distributed::Vector &src, + const std::pair &cell_range) const; + + void + local_compute_diagonal( + const MatrixFree & data, + LinearAlgebra::distributed::Vector & dst, + const unsigned int & dummy, + const std::pair &cell_range) const; +}; + + + +template +LaplaceOperator::LaplaceOperator() + : MatrixFreeOperators::Base>() +{} + + + +template +void +LaplaceOperator::local_apply( + const MatrixFree & data, + LinearAlgebra::distributed::Vector & dst, + const LinearAlgebra::distributed::Vector &src, + const std::pair & cell_range) const +{ + FEEvaluation phi(data); + + 
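+  // Each index in [cell_range.first, cell_range.second) addresses a batch
+  // of cells that the matrix-free framework processes together with SIMD
+  // vectorization. gather_evaluate() reads the local entries of src and
+  // evaluates their gradients at the quadrature points, submit_gradient()
+  // queues the gradient (times JxW) for testing by the gradients of the
+  // shape functions, and integrate_scatter() sums over the quadrature
+  // points and accumulates the cell contribution into dst.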
for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell) + { + phi.reinit(cell); + phi.gather_evaluate(src, EvaluationFlags::gradients); + for (unsigned int q = 0; q < phi.n_q_points; ++q) + phi.submit_gradient(phi.get_gradient(q), q); + phi.integrate_scatter(EvaluationFlags::gradients, dst); + } +} + + + +template +void +LaplaceOperator::apply_add( + LinearAlgebra::distributed::Vector & dst, + const LinearAlgebra::distributed::Vector &src) const +{ + this->data->cell_loop(&LaplaceOperator::local_apply, this, dst, src); +} + + + +template +void +LaplaceOperator::vmult( + LinearAlgebra::distributed::Vector & dst, + const LinearAlgebra::distributed::Vector &src) const +{ + this->data->cell_loop(&LaplaceOperator::local_apply, this, dst, src, true); + for (unsigned int i : this->data->get_constrained_dofs()) + dst.local_element(i) = src.local_element(i); +} + + + +template +void +LaplaceOperator::vmult( + LinearAlgebra::distributed::Vector & dst, + const LinearAlgebra::distributed::Vector &src, + const std::function + &operation_before_loop, + const std::function + &operation_after_loop) const +{ + this->data->cell_loop(&LaplaceOperator::local_apply, + this, + dst, + src, + operation_before_loop, + operation_after_loop); + for (unsigned int i : this->data->get_constrained_dofs()) + dst.local_element(i) = src.local_element(i); +} + + + +template +void +LaplaceOperator::compute_diagonal() +{ + this->inverse_diagonal_entries.reset( + new DiagonalMatrix>()); + LinearAlgebra::distributed::Vector &inverse_diagonal = + this->inverse_diagonal_entries->get_vector(); + this->data->initialize_dof_vector(inverse_diagonal); + unsigned int dummy = 0; + this->data->cell_loop(&LaplaceOperator::local_compute_diagonal, + this, + inverse_diagonal, + dummy); + + this->set_constrained_entries_to_one(inverse_diagonal); + + for (unsigned int i = 0; i < inverse_diagonal.locally_owned_size(); ++i) + { + Assert(inverse_diagonal.local_element(i) > 0., + ExcMessage("No diagonal entry in a positive definite operator " + "should be zero")); + inverse_diagonal.local_element(i) = + 1. 
/ inverse_diagonal.local_element(i); + } +} + + + +template +void +LaplaceOperator::local_compute_diagonal( + const MatrixFree & data, + LinearAlgebra::distributed::Vector &dst, + const unsigned int &, + const std::pair &cell_range) const +{ + FEEvaluation phi(data); + + AlignedVector> diagonal(phi.dofs_per_cell); + + for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell) + { + phi.reinit(cell); + for (unsigned int i = 0; i < phi.dofs_per_cell; ++i) + { + for (unsigned int j = 0; j < phi.dofs_per_cell; ++j) + phi.submit_dof_value(VectorizedArray(), j); + phi.submit_dof_value(make_vectorized_array(1.), i); + + phi.evaluate(EvaluationFlags::gradients); + for (unsigned int q = 0; q < phi.n_q_points; ++q) + phi.submit_gradient(phi.get_gradient(q), q); + phi.integrate(EvaluationFlags::gradients); + diagonal[i] = phi.get_dof_value(i); + } + for (unsigned int i = 0; i < phi.dofs_per_cell; ++i) + phi.submit_dof_value(diagonal[i], i); + phi.distribute_local_to_global(dst); + } +} + + + +template +class MGTransferMF + : public MGTransferMatrixFree +{ +public: + void + setup(const MGLevelObject &laplace, + const MGConstrainedDoFs & mg_constrained_dofs) + { + this->MGTransferMatrixFree:: + initialize_constraints(mg_constrained_dofs); + laplace_operator = &laplace; + } + + template + void + copy_to_mg( + const DoFHandler &mg_dof, + MGLevelObject< + LinearAlgebra::distributed::Vector> &dst, + const InVector &src) const + { + for (unsigned int level = dst.min_level(); level <= dst.max_level(); + ++level) + (*laplace_operator)[level].initialize_dof_vector(dst[level]); + MGLevelGlobalTransfer>::copy_to_mg(mg_dof, dst, src); + } + +private: + const MGLevelObject *laplace_operator; +}; + + + +template +class LaplaceProblem +{ +public: + LaplaceProblem(); + std::vector + run(); + +private: + void + setup_grid(); + void + setup_dofs(); + void + setup_matrix_free(); + void + assemble_rhs(); + void + setup_transfer(); + void + setup_smoother(); + void + solve(); + + parallel::distributed::Triangulation triangulation; + + FE_Q fe; + DoFHandler dof_handler; + + MappingQ1 mapping; + + AffineConstraints constraints; + using SystemMatrixType = LaplaceOperator; + SystemMatrixType system_matrix; + + MGConstrainedDoFs mg_constrained_dofs; + using LevelMatrixType = LaplaceOperator; + MGLevelObject mg_matrices; + + LinearAlgebra::distributed::Vector solution; + LinearAlgebra::distributed::Vector system_rhs; + + MGTransferMF mg_transfer; + + using SmootherType = + PreconditionChebyshev>; + mg::SmootherRelaxation> + mg_smoother; +}; + + + +template +LaplaceProblem::LaplaceProblem() +#ifdef DEAL_II_WITH_P4EST + : triangulation( + MPI_COMM_WORLD, + Triangulation::limit_level_difference_at_vertices, + parallel::distributed::Triangulation::construct_multigrid_hierarchy) +#else + : triangulation(Triangulation::limit_level_difference_at_vertices) +#endif + , fe(degree_finite_element) + , dof_handler(triangulation) +{} + + + +template +void +LaplaceProblem::setup_grid() +{ + GridGenerator::hyper_cube(triangulation, 0., 1.); + switch (get_testing_environment()) + { + case TestingEnvironment::light: + triangulation.refine_global(5); + break; + case TestingEnvironment::medium: + triangulation.refine_global(6); + break; + case TestingEnvironment::heavy: + triangulation.refine_global(7); + break; + } +} + + + +template +void +LaplaceProblem::setup_dofs() +{ + system_matrix.clear(); + mg_matrices.clear_elements(); + + dof_handler.distribute_dofs(fe); + dof_handler.distribute_mg_dofs(); + + debug_output << "Number 
of DoFs: " << dof_handler.n_dofs() << std::endl; + + IndexSet locally_relevant_dofs; + DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs); + + constraints.clear(); + constraints.reinit(locally_relevant_dofs); + DoFTools::make_hanging_node_constraints(dof_handler, constraints); + VectorTools::interpolate_boundary_values( + mapping, dof_handler, 0, Functions::ZeroFunction(), constraints); + constraints.close(); +} + + + +template +void +LaplaceProblem::setup_matrix_free() +{ + typename MatrixFree::AdditionalData additional_data; + additional_data.tasks_parallel_scheme = + MatrixFree::AdditionalData::none; + additional_data.mapping_update_flags = + (update_gradients | update_JxW_values | update_quadrature_points); + std::shared_ptr> system_mf_storage( + new MatrixFree()); + system_mf_storage->reinit(mapping, + dof_handler, + constraints, + QGauss<1>(fe.degree + 1), + additional_data); + system_matrix.initialize(system_mf_storage); + + + system_matrix.initialize_dof_vector(solution); + system_matrix.initialize_dof_vector(system_rhs); + + const unsigned int nlevels = triangulation.n_global_levels(); + mg_matrices.resize(0, nlevels - 1); + + std::set dirichlet_boundary; + dirichlet_boundary.insert(0); + mg_constrained_dofs.initialize(dof_handler); + mg_constrained_dofs.make_zero_boundary_constraints(dof_handler, + dirichlet_boundary); + + for (unsigned int level = 0; level < nlevels; ++level) + { + IndexSet relevant_dofs; + DoFTools::extract_locally_relevant_level_dofs(dof_handler, + level, + relevant_dofs); + AffineConstraints level_constraints; + level_constraints.reinit(relevant_dofs); + level_constraints.add_lines( + mg_constrained_dofs.get_boundary_indices(level)); + level_constraints.close(); + + typename MatrixFree::AdditionalData additional_data; + additional_data.tasks_parallel_scheme = + MatrixFree::AdditionalData::none; + additional_data.mapping_update_flags = + (update_gradients | update_JxW_values | update_quadrature_points); + additional_data.mg_level = level; + std::shared_ptr> mg_mf_storage_level( + new MatrixFree()); + mg_mf_storage_level->reinit(mapping, + dof_handler, + level_constraints, + QGauss<1>(fe.degree + 1), + additional_data); + + mg_matrices[level].initialize(mg_mf_storage_level, + mg_constrained_dofs, + level); + } +} + + + +template +void +LaplaceProblem::assemble_rhs() +{ + Timer time; + + system_rhs = 0; + FEEvaluation phi( + *system_matrix.get_matrix_free()); + for (unsigned int cell = 0; + cell < system_matrix.get_matrix_free()->n_cell_batches(); + ++cell) + { + phi.reinit(cell); + for (unsigned int q = 0; q < phi.n_q_points; ++q) + phi.submit_value(make_vectorized_array(1.0), q); + phi.integrate(EvaluationFlags::values); + phi.distribute_local_to_global(system_rhs); + } + system_rhs.compress(VectorOperation::add); +} + + + +template +void +LaplaceProblem::setup_transfer() +{ + mg_transfer.setup(mg_matrices, mg_constrained_dofs); + std::vector> partitioners( + dof_handler.get_triangulation().n_global_levels()); + for (unsigned int level = 0; level < partitioners.size(); ++level) + { + LinearAlgebra::distributed::Vector vec; + mg_matrices[level].initialize_dof_vector(vec); + partitioners[level] = vec.get_partitioner(); + } + mg_transfer.build(dof_handler, partitioners); +} + + + +template +void +LaplaceProblem::setup_smoother() +{ + MGLevelObject smoother_data; + smoother_data.resize(0, triangulation.n_global_levels() - 1); + for (unsigned int level = 0; level < triangulation.n_global_levels(); ++level) + { + if (level > 0) + { + 
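+          // Chebyshev smoother parameters on the refined levels, as in
+          // step-37: act on the upper part of the spectrum down to
+          // lambda_max / 15 with a degree-5 polynomial, estimating
+          // lambda_max with 10 CG iterations.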
smoother_data[level].smoothing_range = 15.; + smoother_data[level].degree = 5; + smoother_data[level].eig_cg_n_iterations = 10; + } + else + { + smoother_data[0].smoothing_range = 1e-3; + smoother_data[0].degree = numbers::invalid_unsigned_int; + smoother_data[0].eig_cg_n_iterations = mg_matrices[0].m(); + } + mg_matrices[level].compute_diagonal(); + smoother_data[level].preconditioner = + mg_matrices[level].get_matrix_diagonal_inverse(); + } + mg_smoother.initialize(mg_matrices, smoother_data); +} + + +template +void +LaplaceProblem::solve() +{ + MGCoarseGridApplySmoother> + mg_coarse; + mg_coarse.initialize(mg_smoother); + + mg::Matrix> mg_matrix(mg_matrices); + + MGLevelObject> + mg_interface_matrices; + mg_interface_matrices.resize(0, triangulation.n_global_levels() - 1); + for (unsigned int level = 0; level < triangulation.n_global_levels(); ++level) + mg_interface_matrices[level].initialize(mg_matrices[level]); + mg::Matrix> mg_interface( + mg_interface_matrices); + + Multigrid> mg( + mg_matrix, mg_coarse, mg_transfer, mg_smoother, mg_smoother); + mg.set_edge_matrices(mg_interface, mg_interface); + + PreconditionMG, + MGTransferMF> + preconditioner(dof_handler, mg, mg_transfer); + + + SolverControl solver_control(100, 1e-12 * system_rhs.l2_norm()); + SolverCG> cg(solver_control); + + constraints.set_zero(solution); + cg.solve(system_matrix, solution, system_rhs, preconditioner); +} + + + +template +std::vector +LaplaceProblem::run() +{ + std::map timer; + + timer["setup_grid"].start(); + setup_grid(); + timer["setup_grid"].stop(); + + timer["setup_dofs"].start(); + setup_dofs(); + timer["setup_dofs"].stop(); + + timer["setup_matrix_free"].start(); + setup_matrix_free(); + timer["setup_matrix_free"].stop(); + + timer["assemble_rhs"].start(); + assemble_rhs(); + timer["assemble_rhs"].stop(); + + timer["setup_transfer"].start(); + setup_transfer(); + timer["setup_transfer"].stop(); + + timer["setup_smoother"].start(); + setup_smoother(); + timer["setup_smoother"].stop(); + + timer["solve"].start(); + solve(); + timer["solve"].stop(); + + const unsigned int n_repeat = 50; + timer["matvec_double"].start(); + for (unsigned int t = 0; t < n_repeat; ++t) + system_matrix.vmult(system_rhs, solution); + timer["matvec_double"].stop(); + + LinearAlgebra::distributed::Vector vec1, vec2; + mg_matrices[mg_matrices.max_level()].initialize_dof_vector(vec1); + vec2.reinit(vec1); + timer["matvec_float"].start(); + for (unsigned int t = 0; t < n_repeat; ++t) + mg_matrices[mg_matrices.max_level()].vmult(vec2, vec1); + timer["matvec_float"].stop(); + + mg_matrices[mg_matrices.max_level() - 1].initialize_dof_vector(vec1); + vec2.reinit(vec1); + timer["matvec_float_coarser"].start(); + for (unsigned int t = 0; t < n_repeat; ++t) + mg_matrices[mg_matrices.max_level() - 1].vmult(vec2, vec1); + timer["matvec_float_coarser"].stop(); + + debug_output << std::endl; + return {timer["setup_grid"].wall_time(), + timer["setup_dofs"].wall_time(), + timer["setup_matrix_free"].wall_time(), + timer["assemble_rhs"].wall_time(), + timer["setup_transfer"].wall_time(), + timer["setup_smoother"].wall_time(), + timer["solve"].wall_time(), + timer["matvec_double"].wall_time(), + timer["matvec_float"].wall_time(), + timer["matvec_float_coarser"].wall_time()}; +} + + +std::tuple> +describe_measurements() +{ + return {Metric::timing, + 4, + {"setup_grid", + "setup_dofs", + "setup_matrix_free", + "assemble_rhs", + "setup_transfer", + "setup_smoother", + "solve", + "matvec_double", + "matvec_float", + "matvec_float_coarser"}}; +} + + 
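+// Hook for the benchmark harness in performance_test_driver.h: run the
+// three-dimensional problem once and return the wall-clock timings in the
+// order announced by describe_measurements().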
+Measurement +perform_single_measurement() +{ + return LaplaceProblem<3>().run(); +} diff --git a/tests/performance/timing_step_37.threads=1.mpirun=max.release.run_only b/tests/performance/timing_step_37.threads=1.mpirun=max.release.run_only new file mode 100644 index 0000000000..e69de29bb2 -- 2.39.5