From c8a51525452101f2a3cacf22e3be218913d8d078 Mon Sep 17 00:00:00 2001
From: Peter Munch
Date: Thu, 20 Jul 2023 11:41:10 +0200
Subject: [PATCH] Add performance test for CUDAWrapper::MatrixFree

---
 .../performance/timing_matrix_free_kokkos.cc | 342 ++++++++++++++++++
 ...ds=1.mpirun=max.exclusive.release.run_only | 0
 2 files changed, 342 insertions(+)
 create mode 100644 tests/performance/timing_matrix_free_kokkos.cc
 create mode 100644 tests/performance/timing_matrix_free_kokkos.threads=1.mpirun=max.exclusive.release.run_only

diff --git a/tests/performance/timing_matrix_free_kokkos.cc b/tests/performance/timing_matrix_free_kokkos.cc
new file mode 100644
index 0000000000..a9283e7dfe
--- /dev/null
+++ b/tests/performance/timing_matrix_free_kokkos.cc
@@ -0,0 +1,342 @@
+/* ---------------------------------------------------------------------
+ *
+ * Copyright (C) 2023 by the deal.II authors
+ *
+ * This file is part of the deal.II library.
+ *
+ * The deal.II library is free software; you can use it, redistribute
+ * it, and/or modify it under the terms of the GNU Lesser General
+ * Public License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ * The full text of the license can be found in the file LICENSE.md at
+ * the top level directory of deal.II.
+ *
+ * ---------------------------------------------------------------------
+ *
+ * Description:
+ *
+ * This test compares the MatrixFree and CUDAWrappers::MatrixFree
+ * infrastructure on the CPU. It measures both the initialization
+ * costs and the costs of an operator evaluation.
+ * CUDAWrappers::MatrixFree was originally written with CUDA and now
+ * uses Kokkos as its backend and, as a consequence, favors GPU
+ * hardware. This performance test is meant to track improvements in
+ * the performance of CUDAWrappers::MatrixFree on the CPU.
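+ *
+ * The performance test driver reports four numbers: the setup and
+ * operator-evaluation ("vmult") times of MatrixFree ("mf_setup",
+ * "mf_vmult") and of CUDAWrappers::MatrixFree ("mf_kokkos_setup",
+ * "mf_kokkos_vmult").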
+ *
+ * Status: experimental
+ */
+
+#include <deal.II/base/convergence_table.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_handler.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/mapping_q1.h>
+
+#include <deal.II/grid/grid_generator.h>
+
+#include <deal.II/lac/affine_constraints.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/read_write_vector.h>
+
+#include <deal.II/matrix_free/cuda_fe_evaluation.h>
+#include <deal.II/matrix_free/cuda_matrix_free.h>
+#include <deal.II/matrix_free/fe_evaluation.h>
+#include <deal.II/matrix_free/matrix_free.h>
+
+#include <deal.II/numerics/vector_tools.h>
+
+#define ENABLE_MPI
+
+#include "performance_test_driver.h"
+
+using namespace dealii;
+
+
+
+template <int dim, int fe_degree, typename Number, typename MemorySpace>
+class LaplaceOperator;
+
+template <int dim, int fe_degree, typename Number>
+class LaplaceOperator<dim, fe_degree, Number, MemorySpace::Host>
+{
+public:
+  using VectorType =
+    LinearAlgebra::distributed::Vector<Number, MemorySpace::Host>;
+
+  LaplaceOperator(const Mapping<dim> &             mapping,
+                  const DoFHandler<dim> &          dof_handler,
+                  const AffineConstraints<Number> &constraints,
+                  const Quadrature<1> &            quadrature)
+  {
+    typename MatrixFree<dim, Number>::AdditionalData additional_data;
+    additional_data.mapping_update_flags = update_gradients;
+
+    matrix_free.reinit(
+      mapping, dof_handler, constraints, quadrature, additional_data);
+  }
+
+  void
+  initialize_dof_vector(VectorType &vec) const
+  {
+    matrix_free.initialize_dof_vector(vec);
+  }
+
+  void
+  vmult(VectorType &dst, const VectorType &src) const
+  {
+    matrix_free.cell_loop(&LaplaceOperator::local_apply, this, dst, src);
+  }
+
+private:
+  void
+  local_apply(const MatrixFree<dim, Number> &              data,
+              VectorType &                                 dst,
+              const VectorType &                           src,
+              const std::pair<unsigned int, unsigned int> &cell_range) const
+  {
+    FEEvaluation<dim, fe_degree, fe_degree + 1, 1, Number> phi(data);
+    for (unsigned int cell = cell_range.first; cell < cell_range.second;
+         ++cell)
+      {
+        phi.reinit(cell);
+
+        phi.read_dof_values_plain(src);
+        phi.evaluate(EvaluationFlags::gradients);
+        for (unsigned int q = 0; q < phi.n_q_points; ++q)
+          phi.submit_gradient(phi.get_gradient(q), q);
+        phi.integrate(EvaluationFlags::gradients);
+        phi.distribute_local_to_global(dst);
+      }
+  }
+
+  MatrixFree<dim, Number> matrix_free;
+};
+
+
+
+template <int dim, int fe_degree, typename Number>
+class LaplaceOperatorQuad
+{
+public:
+  DEAL_II_HOST_DEVICE void
+  operator()(
+    CUDAWrappers::FEEvaluation<dim, fe_degree, fe_degree + 1, 1, Number>
+      *       fe_eval,
+    const int q_point) const
+  {
+    fe_eval->submit_gradient(fe_eval->get_gradient(q_point), q_point);
+  }
+};
+
+template <int dim, int fe_degree, typename Number>
+class LaplaceOperatorLocal
+{
+public:
+  DEAL_II_HOST_DEVICE void
+  operator()(
+    const unsigned int                                          cell,
+    const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data,
+    CUDAWrappers::SharedData<dim, Number> *                     shared_data,
+    const Number *                                              src,
+    Number *                                                    dst) const
+  {
+    (void)cell; // TODO?
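+
+    // Cell-local application of the Laplace operator: gather the cell's
+    // DoF values, evaluate the gradients at the quadrature points, queue
+    // them for integration against the test-function gradients, and
+    // scatter the result back into the global vector.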
+    CUDAWrappers::FEEvaluation<dim, fe_degree, fe_degree + 1, 1, Number>
+      fe_eval(/*cell,*/ gpu_data, shared_data);
+    fe_eval.read_dof_values(src);
+    fe_eval.evaluate(false, true);
+    fe_eval.apply_for_each_quad_point(
+      LaplaceOperatorQuad<dim, fe_degree, Number>());
+    fe_eval.integrate(false, true);
+    fe_eval.distribute_local_to_global(dst);
+  }
+  static const unsigned int n_dofs_1d    = fe_degree + 1;
+  static const unsigned int n_local_dofs = Utilities::pow(fe_degree + 1, dim);
+  static const unsigned int n_q_points   = Utilities::pow(fe_degree + 1, dim);
+};
+
+template <int dim, int fe_degree, typename Number>
+class LaplaceOperator<dim, fe_degree, Number, MemorySpace::Default>
+{
+public:
+  using VectorType =
+    LinearAlgebra::distributed::Vector<Number, MemorySpace::Default>;
+
+  LaplaceOperator(const Mapping<dim> &             mapping,
+                  const DoFHandler<dim> &          dof_handler,
+                  const AffineConstraints<Number> &constraints,
+                  const Quadrature<1> &            quadrature)
+  {
+    typename CUDAWrappers::MatrixFree<dim, Number>::AdditionalData
+      additional_data;
+    additional_data.mapping_update_flags =
+      update_JxW_values | update_gradients |
+      update_quadrature_points; // TODO: remove update_quadrature_points
+
+    matrix_free.reinit(
+      mapping, dof_handler, constraints, quadrature, additional_data);
+  }
+
+  void
+  initialize_dof_vector(VectorType &vec) const
+  {
+    matrix_free.initialize_dof_vector(vec);
+  }
+
+  void
+  vmult(VectorType &dst, const VectorType &src) const
+  {
+    LaplaceOperatorLocal<dim, fe_degree, Number> local_operator;
+    matrix_free.cell_loop(local_operator, src, dst);
+  }
+
+private:
+  CUDAWrappers::MatrixFree<dim, Number> matrix_free;
+};
+
+
+
+template <int dim, typename T>
+class AnalyticalFunction : public Function<dim, T>
+{
+public:
+  virtual T
+  value(const Point<dim> &p, const unsigned int component = 0) const override
+  {
+    (void)component;
+
+    double temp = 0.0;
+
+    for (unsigned int d = 0; d < dim; ++d)
+      temp += std::sin(p[d]);
+
+    return temp;
+  }
+};
+
+
+
+template <int dim, int degree, typename MemorySpace>
+std::vector<double>
+run(const unsigned int n_refinements)
+{
+  ConvergenceTable table;
+
+  const MPI_Comm comm = MPI_COMM_WORLD;
+
+  using Number     = double;
+  using VectorType = LinearAlgebra::distributed::Vector<Number, MemorySpace>;
+
+  const unsigned int n_repetitions = 100;
+
+  parallel::distributed::Triangulation<dim> tria(comm);
+
+  GridGenerator::hyper_cube(tria);
+  tria.refine_global(n_refinements);
+
+  table.add_value("n_levels", tria.n_global_levels());
+  table.add_value("degree", degree);
+
+  table.add_value("n_cells", tria.n_global_active_cells());
+
+  const MappingQ1<dim> mapping;
+  const FE_Q<dim>      fe(degree);
+  const QGauss<1>      quadrature(degree + 1);
+
+  DoFHandler<dim> dof_handler(tria);
+  dof_handler.distribute_dofs(fe);
+
+  table.add_value("n_dofs", dof_handler.n_dofs());
+
+  AffineConstraints<Number> constraints;
+
+  std::chrono::time_point<std::chrono::system_clock> temp =
+    std::chrono::system_clock::now();
+  LaplaceOperator<dim, degree, Number, MemorySpace> laplace_operator(
+    mapping, dof_handler, constraints, quadrature);
+
+  const double dt_setup = std::chrono::duration_cast<std::chrono::nanoseconds>(
+                            std::chrono::system_clock::now() - temp)
+                            .count() /
+                          1e9;
+
+  VectorType src, dst;
+
+  laplace_operator.initialize_dof_vector(src);
+  laplace_operator.initialize_dof_vector(dst);
+
+  {
+    LinearAlgebra::distributed::Vector<Number> src_host(src.get_partitioner());
+
+    VectorTools::interpolate(dof_handler,
+                             AnalyticalFunction<dim, Number>(),
+                             src_host);
+
+    LinearAlgebra::ReadWriteVector<Number> rw_vector(
+      src.get_partitioner()->locally_owned_range());
+    rw_vector.import(src_host, VectorOperation::insert);
+    src.import(rw_vector, VectorOperation::insert);
+
+    dst = 0.0;
+  }
+
+  double dt_vmult = 0;
+
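+  // Time n_repetitions applications of the operator; the barriers ensure
+  // that all ranks enter and leave each vmult together, and the measured
+  // time is afterwards averaged over ranks and repetitions.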
+  for (unsigned int i = 0; i < n_repetitions; ++i)
+    {
+      MPI_Barrier(MPI_COMM_WORLD);
+
+      std::chrono::time_point<std::chrono::system_clock> temp =
+        std::chrono::system_clock::now();
+
+      laplace_operator.vmult(dst, src);
+
+      MPI_Barrier(MPI_COMM_WORLD);
+
+      const double dt =
+        std::chrono::duration_cast<std::chrono::nanoseconds>(
+          std::chrono::system_clock::now() - temp)
+          .count() /
+        1e9;
+
+      dt_vmult += dt;
+    }
+
+  dt_vmult = Utilities::MPI::sum(dt_vmult, MPI_COMM_WORLD) /
+             Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD) / n_repetitions;
+
+
+  table.add_value("time_setup", dt_setup);
+  table.set_scientific("time_setup", true);
+  table.add_value("time_avg", dt_vmult);
+  table.set_scientific("time_avg", true);
+
+  if (Utilities::MPI::this_mpi_process(comm) == 0)
+    {
+#if 0
+      table.write_text(std::cout);
+      std::cout << std::endl;
+#endif
+    }
+
+  return {dt_setup, dt_vmult};
+}
+
+
+std::tuple<Metric, unsigned int, std::vector<std::string>>
+describe_measurements()
+{
+  return {Metric::timing,
+          4,
+          {"mf_setup", "mf_vmult", "mf_kokkos_setup", "mf_kokkos_vmult"}};
+}
+
+Measurement
+perform_single_measurement()
+{
+  const unsigned int dim           = 3;
+  const unsigned int fe_degree     = 4;
+  const unsigned int n_refinements = 5;
+
+  const auto result0 = run<dim, fe_degree, MemorySpace::Host>(n_refinements);
+  const auto result1 =
+    run<dim, fe_degree, MemorySpace::Default>(n_refinements);
+
+  return {result0[0], result0[1], result1[0], result1[1]};
+}
diff --git a/tests/performance/timing_matrix_free_kokkos.threads=1.mpirun=max.exclusive.release.run_only b/tests/performance/timing_matrix_free_kokkos.threads=1.mpirun=max.exclusive.release.run_only
new file mode 100644
index 0000000000..e69de29bb2
--
2.39.5