From 164ac1d6136503db44f7ca0c2be1ae294ca47303 Mon Sep 17 00:00:00 2001
From: Bruno Turcksin
Date: Fri, 14 Dec 2018 14:48:26 +0000
Subject: [PATCH] Improve documentation of functors and replace functor by
 Functor

---
 .../deal.II/matrix_free/cuda_fe_evaluation.h  | 19 +++++++---
 .../deal.II/matrix_free/cuda_matrix_free.h    | 31 +++++++++++-----
 .../matrix_free/cuda_matrix_free.templates.h  | 36 +++++++++----------
 3 files changed, 55 insertions(+), 31 deletions(-)

diff --git a/include/deal.II/matrix_free/cuda_fe_evaluation.h b/include/deal.II/matrix_free/cuda_fe_evaluation.h
index d63a8624a2..8bb8b6c4ef 100644
--- a/include/deal.II/matrix_free/cuda_fe_evaluation.h
+++ b/include/deal.II/matrix_free/cuda_fe_evaluation.h
@@ -160,12 +160,21 @@ namespace CUDAWrappers
     __device__ void
     submit_gradient(const gradient_type &grad_in, const unsigned int q_point);
 
+    // clang-format off
     /**
-     * Apply the function @p func on every quadrature point.
+     * Apply the functor @p func on every quadrature point.
+     *
+     * @p func needs to define
+     * \code
+     * __device__ void operator()(
+     *   CUDAWrappers::FEEvaluation<dim, fe_degree, n_q_points_1d, n_components, Number> *fe_eval,
+     *   const unsigned int q_point) const;
+     * \endcode
      */
-    template <typename functor>
+    // clang-format on
+    template <typename Functor>
     __device__ void
-    apply_quad_point_operations(const functor &func);
+    apply_quad_point_operations(const Functor &func);
 
   private:
     types::global_dof_index *local_to_global;
@@ -427,10 +436,10 @@ namespace CUDAWrappers
             int n_q_points_1d,
             int n_components_,
             typename Number>
-  template <typename functor>
+  template <typename Functor>
   __device__ void
   FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
-    apply_quad_point_operations(const functor &func)
+    apply_quad_point_operations(const Functor &func)
   {
     const unsigned int q_point =
       (dim == 1 ?
diff --git a/include/deal.II/matrix_free/cuda_matrix_free.h b/include/deal.II/matrix_free/cuda_matrix_free.h
index 7bdec67676..67af1c1d7a 100644
--- a/include/deal.II/matrix_free/cuda_matrix_free.h
+++ b/include/deal.II/matrix_free/cuda_matrix_free.h
@@ -208,13 +208,28 @@ namespace CUDAWrappers
     Data
     get_data(unsigned int color) const;
 
+    // clang-format off
     /**
      * This method runs the loop over all cells and apply the local operation on
      * each element in parallel. @p func is a functor which is applied on each color.
+     *
+     * @p func needs to define
+     * \code
+     * __device__ void operator()(
+     *   const unsigned int cell,
+     *   const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data,
+     *   CUDAWrappers::SharedData<dim, Number> * shared_data,
+     *   const Number * src,
+     *   Number * dst) const;
+     *   static const unsigned int n_dofs_1d;
+     *   static const unsigned int n_local_dofs;
+     *   static const unsigned int n_q_points;
+     * \endcode
      */
-    template <typename functor, typename VectorType>
+    // clang-format on
+    template <typename Functor, typename VectorType>
     void
-    cell_loop(const functor & func,
+    cell_loop(const Functor & func,
               const VectorType &src,
               VectorType & dst) const;
 
@@ -282,9 +297,9 @@ namespace CUDAWrappers
      * Helper function. Loop over all the cells and apply the functor on each
      * element in parallel. This function is used when MPI is not used.
      */
-    template <typename functor, typename VectorType>
+    template <typename Functor, typename VectorType>
     void
-    serial_cell_loop(const functor & func,
+    serial_cell_loop(const Functor & func,
                      const VectorType &src,
                      VectorType & dst) const;
 
@@ -291,10 +306,10 @@ namespace CUDAWrappers
     /**
      * Helper function. Loop over all the cells and apply the functor on each
      * element in parallel. This function is used when MPI is used.
      */
-    template <typename functor, typename VectorType>
+    template <typename Functor, typename VectorType>
     void
     distributed_cell_loop(
-      const functor & func,
+      const Functor & func,
       const LinearAlgebra::distributed::Vector<Number> &src,
       LinearAlgebra::distributed::Vector<Number> &dst) const;
@@ -302,11 +317,11 @@ namespace CUDAWrappers
     /**
      * This function should never be called. Calling it results in an internal
      * error. This function exists only because cell_loop needs
      * distributed_cell_loop() to exist for LinearAlgebra::CUDAWrappers::Vector.
      */
-    template <typename functor, typename VectorType>
+    template <typename Functor, typename VectorType>
     void
     distributed_cell_loop(
-      const functor & func,
+      const Functor & func,
       const LinearAlgebra::CUDAWrappers::Vector<Number> &src,
       LinearAlgebra::CUDAWrappers::Vector<Number> & dst) const;
diff --git a/include/deal.II/matrix_free/cuda_matrix_free.templates.h b/include/deal.II/matrix_free/cuda_matrix_free.templates.h
index ecfd74a09a..001d5fa0ce 100644
--- a/include/deal.II/matrix_free/cuda_matrix_free.templates.h
+++ b/include/deal.II/matrix_free/cuda_matrix_free.templates.h
@@ -472,34 +472,34 @@ namespace CUDAWrappers
 
 
 
-  template <int dim, typename Number, typename functor>
+  template <int dim, typename Number, typename Functor>
   __global__ void
-  apply_kernel_shmem(functor func,
+  apply_kernel_shmem(Functor func,
                      const typename MatrixFree<dim, Number>::Data gpu_data,
                      const Number * src,
                      Number * dst)
   {
     constexpr unsigned int cells_per_block =
-      cells_per_block_shmem(dim, functor::n_dofs_1d - 1);
+      cells_per_block_shmem(dim, Functor::n_dofs_1d - 1);
     constexpr unsigned int n_dofs_per_block =
-      cells_per_block * functor::n_local_dofs;
+      cells_per_block * Functor::n_local_dofs;
     constexpr unsigned int n_q_points_per_block =
-      cells_per_block * functor::n_q_points;
+      cells_per_block * Functor::n_q_points;
 
     // TODO make use of dynamically allocated shared memory
     __shared__ Number values[n_dofs_per_block];
     __shared__ Number gradients[dim][n_q_points_per_block];
 
-    const unsigned int local_cell = threadIdx.x / functor::n_dofs_1d;
+    const unsigned int local_cell = threadIdx.x / Functor::n_dofs_1d;
     const unsigned int cell =
       local_cell + cells_per_block * (blockIdx.x + gridDim.x * blockIdx.y);
 
     Number *gq[dim];
     for (int d = 0; d < dim; ++d)
-      gq[d] = &gradients[d][local_cell * functor::n_q_points];
+      gq[d] = &gradients[d][local_cell * Functor::n_q_points];
 
     SharedData<dim, Number> shared_data(
-      &values[local_cell * functor::n_local_dofs], gq);
+      &values[local_cell * Functor::n_local_dofs], gq);
 
     if (cell < gpu_data.n_cells)
       func(cell, &gpu_data, &shared_data, src, dst);
@@ -741,9 +741,9 @@ namespace CUDAWrappers
 
 
   template <int dim, typename Number>
-  template <typename functor, typename VectorType>
+  template <typename Functor, typename VectorType>
   void
-  MatrixFree<dim, Number>::cell_loop(const functor & func,
+  MatrixFree<dim, Number>::cell_loop(const Functor & func,
                                      const VectorType &src,
                                      VectorType & dst) const
   {
@@ -971,15 +971,15 @@ namespace CUDAWrappers
 
 
   template <int dim, typename Number>
-  template <typename functor, typename VectorType>
+  template <typename Functor, typename VectorType>
   void
-  MatrixFree<dim, Number>::serial_cell_loop(const functor & func,
+  MatrixFree<dim, Number>::serial_cell_loop(const Functor & func,
                                             const VectorType &src,
                                             VectorType & dst) const
   {
     // Execute the loop on the cells
     for (unsigned int i = 0; i < n_colors; ++i)
-      internal::apply_kernel_shmem<dim, Number, functor>
+      internal::apply_kernel_shmem<dim, Number, Functor>
         <<<grid_dim[i], block_dim[i]>>>(func,
                                         get_data(i),
                                         src.get_values(),
@@ -989,10 +989,10 @@ namespace CUDAWrappers
 
 
   template <int dim, typename Number>
-  template <typename functor, typename VectorType>
+  template <typename Functor, typename VectorType>
   void
   MatrixFree<dim, Number>::distributed_cell_loop(
-    const functor & func,
+    const Functor & func,
     const LinearAlgebra::distributed::Vector<Number> &src,
     LinearAlgebra::distributed::Vector<Number> &dst) const
   {
@@ -1005,7 +1005,7 @@ namespace CUDAWrappers
 
     // Execute the loop on the cells
     for (unsigned int i = 0; i < n_colors; ++i)
-      internal::apply_kernel_shmem<dim, Number, functor>
+      internal::apply_kernel_shmem<dim, Number, Functor>
         <<<grid_dim[i], block_dim[i]>>>(func,
                                         get_data(i),
                                         ghosted_src.get_values(),
@@ -1019,10 +1019,10 @@ namespace CUDAWrappers
 
 
   template <int dim, typename Number>
-  template <typename functor, typename VectorType>
+  template <typename Functor, typename VectorType>
   void
   MatrixFree<dim, Number>::distributed_cell_loop(
-    const functor &,
+    const Functor &,
     const LinearAlgebra::CUDAWrappers::Vector<Number> &,
     LinearAlgebra::CUDAWrappers::Vector<Number> &) const
   {
-- 
2.39.5
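
For reference, below is a minimal sketch (not part of the patch above) of a user-side functor pair that satisfies the two interfaces documented by this change, loosely following the pattern of the step-64 tutorial. The class names and the mass-operator logic are made up for illustration, and the FEEvaluation member calls used outside apply_quad_point_operations() (constructor, read_dof_values(), evaluate(), integrate(), distribute_local_to_global()) are assumed to match the CUDAWrappers API of this period:

#include <deal.II/base/utilities.h>

#include <deal.II/matrix_free/cuda_fe_evaluation.h>
#include <deal.II/matrix_free/cuda_matrix_free.h>

using namespace dealii;

// Quadrature-point functor: provides the operator() documented for
// FEEvaluation::apply_quad_point_operations().
template <int dim, int fe_degree>
class MassOperatorQuad
{
public:
  __device__ void operator()(
    CUDAWrappers::FEEvaluation<dim, fe_degree, fe_degree + 1, 1, double> *fe_eval,
    const unsigned int q_point) const
  {
    // Mass operator: test the value at this quadrature point as is.
    fe_eval->submit_value(fe_eval->get_value(q_point), q_point);
  }
};

// Cell-local functor: provides the operator() and the three static
// constants documented for MatrixFree::cell_loop().
template <int dim, int fe_degree>
class LocalMassOperator
{
public:
  static const unsigned int n_dofs_1d    = fe_degree + 1;
  static const unsigned int n_local_dofs = Utilities::pow(fe_degree + 1, dim);
  static const unsigned int n_q_points   = Utilities::pow(fe_degree + 1, dim);

  __device__ void operator()(
    const unsigned int                                          cell,
    const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data,
    CUDAWrappers::SharedData<dim, double> *                     shared_data,
    const double *                                              src,
    double *                                                    dst) const
  {
    // Evaluate the local contribution of this cell and scatter it to dst.
    CUDAWrappers::FEEvaluation<dim, fe_degree, fe_degree + 1, 1, double>
      fe_eval(cell, gpu_data, shared_data);
    fe_eval.read_dof_values(src);
    fe_eval.evaluate(true, false);
    fe_eval.apply_quad_point_operations(MassOperatorQuad<dim, fe_degree>());
    fe_eval.integrate(true, false);
    fe_eval.distribute_local_to_global(dst);
  }
};

// Usage with the documented cell_loop() interface, e.g. with a configured
// CUDAWrappers::MatrixFree<3, double> object mf and matching vectors:
//
//   mf.cell_loop(LocalMassOperator<3, 2>(), src, dst);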