From: Bruno Turcksin Date: Fri, 29 Jun 2018 19:06:56 +0000 (+0000) Subject: Move cuda kernels to their own file and namespace. X-Git-Tag: v9.1.0-rc1~836^2 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F7013%2Fhead;p=dealii.git Move cuda kernels to their own file and namespace. Move kernels from cuda_vector to their own file, remove reference to Vector, and add a couple of new kernels. --- diff --git a/include/deal.II/lac/cuda_kernels.h b/include/deal.II/lac/cuda_kernels.h new file mode 100644 index 0000000000..2e29e06b39 --- /dev/null +++ b/include/deal.II/lac/cuda_kernels.h @@ -0,0 +1,355 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2018 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + +#ifndef dealii_cuda_kernels_h +#define dealii_cuda_kernels_h + +#include + +#ifdef DEAL_II_WITH_CUDA + + +# include +# include + +# include + +DEAL_II_NAMESPACE_OPEN + +namespace LinearAlgebra +{ + namespace CUDAWrappers + { + /** + * Namespace containing the CUDA kernels. + */ + namespace kernel + { + using ::dealii::CUDAWrappers::block_size; + using ::dealii::CUDAWrappers::chunk_size; + typedef types::global_dof_index size_type; + + /** + * Multiply each entry of @p val of size @p N by @p a. + */ + template + __global__ void + vec_scale(Number *val, const Number a, const size_type N); + + + + /** + * Functor defining the addition of two Numbers. + */ + struct Binop_Addition + { + template + __device__ static inline Number + operation(const Number a, const Number b) + { + return a + b; + } + }; + + + + /** + * Functor defining the subtraction of two Numbers. + */ + struct Binop_Subtraction + { + template + __device__ static inline Number + operation(const Number a, const Number b) + { + return a - b; + } + }; + + + + /** + * Apply the functor @tparam Binop to each element of @p v1 and @p v2. + */ + template + __global__ void + vector_bin_op(Number *v1, Number *v2, const size_type N); + + + + /** + * Structure implementing the functions used to add elements when using a + * reduction. + */ + template + struct ElemSum + { + __device__ static Number + reduction_op(const Number a, const Number b); + + __device__ static Number + atomic_op(Number *dst, const Number a); + + __device__ static Number + element_wise_op(const Number a); + + __device__ static Number + null_value(); + }; + + + + /** + * Structure implementing the functions used to compute the L1 norm when + * using a reduction. + */ + template + struct L1Norm + { + __device__ static Number + reduction_op(const Number a, const Number b); + + __device__ static Number + atomic_op(Number *dst, const Number a); + + __device__ static Number + element_wise_op(const Number a); + + __device__ static Number + null_value(); + }; + + + + /** + * Structure implementing the functions used to compute the L-infinity + * norm when using a reduction. 
+ */ + template + struct LInfty + { + __device__ static Number + reduction_op(const Number a, const Number b); + + __device__ static Number + atomic_op(Number *dst, const Number a); + + __device__ static Number + element_wise_op(const Number a); + + __device__ static Number + null_value(); + }; + + + + /** + * Perform a reduction on @p v using @tparam Operation + */ + template + __global__ void + reduction(Number *result, const Number *v, const size_type N); + + + + /** + * Structure implementing the functions used to compute the dot product + * norm when using a double vector reduction. + */ + template + struct DotProduct + { + __device__ static Number + binary_op(const Number a, const Number b); + + __device__ static Number + reduction_op(const Number a, const Number b); + + __device__ static Number + atomic_op(Number *dst, const Number a); + + __device__ static Number + null_value(); + }; + + + + /** + * Perform a binary operation on each element of @p v1 and @p v2 followed + * by reduction on the resulting array. + */ + template + __global__ void + double_vector_reduction(Number * result, + const Number * v1, + const Number * v2, + const size_type N); + + + + /** + * Add @p a to each element of @p val. + */ + template + __global__ void + vec_add(Number *val, const Number a, const size_type N); + + + + /** + * Addition of a multiple of a vector, i.e., val += a*V_val. + */ + template + __global__ void + add_aV(Number * val, + const Number a, + const Number * V_val, + const size_type N); + + + + /** + * Addition of multiple scaled vector, i.e., val += a*V_val + + * b*W_val. + */ + template + __global__ void + add_aVbW(Number * val, + const Number a, + const Number * V_val, + const Number b, + const Number * W_val, + const size_type N); + + + + /** + * Scaling and simple addition of a multiple of a vector, i.e. val = + * = s*val + a*V_val + */ + template + __global__ void + sadd(const Number s, + Number * val, + const Number a, + const Number * V_val, + const size_type N); + + + + /** + * Scaling and multiple additions of scaled vectors, i.e. val = + * = s*val + a*V_val + b*W_val + */ + template + __global__ void + sadd(const Number s, + Number * val, + const Number a, + const Number * V_val, + const Number b, + const Number * W_val, + const size_type N); + + + + /** + * Scale each element of this vector by the corresponding element in the + * argument. + */ + template + __global__ void + scale(Number *val, const Number *V_val, const size_type N); + + + + /** + * Assignment val = a*V_val. + */ + template + __global__ void + equ(Number *val, const Number a, const Number *V_val, const size_type N); + + + + /** + * Assignment val = a*V_val + b*W_val. + */ + template + __global__ void + equ(Number * val, + const Number a, + const Number * V_val, + const Number b, + const Number * W_val, + const size_type N); + + + + /** + * Perform a combined operation of a vector addition and a subsequent + * inner product, returning the value of the inner product. + */ + template + __global__ void + add_and_dot(Number * res, + Number * v1, + const Number * v2, + const Number * v3, + const Number a, + const size_type N); + + + + /** + * Set each element of @p val to @p s. + */ + template + __global__ void + set(Number *val, const Number s, const size_type N); + + + /** + * Set each element @v val to @p v using @p indices as permutation, i.e., + * val[indices[i]] = v[i]. 
+ */ + template + __global__ void + set_permutated(Number * val, + const Number * v, + const size_type *indices, + const size_type N); + + + + /** + * Add each element @v val to @p v using @p indices as permutation, i.e., + * val[indices[i]] += v[i]. + */ + template + __global__ void + add_permutated(Number * val, + const Number * v, + const size_type *indices, + const size_type N); + } // namespace kernel + } // namespace CUDAWrappers +} // namespace LinearAlgebra + +DEAL_II_NAMESPACE_CLOSE + +#endif + +#endif diff --git a/include/deal.II/lac/cuda_kernels.templates.h b/include/deal.II/lac/cuda_kernels.templates.h new file mode 100644 index 0000000000..8ae0e51ab7 --- /dev/null +++ b/include/deal.II/lac/cuda_kernels.templates.h @@ -0,0 +1,575 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2018 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + +#ifndef dealii__cuda_kernels_templates_h +#define dealii__cuda_kernels_templates_h + +#include + +DEAL_II_NAMESPACE_OPEN + +namespace LinearAlgebra +{ + namespace CUDAWrappers + { + namespace kernel + { + template + __global__ void + vec_scale(Number *val, const Number a, const size_type N) + { + const size_type idx_base = + threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); + for (unsigned int i = 0; i < chunk_size; ++i) + { + const size_type idx = idx_base + i * block_size; + if (idx < N) + val[idx] *= a; + } + } + + + + template + __global__ void + vector_bin_op(Number *v1, Number *v2, const size_type N) + { + const size_type idx_base = + threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); + for (unsigned int i = 0; i < chunk_size; ++i) + { + const size_type idx = idx_base + i * block_size; + if (idx < N) + v1[idx] = Binop::operation(v1[idx], v2[idx]); + } + } + + + + template + __device__ Number + ElemSum::reduction_op(const Number a, const Number b) + { + return (a + b); + } + + + + template + __device__ Number + ElemSum::atomic_op(Number *dst, const Number a) + { + return atomicAdd_wrapper(dst, a); + } + + + + template + __device__ Number + ElemSum::element_wise_op(const Number a) + { + return a; + } + + + + template + __device__ Number + ElemSum::null_value() + { + return Number(); + } + + + + template + __device__ Number + L1Norm::reduction_op(const Number a, const Number b) + { + return (a + b); + } + + + + template + __device__ Number + L1Norm::atomic_op(Number *dst, const Number a) + { + return atomicAdd_wrapper(dst, a); + } + + + + template + __device__ Number + L1Norm::element_wise_op(const Number a) + { + return std::fabs(a); + } + + + + template + __device__ Number + L1Norm::null_value() + { + return Number(); + } + + + + template + __device__ Number + LInfty::reduction_op(const Number a, const Number b) + { + if (a > b) + return a; + else + return b; + } + + + + template + __device__ Number + LInfty::atomic_op(Number *dst, const Number a) + { + return atomicMax_wrapper(dst, a); + } + + + + template + __device__ Number + LInfty::element_wise_op(const Number a) + { + return std::fabs(a); + } 
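ElemSum, L1Norm and LInfty all satisfy the same four-member contract that the reduction() kernel expects from its Operation template parameter: element_wise_op() maps each entry, reduction_op() combines two partial results, atomic_op() merges a block's result into the global accumulator, and null_value() supplies the neutral element. As a sketch of how a further reduction could be added — the functor below is hypothetical, not part of this patch — a sum-of-squares operation would look as follows if placed next to the structs above, reusing the atomicAdd_wrapper() helper they already rely on:

      template <typename Number>
      struct SquaredSum // hypothetical example, not part of deal.II
      {
        __device__ static Number
        reduction_op(const Number a, const Number b)
        {
          // partial results are combined by addition
          return a + b;
        }

        __device__ static Number
        atomic_op(Number *dst, const Number a)
        {
          // one atomic update per block, as in ElemSum and L1Norm
          return atomicAdd_wrapper(dst, a);
        }

        __device__ static Number
        element_wise_op(const Number a)
        {
          // each entry contributes its square
          return a * a;
        }

        __device__ static Number
        null_value()
        {
          return Number();
        }
      };

Such a functor would be consumed exactly like the existing ones, e.g. kernel::reduction<Number, SquaredSum<Number>><<<n_blocks, block_size>>>(result, v, N).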
+ + + + template + __device__ Number + LInfty::null_value() + { + return Number(); + } + + + + template + __device__ void + reduce_within_warp(volatile Number *result_buffer, size_type local_idx) + { + if (block_size >= 64) + result_buffer[local_idx] = + Operation::reduction_op(result_buffer[local_idx], + result_buffer[local_idx + 32]); + if (block_size >= 32) + result_buffer[local_idx] = + Operation::reduction_op(result_buffer[local_idx], + result_buffer[local_idx + 16]); + if (block_size >= 16) + result_buffer[local_idx] = + Operation::reduction_op(result_buffer[local_idx], + result_buffer[local_idx + 8]); + if (block_size >= 8) + result_buffer[local_idx] = + Operation::reduction_op(result_buffer[local_idx], + result_buffer[local_idx + 4]); + if (block_size >= 4) + result_buffer[local_idx] = + Operation::reduction_op(result_buffer[local_idx], + result_buffer[local_idx + 2]); + if (block_size >= 2) + result_buffer[local_idx] = + Operation::reduction_op(result_buffer[local_idx], + result_buffer[local_idx + 1]); + } + + + + template + __device__ void + reduce(Number * result, + Number * result_buffer, + const size_type local_idx, + const size_type global_idx, + const size_type N) + { + for (size_type s = block_size / 2; s > 32; s = s >> 1) + { + if (local_idx < s) + result_buffer[local_idx] = + Operation::reduction_op(result_buffer[local_idx], + result_buffer[local_idx + s]); + __syncthreads(); + } + + if (local_idx < 32) + reduce_within_warp(result_buffer, local_idx); + + if (local_idx == 0) + Operation::atomic_op(result, result_buffer[0]); + } + + + + template + __global__ void + reduction(Number *result, const Number *v, const size_type N) + { + __shared__ Number result_buffer[block_size]; + + const size_type global_idx = + threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); + const size_type local_idx = threadIdx.x; + + if (global_idx < N) + result_buffer[local_idx] = Operation::element_wise_op(v[global_idx]); + else + result_buffer[local_idx] = Operation::null_value(); + + __syncthreads(); + + reduce( + result, result_buffer, local_idx, global_idx, N); + } + + + + template + __device__ Number + DotProduct::binary_op(const Number a, const Number b) + { + return a * b; + } + + + + template + __device__ Number + DotProduct::reduction_op(const Number a, const Number b) + { + return a + b; + } + + + + template + __device__ Number + DotProduct::atomic_op(Number *dst, const Number a) + { + return atomicAdd_wrapper(dst, a); + } + + + + template + __device__ Number + DotProduct::null_value() + { + return Number(); + } + + + + template + __global__ void + double_vector_reduction(Number * result, + const Number * v1, + const Number * v2, + const size_type N) + { + __shared__ Number result_buffer[block_size]; + + const size_type global_idx = + threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); + const size_type local_idx = threadIdx.x; + + if (global_idx < N) + result_buffer[local_idx] = + Operation::binary_op(v1[global_idx], v2[global_idx]); + else + result_buffer[local_idx] = Operation::null_value(); + + for (unsigned int i = 1; i < chunk_size; ++i) + { + const size_type idx = global_idx + i * block_size; + if (idx < N) + result_buffer[local_idx] = + Operation::reduction_op(result_buffer[local_idx], + Operation::binary_op(v1[idx], v2[idx])); + } + + __syncthreads(); + + reduce( + result, result_buffer, local_idx, global_idx, N); + } + + + + template + __global__ void + vec_add(Number *val, const Number a, const size_type N) + { + const size_type idx_base = + threadIdx.x + blockIdx.x * 
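/* each block owns a window of blockDim.x * chunk_size consecutive entries; thread t visits t, t + block_size, ..., t + (chunk_size - 1) * block_size of that window */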
(blockDim.x * chunk_size); + for (unsigned int i = 0; i < chunk_size; ++i) + { + const size_type idx = idx_base + i * block_size; + if (idx < N) + val[idx] += a; + } + } + + + + template + __global__ void + add_aV(Number * val, + const Number a, + const Number * V_val, + const size_type N) + { + const size_type idx_base = + threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); + for (unsigned int i = 0; i < chunk_size; ++i) + { + const size_type idx = idx_base + i * block_size; + if (idx < N) + val[idx] += a * V_val[idx]; + } + } + + + + template + __global__ void + add_aVbW(Number * val, + const Number a, + const Number * V_val, + const Number b, + const Number * W_val, + const size_type N) + { + const size_type idx_base = + threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); + for (unsigned int i = 0; i < chunk_size; ++i) + { + const size_type idx = idx_base + i * block_size; + if (idx < N) + val[idx] += a * V_val[idx] + b * W_val[idx]; + } + } + + + + template + __global__ void + sadd(const Number s, + Number * val, + const Number a, + const Number * V_val, + const size_type N) + { + const size_type idx_base = + threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); + for (unsigned int i = 0; i < chunk_size; ++i) + { + const size_type idx = idx_base + i * block_size; + if (idx < N) + val[idx] = s * val[idx] + a * V_val[idx]; + } + } + + + + template + __global__ void + sadd(const Number s, + Number * val, + const Number a, + const Number * V_val, + const Number b, + const Number * W_val, + const size_type N) + { + const size_type idx_base = + threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); + for (unsigned int i = 0; i < chunk_size; ++i) + { + const size_type idx = idx_base + i * block_size; + if (idx < N) + val[idx] = s * val[idx] + a * V_val[idx] + b * W_val[idx]; + } + } + + + + template + __global__ void + scale(Number *val, const Number *V_val, const size_type N) + { + const size_type idx_base = + threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); + for (unsigned int i = 0; i < chunk_size; ++i) + { + const size_type idx = idx_base + i * block_size; + if (idx < N) + val[idx] *= V_val[idx]; + } + } + + + + template + __global__ void + equ(Number *val, const Number a, const Number *V_val, const size_type N) + { + const size_type idx_base = + threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); + for (unsigned int i = 0; i < chunk_size; ++i) + { + const size_type idx = idx_base + i * block_size; + if (idx < N) + val[idx] = a * V_val[idx]; + } + } + + + + template + __global__ void + equ(Number * val, + const Number a, + const Number * V_val, + const Number b, + const Number * W_val, + const size_type N) + { + const size_type idx_base = + threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); + for (unsigned int i = 0; i < chunk_size; ++i) + { + const size_type idx = idx_base + i * block_size; + if (idx < N) + val[idx] = a * V_val[idx] + b * W_val[idx]; + } + } + + + + template + __global__ void + add_and_dot(Number * res, + Number * v1, + const Number * v2, + const Number * v3, + const Number a, + const size_type N) + { + __shared__ Number res_buf[block_size]; + + const unsigned int global_idx = + threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); + const unsigned int local_idx = threadIdx.x; + if (global_idx < N) + { + v1[global_idx] += a * v2[global_idx]; + res_buf[local_idx] = + v1[global_idx] * + Number(numbers::NumberTraits::conjugate(v3[global_idx])); + } + else + res_buf[local_idx] = 0.; + + for (unsigned int i = 1; i < block_size; ++i) + { + const unsigned int idx = 
global_idx + i * block_size; + if (idx < N) + { + v1[idx] += a * v2[idx]; + res_buf[local_idx] += v1[idx] * v3[idx]; + } + } + + __syncthreads(); + + reduce>( + res, res_buf, local_idx, global_idx, N); + } + + + + template + __global__ void + set(Number *val, const Number s, const size_type N) + { + const size_type idx_base = + threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); + for (unsigned int i = 0; i < chunk_size; ++i) + { + const size_type idx = idx_base + i * block_size; + if (idx < N) + val[idx] = s; + } + } + + + + template + __global__ void + set_permutated(Number * val, + const Number * v, + const size_type *indices, + const size_type N) + { + const size_type idx_base = + threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); + for (unsigned int i = 0; i < chunk_size; ++i) + { + const size_type idx = idx_base + i * block_size; + if (idx < N) + val[indices[idx]] = v[idx]; + } + } + + + + template + __global__ void + add_permutated(Number * val, + const Number * v, + const size_type *indices, + const size_type N) + { + const size_type idx_base = + threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); + for (unsigned int i = 0; i < chunk_size; ++i) + { + const size_type idx = idx_base + i * block_size; + if (idx < N) + val[indices[idx]] += v[idx]; + } + } + } // namespace kernel + } // namespace CUDAWrappers +} // namespace LinearAlgebra + +DEAL_II_NAMESPACE_CLOSE + +#endif diff --git a/source/lac/CMakeLists.txt b/source/lac/CMakeLists.txt index e98cd54e7c..0abdfcba5d 100644 --- a/source/lac/CMakeLists.txt +++ b/source/lac/CMakeLists.txt @@ -146,6 +146,7 @@ ENDIF() IF(DEAL_II_WITH_CUDA) SET(_separate_src ${_separate_src} + cuda_kernels.cu cuda_solver_direct.cu cuda_sparse_matrix.cu cuda_vector.cu diff --git a/source/lac/cuda_kernels.cu b/source/lac/cuda_kernels.cu new file mode 100644 index 0000000000..1ec88bd11d --- /dev/null +++ b/source/lac/cuda_kernels.cu @@ -0,0 +1,221 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2018 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. 
+// +// --------------------------------------------------------------------- + +#include + +DEAL_II_NAMESPACE_OPEN + +namespace LinearAlgebra +{ + namespace CUDAWrappers + { + namespace kernel + { + ///////////////////////////////////////////////////////////////////////// + // Explicit instantiation // + ///////////////////////////////////////////////////////////////////////// + + template __global__ void + vec_scale(float *, const float a, const size_type); + template __global__ void + vector_bin_op(float * v1, + float * v2, + const size_type N); + template __global__ void + vector_bin_op(float * v1, + float * v2, + const size_type N); + template struct ElemSum; + template struct L1Norm; + template struct LInfty; + template __global__ void + reduction>(float * result, + const float * v, + const size_type N); + template __global__ void + reduction>(float * result, + const float * v, + const size_type N); + template __global__ void + reduction>(float * result, + const float * v, + const size_type N); + template struct DotProduct; + template __global__ void + double_vector_reduction>(float * result, + const float * v1, + const float * v2, + const size_type N); + template __global__ void + vec_add(float *val, const float, const size_type N); + template __global__ void + add_aV(float * val, + const float a, + const float * V_val, + const size_type N); + template __global__ void + add_aVbW(float * val, + const float a, + const float * V_val, + const float b, + const float * W_val, + const size_type N); + template __global__ void + sadd(const float s, + float * val, + const float a, + const float * V_val, + const size_type N); + template __global__ void + sadd(const float s, + float * val, + const float a, + const float * V_val, + const float b, + const float * W_val, + const size_type N); + template __global__ void + scale(float *val, const float *V_val, const size_type N); + template __global__ void + equ(float * val, + const float a, + const float * V_val, + const size_type N); + template __global__ void + equ(float * val, + const float a, + const float * V_val, + const float b, + const float * W_val, + const size_type N); + template __global__ void + add_and_dot(float * res, + float * v1, + const float * v2, + const float * v3, + const float a, + const size_type N); + template __global__ void + set(float *val, const float s, const size_type N); + template __global__ void + set_permutated(float * val, + const float * v, + const size_type *indices, + const size_type N); + template __global__ void + add_permutated(float * val, + const float * v, + const size_type *indices, + const size_type N); + + + + template __global__ void + vec_scale(double *, const double a, const size_type); + template __global__ void + vector_bin_op(double * v1, + double * v2, + const size_type N); + template __global__ void + vector_bin_op(double * v1, + double * v2, + const size_type N); + template struct ElemSum; + template struct L1Norm; + template struct LInfty; + template __global__ void + reduction>(double * result, + const double * v, + const size_type N); + template __global__ void + reduction>(double * result, + const double * v, + const size_type N); + template __global__ void + reduction>(double * result, + const double * v, + const size_type N); + template struct DotProduct; + template __global__ void + double_vector_reduction>(double * result, + const double *v1, + const double *v2, + const size_type N); + template __global__ void + vec_add(double *val, const double, const size_type N); + template 
__global__ void + add_aV(double * val, + const double a, + const double * V_val, + const size_type N); + template __global__ void + add_aVbW(double * val, + const double a, + const double * V_val, + const double b, + const double * W_val, + const size_type N); + template __global__ void + sadd(const double s, + double * val, + const double a, + const double * V_val, + const size_type N); + template __global__ void + sadd(const double s, + double * val, + const double a, + const double * V_val, + const double b, + const double * W_val, + const size_type N); + template __global__ void + scale(double *val, const double *V_val, const size_type N); + template __global__ void + equ(double * val, + const double a, + const double * V_val, + const size_type N); + template __global__ void + equ(double * val, + const double a, + const double * V_val, + const double b, + const double * W_val, + const size_type N); + template __global__ void + add_and_dot(double * res, + double * v1, + const double * v2, + const double * v3, + const double a, + const size_type N); + template __global__ void + set(double *val, const double s, const size_type N); + template __global__ void + set_permutated(double * val, + const double * v, + const size_type *indices, + const size_type N); + template __global__ void + add_permutated(double * val, + const double * v, + const size_type *indices, + const size_type N); + } // namespace kernel + } // namespace CUDAWrappers +} // namespace LinearAlgebra + +DEAL_II_NAMESPACE_CLOSE diff --git a/source/lac/cuda_vector.cu b/source/lac/cuda_vector.cu index 725dc483c3..c94d1b011e 100644 --- a/source/lac/cuda_vector.cu +++ b/source/lac/cuda_vector.cu @@ -17,6 +17,7 @@ #include #include +#include #include #include @@ -32,478 +33,6 @@ namespace LinearAlgebra { using ::dealii::CUDAWrappers::block_size; using ::dealii::CUDAWrappers::chunk_size; - namespace internal - { - template - __global__ void - vec_scale(Number * val, - const Number a, - const typename Vector::size_type N) - { - const typename Vector::size_type idx_base = - threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); - for (unsigned int i = 0; i < chunk_size; ++i) - { - const typename Vector::size_type idx = - idx_base + i * block_size; - if (idx < N) - val[idx] *= a; - } - } - - - - struct Binop_Addition - { - template - __device__ static inline Number - operation(const Number a, const Number b) - { - return a + b; - } - }; - - - - struct Binop_Subtraction - { - template - __device__ static inline Number - operation(const Number a, const Number b) - { - return a - b; - } - }; - - - - template - __global__ void - vector_bin_op(Number * v1, - Number * v2, - const typename Vector::size_type N) - { - const typename Vector::size_type idx_base = - threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); - for (unsigned int i = 0; i < chunk_size; ++i) - { - const typename Vector::size_type idx = - idx_base + i * block_size; - if (idx < N) - v1[idx] = Binop::operation(v1[idx], v2[idx]); - } - } - - - - template - struct ElemSum - { - __device__ static Number - reduction_op(const Number a, const Number b) - { - return (a + b); - } - - __device__ static Number - atomic_op(Number *dst, const Number a) - { - return atomicAdd_wrapper(dst, a); - } - - __device__ static Number - element_wise_op(const Number a) - { - return a; - } - - __device__ static Number - null_value() - { - return Number(); - } - }; - - - - template - struct L1Norm - { - __device__ static Number - reduction_op(const Number a, const Number b) - { - return (a + b); - } - 
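Because the kernels now live in the public LinearAlgebra::CUDAWrappers::kernel namespace rather than in an internal namespace local to cuda_vector.cu, other translation units can launch them directly. The following standalone sketch is not part of the patch; it assumes deal.II was configured with CUDA and that the file is compiled with nvcc, and it includes the .templates.h header so the kernel definitions are instantiated locally instead of relying on the float/double instantiations provided by cuda_kernels.cu:

#include <deal.II/lac/cuda_kernels.templates.h>

#include <cuda_runtime.h>

#include <iostream>
#include <vector>

int main()
{
  namespace kernel = dealii::LinearAlgebra::CUDAWrappers::kernel;
  using dealii::CUDAWrappers::block_size;
  using dealii::CUDAWrappers::chunk_size;

  const kernel::size_type n = 1000;
  std::vector<double> host(n, 1.);

  double *dev;
  cudaMalloc(&dev, n * sizeof(double));
  cudaMemcpy(dev, host.data(), n * sizeof(double), cudaMemcpyHostToDevice);

  // same grid-sizing rule used at every call site in cuda_vector.cu
  const int n_blocks = 1 + (n - 1) / (chunk_size * block_size);
  kernel::vec_add<double><<<n_blocks, block_size>>>(dev, 2., n);

  cudaMemcpy(host.data(), dev, n * sizeof(double), cudaMemcpyDeviceToHost);
  cudaFree(dev);

  std::cout << host[0] << std::endl; // prints 3
}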
- __device__ static Number - atomic_op(Number *dst, const Number a) - { - return atomicAdd_wrapper(dst, a); - } - - __device__ static Number - element_wise_op(const Number a) - { - return std::fabs(a); - } - - __device__ static Number - null_value() - { - return Number(); - } - }; - - - - template - struct LInfty - { - __device__ static Number - reduction_op(const Number a, const Number b) - { - if (a > b) - return a; - else - return b; - } - - __device__ static Number - atomic_op(Number *dst, const Number a) - { - return atomicMax_wrapper(dst, a); - } - - __device__ static Number - element_wise_op(const Number a) - { - return std::fabs(a); - } - - __device__ static Number - null_value() - { - return Number(); - } - }; - - - - template - __device__ void - reduce_within_warp(volatile Number * result_buffer, - typename Vector::size_type local_idx) - { - if (block_size >= 64) - result_buffer[local_idx] = - Operation::reduction_op(result_buffer[local_idx], - result_buffer[local_idx + 32]); - if (block_size >= 32) - result_buffer[local_idx] = - Operation::reduction_op(result_buffer[local_idx], - result_buffer[local_idx + 16]); - if (block_size >= 16) - result_buffer[local_idx] = - Operation::reduction_op(result_buffer[local_idx], - result_buffer[local_idx + 8]); - if (block_size >= 8) - result_buffer[local_idx] = - Operation::reduction_op(result_buffer[local_idx], - result_buffer[local_idx + 4]); - if (block_size >= 4) - result_buffer[local_idx] = - Operation::reduction_op(result_buffer[local_idx], - result_buffer[local_idx + 2]); - if (block_size >= 2) - result_buffer[local_idx] = - Operation::reduction_op(result_buffer[local_idx], - result_buffer[local_idx + 1]); - } - - - - template - __device__ void - reduce(Number * result, - Number * result_buffer, - const typename Vector::size_type local_idx, - const typename Vector::size_type global_idx, - const typename Vector::size_type N) - { - for (typename Vector::size_type s = block_size / 2; s > 32; - s = s >> 1) - { - if (local_idx < s) - result_buffer[local_idx] = - Operation::reduction_op(result_buffer[local_idx], - result_buffer[local_idx + s]); - __syncthreads(); - } - - if (local_idx < 32) - reduce_within_warp(result_buffer, local_idx); - - if (local_idx == 0) - Operation::atomic_op(result, result_buffer[0]); - } - - - - template - __global__ void - reduction(Number * result, - const Number * v, - const typename Vector::size_type N) - { - __shared__ Number result_buffer[block_size]; - - const typename Vector::size_type global_idx = - threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); - const typename Vector::size_type local_idx = threadIdx.x; - - if (global_idx < N) - result_buffer[local_idx] = Operation::element_wise_op(v[global_idx]); - else - result_buffer[local_idx] = Operation::null_value(); - - __syncthreads(); - - reduce( - result, result_buffer, local_idx, global_idx, N); - } - - - - template - struct DotProduct - { - __device__ static Number - binary_op(const Number a, const Number b) - { - return a * b; - } - - __device__ static Number - reduction_op(const Number a, const Number b) - { - return a + b; - } - - __device__ static Number - atomic_op(Number *dst, const Number a) - { - return atomicAdd_wrapper(dst, a); - } - - __device__ static Number - null_value() - { - return Number(); - } - }; - - - - template - __global__ void - double_vector_reduction(Number * result, - Number * v1, - Number * v2, - const typename Vector::size_type N) - { - __shared__ Number result_buffer[block_size]; - - const typename Vector::size_type 
global_idx = - threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); - const typename Vector::size_type local_idx = threadIdx.x; - - if (global_idx < N) - result_buffer[local_idx] = - Operation::binary_op(v1[global_idx], v2[global_idx]); - else - result_buffer[local_idx] = Operation::null_value(); - - for (unsigned int i = 1; i < chunk_size; ++i) - { - const typename Vector::size_type idx = - global_idx + i * block_size; - if (idx < N) - result_buffer[local_idx] = - Operation::reduction_op(result_buffer[local_idx], - Operation::binary_op(v1[idx], v2[idx])); - } - - __syncthreads(); - - reduce( - result, result_buffer, local_idx, global_idx, N); - } - - - - template - __global__ void - vec_add(Number * val, - const Number a, - const typename Vector::size_type N) - { - const typename Vector::size_type idx_base = - threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); - for (unsigned int i = 0; i < chunk_size; ++i) - { - const typename Vector::size_type idx = - idx_base + i * block_size; - if (idx < N) - val[idx] += a; - } - } - - - - template - __global__ void - add_aV(Number * val, - const Number a, - Number * V_val, - const typename Vector::size_type N) - { - const typename Vector::size_type idx_base = - threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); - for (unsigned int i = 0; i < chunk_size; ++i) - { - const typename Vector::size_type idx = - idx_base + i * block_size; - if (idx < N) - val[idx] += a * V_val[idx]; - } - } - - - - template - __global__ void - add_aVbW(Number * val, - const Number a, - Number * V_val, - const Number b, - Number * W_val, - const typename Vector::size_type N) - { - const typename Vector::size_type idx_base = - threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); - for (unsigned int i = 0; i < chunk_size; ++i) - { - const typename Vector::size_type idx = - idx_base + i * block_size; - if (idx < N) - val[idx] += a * V_val[idx] + b * W_val[idx]; - } - } - - - - template - __global__ void - sadd(const Number s, - Number * val, - const Number a, - const Number * V_val, - const typename Vector::size_type N) - { - const typename Vector::size_type idx_base = - threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); - for (unsigned int i = 0; i < chunk_size; ++i) - { - const typename Vector::size_type idx = - idx_base + i * block_size; - if (idx < N) - val[idx] = s * val[idx] + a * V_val[idx]; - } - } - - - - template - __global__ void - scale(Number * val, - const Number * V_val, - const typename Vector::size_type N) - { - const typename Vector::size_type idx_base = - threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); - for (unsigned int i = 0; i < chunk_size; ++i) - { - const typename Vector::size_type idx = - idx_base + i * block_size; - if (idx < N) - val[idx] *= V_val[idx]; - } - } - - - - template - __global__ void - equ(Number * val, - const Number a, - const Number * V_val, - const typename Vector::size_type N) - { - const typename Vector::size_type idx_base = - threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); - for (unsigned int i = 0; i < chunk_size; ++i) - { - const typename Vector::size_type idx = - idx_base + i * block_size; - if (idx < N) - val[idx] = a * V_val[idx]; - } - } - - - - template - __global__ void - add_and_dot(Number * res, - Number * v1, - const Number * v2, - const Number * v3, - const Number a, - const typename Vector::size_type N) - { - __shared__ Number res_buf[block_size]; - - const unsigned int global_idx = - threadIdx.x + blockIdx.x * (blockDim.x * chunk_size); - const unsigned int local_idx = threadIdx.x; - if 
(global_idx < N) - { - v1[global_idx] += a * v2[global_idx]; - res_buf[local_idx] = - v1[global_idx] * - Number(numbers::NumberTraits::conjugate(v3[global_idx])); - } - else - res_buf[local_idx] = 0.; - - for (unsigned int i = 1; i < block_size; ++i) - { - const unsigned int idx = global_idx + i * block_size; - if (idx < N) - { - v1[idx] += a * v2[idx]; - res_buf[local_idx] += v1[idx] * v3[idx]; - } - } - - __syncthreads(); - - reduce>( - res, res_buf, local_idx, global_idx, N); - } - } // namespace internal - - template Vector::Vector() @@ -633,7 +162,7 @@ namespace LinearAlgebra // Add the two vectors const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size); - internal::vector_bin_op + kernel::vector_bin_op <<>>(val, tmp, n_elements); // Check that the kernel was launched correctly AssertCuda(cudaGetLastError()); @@ -671,7 +200,7 @@ namespace LinearAlgebra { AssertIsFinite(factor); const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size); - internal::vec_scale + kernel::vec_scale <<>>(val, factor, n_elements); // Check that the kernel was launched correctly @@ -691,7 +220,7 @@ namespace LinearAlgebra AssertIsFinite(factor); Assert(factor != Number(0.), ExcZero()); const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size); - internal::vec_scale + kernel::vec_scale <<>>(val, 1. / factor, n_elements); // Check that the kernel was launched correctly @@ -720,7 +249,7 @@ namespace LinearAlgebra const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size); - internal::vector_bin_op + kernel::vector_bin_op <<>>(val, down_V.val, n_elements); // Check that the kernel was launched correctly @@ -749,7 +278,7 @@ namespace LinearAlgebra const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size); - internal::vector_bin_op + kernel::vector_bin_op <<>>(val, down_V.val, n_elements); // Check that the kernel was launched correctly @@ -782,7 +311,7 @@ namespace LinearAlgebra error_code = cudaMemset(result_device, Number(), sizeof(Number)); const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size); - internal::double_vector_reduction> + kernel::double_vector_reduction> <<>>(result_device, val, down_V.val, @@ -811,7 +340,7 @@ namespace LinearAlgebra { AssertIsFinite(a); const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size); - internal::vec_add<<>>(val, a, n_elements); + kernel::vec_add<<>>(val, a, n_elements); // Check that the kernel was launched correctly AssertCuda(cudaGetLastError()); @@ -838,7 +367,7 @@ namespace LinearAlgebra "Cannot add two vectors with different numbers of elements.")); const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size); - internal::add_aV<<>>( + kernel::add_aV<<>>( val, a, down_V.val, n_elements); // Check that the kernel was launched correctly @@ -880,7 +409,7 @@ namespace LinearAlgebra "Cannot add two vectors with different numbers of elements.")); const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size); - internal::add_aVbW<<>>( + kernel::add_aVbW<<>>( val, a, down_V.val, b, down_W.val, n_elements); // Check that the kernel was launched correctly @@ -911,7 +440,7 @@ namespace LinearAlgebra "Cannot add two vectors with different numbers of elements.")); const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size); - internal::sadd<<>>( + kernel::sadd<<>>( s, val, a, down_V.val, n_elements); // Check that the kernel was launched correctly @@ -938,7 +467,7 @@ namespace LinearAlgebra "Cannot scale two vectors with different numbers of elements.")); const int 
n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size); - internal::scale + kernel::scale <<>>(val, down_scaling_factors.val, n_elements); @@ -969,8 +498,10 @@ namespace LinearAlgebra "Cannot assign two vectors with different numbers of elements.")); const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size); - internal::equ<<>>( - val, a, down_V.val, n_elements); + kernel::equ<<>>(val, + a, + down_V.val, + n_elements); // Check that the kernel was launched correctly AssertCuda(cudaGetLastError()); @@ -999,7 +530,7 @@ namespace LinearAlgebra error_code = cudaMemset(result_device, Number(), sizeof(Number)); const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size); - internal::reduction> + kernel::reduction> <<>>(result_device, val, n_elements); @@ -1031,7 +562,7 @@ namespace LinearAlgebra error_code = cudaMemset(result_device, Number(), sizeof(Number)); const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size); - internal::reduction> + kernel::reduction> <<>>(result_device, val, n_elements); @@ -1071,7 +602,7 @@ namespace LinearAlgebra error_code = cudaMemset(result_device, Number(), sizeof(Number)); const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size); - internal::reduction> + kernel::reduction> <<>>(result_device, val, n_elements); @@ -1121,7 +652,7 @@ namespace LinearAlgebra AssertCuda(error_code); const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size); - internal::add_and_dot<<>>( + kernel::add_and_dot<<>>( res_d, val, down_V.val, down_W.val, a, n_elements); Number res; diff --git a/tests/all-headers/CMakeLists.txt b/tests/all-headers/CMakeLists.txt index 1d84c7f4c5..9cefd6824b 100644 --- a/tests/all-headers/CMakeLists.txt +++ b/tests/all-headers/CMakeLists.txt @@ -50,6 +50,8 @@ FILE(GLOB_RECURSE _headers RELATIVE ${_include_dir}/deal.II # them from the list # LIST(REMOVE_ITEM _headers "lac/cuda_atomic.h" + "lac/cuda_kernels.h" + "lac/cuda_kernels.templates.h" "matrix_free/cuda_fe_evaluation.h" "matrix_free/cuda_matrix_free.templates.h" "matrix_free/cuda_tensor_product_kernels.h")
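The norm and dot-product hunks above all follow the same host-side sequence: allocate a device scalar, zero it, launch the reduction kernel, and copy the result back. Below is a condensed sketch of that sequence for the l1 norm; it is illustrative only — device_l1_norm is not a deal.II function, the AssertCuda error checks are omitted so the snippet stands on its own, and the .templates.h include provides the kernel definitions for local instantiation:

#include <deal.II/lac/cuda_kernels.templates.h>

#include <cuda_runtime.h>

template <typename Number>
Number
device_l1_norm(const Number *v, const dealii::types::global_dof_index n)
{
  namespace kernel = dealii::LinearAlgebra::CUDAWrappers::kernel;
  using dealii::CUDAWrappers::block_size;
  using dealii::CUDAWrappers::chunk_size;

  // the kernel accumulates atomically into *result_device, so it must start at zero
  Number *result_device;
  cudaMalloc(&result_device, sizeof(Number));
  cudaMemset(result_device, 0, sizeof(Number));

  const int n_blocks = 1 + (n - 1) / (chunk_size * block_size);
  kernel::reduction<Number, kernel::L1Norm<Number>>
    <<<n_blocks, block_size>>>(result_device, v, n);

  Number result;
  cudaMemcpy(&result, result_device, sizeof(Number), cudaMemcpyDeviceToHost);
  cudaFree(result_device);
  return result;
}

Swapping in LInfty<Number> gives the maximum norm and ElemSum<Number> the plain sum, while the dot product uses kernel::double_vector_reduction with DotProduct<Number> in the same way.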