From: Bruno Turcksin
Date: Mon, 12 Feb 2018 22:28:31 +0000 (-0500)
Subject: Move cuda related macros in their own file
X-Git-Tag: v9.0.0-rc1~439^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=8dca4c2ec6d9218e288f7fcd643868cde696eafd;p=dealii.git

Move cuda related macros in their own file
---

diff --git a/include/deal.II/base/cuda_size.h b/include/deal.II/base/cuda_size.h
new file mode 100644
index 0000000000..0582e6fa0b
--- /dev/null
+++ b/include/deal.II/base/cuda_size.h
@@ -0,0 +1,38 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_cuda_size_h
+#define dealii_cuda_size_h
+
+#include <deal.II/base/config.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace CUDAWrappers
+{
+  /**
+   * Define the size of a block when launching a CUDA kernel.
+   */
+  constexpr int block_size = 512;
+
+  /**
+   * Define the size of chunk of data worked on by a thread.
+   */
+  constexpr int chunk_size = 8;
+}
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
diff --git a/source/lac/cuda_vector.cu b/source/lac/cuda_vector.cu
index 36b2be442b..9b9477afe8 100644
--- a/source/lac/cuda_vector.cu
+++ b/source/lac/cuda_vector.cu
@@ -17,19 +17,19 @@
 #include <deal.II/lac/cuda_vector.h>
 #include <deal.II/lac/read_write_vector.h>
 #include <deal.II/base/exceptions.h>
+#include <deal.II/base/cuda_size.h>
 #include <cmath>
 
 #ifdef DEAL_II_WITH_CUDA
 
 DEAL_II_NAMESPACE_OPEN
 
-#define BLOCK_SIZE 512
-#define CHUNK_SIZE 8
-
 namespace LinearAlgebra
 {
   namespace CUDAWrappers
   {
+    using ::dealii::CUDAWrappers::block_size;
+    using ::dealii::CUDAWrappers::chunk_size;
     namespace internal
     {
       template <typename Number>
@@ -39,11 +39,11 @@
       {
         const typename Vector<Number>::size_type idx_base = threadIdx.x +
                                                             blockIdx.x *
-                                                            (blockDim.x*CHUNK_SIZE);
-        for (unsigned int i=0; i<CHUNK_SIZE; ++i)
+                                                            (blockDim.x*chunk_size);
+        for (unsigned int i=0; i<chunk_size; ++i)
           {
             const typename Vector<Number>::size_type idx = idx_base +
-                                                           i*BLOCK_SIZE;
+                                                           i*block_size;
             if (idx<N)
               val[idx] *= a;
           }
@@ ... @@
       {
         const typename Vector<Number>::size_type idx_base = threadIdx.x +
                                                             blockIdx.x *
-                                                            (blockDim.x*CHUNK_SIZE);
-        for (unsigned int i=0; i<CHUNK_SIZE; ++i)
+                                                            (blockDim.x*chunk_size);
+        for (unsigned int i=0; i<chunk_size; ++i)
           {
             const typename Vector<Number>::size_type idx = idx_base +
-                                                           i*BLOCK_SIZE;
+                                                           i*block_size;
             if (idx<N)
               v1[idx] = Binop::operation(v1[idx],v2[idx]);
           }
@@ ... @@
                                        typename Vector<Number>::size_type local_idx)
       {
-        if (BLOCK_SIZE >= 64)
+        if (block_size >= 64)
           result_buffer[local_idx] = Operation::reduction_op(result_buffer[local_idx],
                                                              result_buffer[local_idx+32]);
-        if (BLOCK_SIZE >= 32)
+        if (block_size >= 32)
           result_buffer[local_idx] = Operation::reduction_op(result_buffer[local_idx],
                                                              result_buffer[local_idx+16]);
-        if (BLOCK_SIZE >= 16)
+        if (block_size >= 16)
           result_buffer[local_idx] = Operation::reduction_op(result_buffer[local_idx],
                                                              result_buffer[local_idx+8]);
-        if (BLOCK_SIZE >= 8)
+        if (block_size >= 8)
           result_buffer[local_idx] = Operation::reduction_op(result_buffer[local_idx],
                                                              result_buffer[local_idx+4]);
-        if (BLOCK_SIZE >= 4)
+        if (block_size >= 4)
           result_buffer[local_idx] = Operation::reduction_op(result_buffer[local_idx],
                                                              result_buffer[local_idx+2]);
-        if (BLOCK_SIZE >= 2)
+        if (block_size >= 2)
           result_buffer[local_idx] = Operation::reduction_op(result_buffer[local_idx],
                                                              result_buffer[local_idx+1]);
@@ -214,7 +214,7 @@
                                 const typename Vector<Number>::size_type global_idx,
                                 const typename Vector<Number>::size_type N)
       {
-        for (typename Vector<Number>::size_type s=BLOCK_SIZE/2; s>32; s=s>>1)
+        for (typename Vector<Number>::size_type s=block_size/2; s>32; s=s>>1)
           {
             if (local_idx < s)
               result_buffer[local_idx] = Operation::reduction_op(result_buffer[local_idx],
@@ -236,10 +236,10 @@
                 const Number                             *v,
                 const typename Vector<Number>::size_type  N)
       {
-        __shared__ Number result_buffer[BLOCK_SIZE];
+        __shared__ Number result_buffer[block_size];
 
         const typename Vector<Number>::size_type global_idx = threadIdx.x +
-                                                              blockIdx.x*(blockDim.x*CHUNK_SIZE);
+                                                              blockIdx.x*(blockDim.x*chunk_size);
         const typename Vector<Number>::size_type local_idx = threadIdx.x;
 
         if (global_idx<N)
@@ ... @@
                 const typename Vector<Number>::size_type  N)
       {
-        __shared__ Number result_buffer[BLOCK_SIZE];
+        __shared__ Number result_buffer[block_size];
 
         const typename Vector<Number>::size_type global_idx = threadIdx.x +
-                                                              blockIdx.x*(blockDim.x*CHUNK_SIZE);
+                                                              blockIdx.x*(blockDim.x*chunk_size);
         const typename Vector<Number>::size_type local_idx = threadIdx.x;
 
         if (global_idx<N)
           result_buffer[local_idx] = Operation::binary_op(v1[global_idx],
                                                           v2[global_idx]);
         else
           result_buffer[local_idx] = Number();
 
-        for (unsigned int i=1; i<CHUNK_SIZE; ++i)
+        for (unsigned int i=1; i<chunk_size; ++i)
           {
             const typename Vector<Number>::size_type idx = global_idx +
-                                                           i*BLOCK_SIZE;
+                                                           i*block_size;
             if (idx<N)
@@ ... @@
       {
         const typename Vector<Number>::size_type idx_base = threadIdx.x +
                                                             blockIdx.x *
-                                                            (blockDim.x*CHUNK_SIZE);
-        for (unsigned int i=0; i<CHUNK_SIZE; ++i)
+                                                            (blockDim.x*chunk_size);
+        for (unsigned int i=0; i<chunk_size; ++i)
           {
             const typename Vector<Number>::size_type idx = idx_base +
-                                                           i*BLOCK_SIZE;
+                                                           i*block_size;
             if (idx<N)
               val[idx] += a;
           }
@@ ... @@
       {
         const typename Vector<Number>::size_type idx_base = threadIdx.x +
                                                             blockIdx.x *
-                                                            (blockDim.x*CHUNK_SIZE);
-        for (unsigned int i=0; i<CHUNK_SIZE; ++i)
+                                                            (blockDim.x*chunk_size);
+        for (unsigned int i=0; i<chunk_size; ++i)
           {
             const typename Vector<Number>::size_type idx = idx_base +
-                                                           i*BLOCK_SIZE;
+                                                           i*block_size;
             if (idx<N)
               val[idx] += a*V_val[idx];
           }
@@ ... @@
       {
         const typename Vector<Number>::size_type idx_base = threadIdx.x +
                                                             blockIdx.x *
-                                                            (blockDim.x*CHUNK_SIZE);
-        for (unsigned int i=0; i<CHUNK_SIZE; ++i)
+                                                            (blockDim.x*chunk_size);
+        for (unsigned int i=0; i<chunk_size; ++i)
           {
             const typename Vector<Number>::size_type idx = idx_base +
-                                                           i*BLOCK_SIZE;
+                                                           i*block_size;
             if (idx<N)
               val[idx] += a*V_val[idx] + b*W_val[idx];
           }
@@ ... @@
       {
         const typename Vector<Number>::size_type idx_base = threadIdx.x +
                                                             blockIdx.x *
-                                                            (blockDim.x*CHUNK_SIZE);
-        for (unsigned int i=0; i<CHUNK_SIZE; ++i)
+                                                            (blockDim.x*chunk_size);
+        for (unsigned int i=0; i<chunk_size; ++i)
           {
             const typename Vector<Number>::size_type idx = idx_base +
-                                                           i*BLOCK_SIZE;
+                                                           i*block_size;
             if (idx<N)
               val[idx] = s*val[idx] + a*V_val[idx];
           }
@@ ... @@
       {
         const typename Vector<Number>::size_type idx_base = threadIdx.x +
                                                             blockIdx.x *
-                                                            (blockDim.x*CHUNK_SIZE);
-        for (unsigned int i=0; i<CHUNK_SIZE; ++i)
+                                                            (blockDim.x*chunk_size);
+        for (unsigned int i=0; i<chunk_size; ++i)
           {
             const typename Vector<Number>::size_type idx = idx_base +
-                                                           i*BLOCK_SIZE;
+                                                           i*block_size;
             if (idx<N)
               val[idx] *= V_val[idx];
           }
@@ ... @@
       {
         const typename Vector<Number>::size_type idx_base = threadIdx.x +
                                                             blockIdx.x *
-                                                            (blockDim.x*CHUNK_SIZE);
-        for (unsigned int i=0; i<CHUNK_SIZE; ++i)
+                                                            (blockDim.x*chunk_size);
+        for (unsigned int i=0; i<chunk_size; ++i)
           {
             const typename Vector<Number>::size_type idx = idx_base +
-                                                           i*BLOCK_SIZE;
+                                                           i*block_size;
             if (idx<N)
               val[idx] = a*V_val[idx];
           }
@@ ... @@
                                  const typename Vector<Number>::size_type N)
     {
-      __shared__ Number res_buf[BLOCK_SIZE];
+      __shared__ Number res_buf[block_size];
 
       const unsigned int global_idx = threadIdx.x + blockIdx.x *
-                                      (blockDim.x*CHUNK_SIZE);
+                                      (blockDim.x*chunk_size);
       const unsigned int local_idx = threadIdx.x;
 
       if (global_idx < N)
         {
@@ -456,9 +456,9 @@
       else
         res_buf[local_idx] = 0.;
 
-      for (unsigned int i=1; i<CHUNK_SIZE; ++i)
+      for (unsigned int i=1; i<chunk_size; ++i)
         {
-          const unsigned int idx = global_idx + i*BLOCK_SIZE;
+          const unsigned int idx = global_idx + i*block_size;
           if (idx < N)
@@ ... @@
-      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+      const int n_blocks = 1 + (n_elements-1)/(chunk_size*block_size);
       internal::vector_bin_op<Number,internal::Binop_Addition>
-        <<<n_blocks,BLOCK_SIZE>>>(val, tmp, n_elements);
+        <<<n_blocks,block_size>>>(val, tmp, n_elements);
       // Check that the kernel was launched correctly
       AssertCuda(cudaGetLastError());
       // Check that there was no problem during the execution of the kernel
@@ -632,8 +632,8 @@
     Vector<Number> &Vector<Number>::operator*= (const Number factor)
     {
       AssertIsFinite(factor);
-      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
-      internal::vec_scale <<<n_blocks,BLOCK_SIZE>>>(val,
+      const int n_blocks = 1 + (n_elements-1)/(chunk_size*block_size);
+      internal::vec_scale <<<n_blocks,block_size>>>(val,
           factor, n_elements);
 
       // Check that the kernel was launched correctly
@@ -651,8 +651,8 @@
     {
       AssertIsFinite(factor);
       Assert(factor!=Number(0.), ExcZero());
-      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
-      internal::vec_scale <<<n_blocks,BLOCK_SIZE>>>(val,
+      const int n_blocks = 1 + (n_elements-1)/(chunk_size*block_size);
+      internal::vec_scale <<<n_blocks,block_size>>>(val,
           1./factor, n_elements);
 
       // Check that the kernel was launched correctly
@@ -677,10 +677,10 @@
       Assert(down_V.size()==this->size(),
              ExcMessage("Cannot add two vectors with different numbers of elements"));
 
-      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+      const int n_blocks = 1 + (n_elements-1)/(chunk_size*block_size);
       internal::vector_bin_op<Number,internal::Binop_Addition>
-        <<<n_blocks,BLOCK_SIZE>>>(val, down_V.val, n_elements);
+        <<<n_blocks,block_size>>>(val, down_V.val, n_elements);
 
       // Check that the kernel was launched correctly
       AssertCuda(cudaGetLastError());
@@ -704,10 +704,10 @@
       Assert(down_V.size()==this->size(),
              ExcMessage("Cannot add two vectors with different numbers of elements."));
 
-      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+      const int n_blocks = 1 + (n_elements-1)/(chunk_size*block_size);
       internal::vector_bin_op<Number,internal::Binop_Subtraction>
-        <<<n_blocks,BLOCK_SIZE>>>(val, down_V.val, n_elements);
+        <<<n_blocks,block_size>>>(val, down_V.val, n_elements);
 
       // Check that the kernel was launched correctly
       AssertCuda(cudaGetLastError());
@@ -736,9 +736,9 @@
       AssertCuda(error_code);
       error_code = cudaMemset(result_device, Number(), sizeof(Number));
 
-      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+      const int n_blocks = 1 + (n_elements-1)/(chunk_size*block_size);
       internal::double_vector_reduction<Number, internal::DotProduct<Number>>
-        <<<dim3(n_blocks,1),dim3(BLOCK_SIZE)>>> (result_device, val,
+        <<<dim3(n_blocks,1),dim3(block_size)>>> (result_device, val,
             down_V.val,
             static_cast<unsigned int>(n_elements));
@@ -760,8 +760,8 @@
     void Vector<Number>::add(const Number a)
     {
       AssertIsFinite(a);
-      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
-      internal::vec_add <<<n_blocks,BLOCK_SIZE>>>(val, a,
+      const int n_blocks = 1 + (n_elements-1)/(chunk_size*block_size);
+      internal::vec_add <<<n_blocks,block_size>>>(val, a,
           n_elements);
 
       // Check that the kernel was launched correctly
@@ -786,8 +786,8 @@
       Assert(down_V.size() == this->size(),
              ExcMessage("Cannot add two vectors with different numbers of elements."));
 
-      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
-      internal::add_aV <<<dim3(n_blocks,1),dim3(BLOCK_SIZE)>>> (val,
+      const int n_blocks = 1 + (n_elements-1)/(chunk_size*block_size);
+      internal::add_aV <<<dim3(n_blocks,1),dim3(block_size)>>> (val,
           a, down_V.val, n_elements);
 
       // Check that the kernel was launched correctly
@@ -823,8 +823,8 @@
       Assert(down_W.size() == this->size(),
              ExcMessage("Cannot add two vectors with different numbers of elements."));
 
-      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
-      internal::add_aVbW <<<dim3(n_blocks,1),dim3(BLOCK_SIZE)>>> (val,
+      const int n_blocks = 1 + (n_elements-1)/(chunk_size*block_size);
+      internal::add_aVbW <<<dim3(n_blocks,1),dim3(block_size)>>> (val,
           a, down_V.val, b, down_W.val, n_elements);
 
       // Check that the kernel was launched correctly
@@ -851,8 +851,8 @@
       Assert(down_V.size() == this->size(),
              ExcMessage("Cannot add two vectors with different numbers of elements."));
 
-      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
-      internal::sadd <<<dim3(n_blocks,1),dim3(BLOCK_SIZE)>>> (s, val,
+      const int n_blocks = 1 + (n_elements-1)/(chunk_size*block_size);
+      internal::sadd <<<dim3(n_blocks,1),dim3(block_size)>>> (s, val,
           a, down_V.val, n_elements);
 
       // Check that the kernel was launched correctly
@@ -876,8 +876,8 @@
       Assert(down_scaling_factors.size() == this->size(),
              ExcMessage("Cannot scale two vectors with different numbers of elements."));
 
-      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
-      internal::scale <<<dim3(n_blocks,1),dim3(BLOCK_SIZE)>>> (val,
+      const int n_blocks = 1 + (n_elements-1)/(chunk_size*block_size);
+      internal::scale <<<dim3(n_blocks,1),dim3(block_size)>>> (val,
           down_scaling_factors.val, n_elements);
 
       // Check that the kernel was launched correctly
@@ -902,8 +902,8 @@
       Assert(down_V.size() == this->size(),
              ExcMessage("Cannot assign two vectors with different numbers of elements."));
 
-      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
-      internal::equ <<<dim3(n_blocks,1),dim3(BLOCK_SIZE)>>> (val, a,
+      const int n_blocks = 1 + (n_elements-1)/(chunk_size*block_size);
+      internal::equ <<<dim3(n_blocks,1),dim3(block_size)>>> (val, a,
           down_V.val, n_elements);
 
       // Check that the kernel was launched correctly
@@ -930,9 +930,9 @@
       AssertCuda(error_code);
       error_code = cudaMemset(result_device, Number(), sizeof(Number));
 
-      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+      const int n_blocks = 1 + (n_elements-1)/(chunk_size*block_size);
       internal::reduction<Number, internal::ElemSum<Number>>
-        <<<dim3(n_blocks,1),dim3(BLOCK_SIZE)>>> (
+        <<<dim3(n_blocks,1),dim3(block_size)>>> (
           result_device, val,
           n_elements);
@@ -958,9 +958,9 @@
       AssertCuda(error_code);
       error_code = cudaMemset(result_device, Number(), sizeof(Number));
 
-      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+      const int n_blocks = 1 + (n_elements-1)/(chunk_size*block_size);
       internal::reduction<Number, internal::L1Norm<Number>>
-        <<<dim3(n_blocks,1),dim3(BLOCK_SIZE)>>> (
+        <<<dim3(n_blocks,1),dim3(block_size)>>> (
           result_device, val,
           n_elements);
@@ -994,9 +994,9 @@
       AssertCuda(error_code);
       error_code = cudaMemset(result_device, Number(), sizeof(Number));
 
-      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+      const int n_blocks = 1 + (n_elements-1)/(chunk_size*block_size);
       internal::reduction<Number, internal::LInfty<Number>>
-        <<<dim3(n_blocks,1),dim3(BLOCK_SIZE)>>> (
+        <<<dim3(n_blocks,1),dim3(block_size)>>> (
           result_device, val,
           n_elements);
@@ -1041,8 +1041,8 @@
       error_code = cudaMemset(res_d, 0., sizeof(Number));
       AssertCuda(error_code);
 
-      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
-      internal::add_and_dot <<<dim3(n_blocks,1),dim3(BLOCK_SIZE)>>>(
+      const int n_blocks = 1 + (n_elements-1)/(chunk_size*block_size);
+      internal::add_and_dot <<<dim3(n_blocks,1),dim3(block_size)>>>(
         res_d, val, down_V.val, down_W.val, a, n_elements);
 
       Number res;
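
A note on the launch pattern these hunks all share: every kernel is launched with
n_blocks = 1 + (n_elements-1)/(chunk_size*block_size) blocks of block_size threads,
and each thread then walks chunk_size elements spaced block_size apart. The
standalone sketch below is not part of the patch; the toy main(), the vector
length, and the float payload are illustrative assumptions. It only demonstrates
the same grid computation and chunked indexing with constexpr constants in place
of the removed BLOCK_SIZE/CHUNK_SIZE macros.

// Hypothetical standalone example, not deal.II code.
#include <cstdio>
#include <cuda_runtime.h>

constexpr int block_size = 512; // threads per block, as in cuda_size.h
constexpr int chunk_size = 8;   // elements processed per thread

__global__ void vec_scale(float *val, const float a, const unsigned int n)
{
  // First element owned by this thread; each block covers
  // blockDim.x*chunk_size consecutive elements.
  const unsigned int idx_base = threadIdx.x + blockIdx.x*(blockDim.x*chunk_size);
  for (unsigned int i = 0; i < chunk_size; ++i)
    {
      const unsigned int idx = idx_base + i*block_size;
      if (idx < n)
        val[idx] *= a;
    }
}

int main()
{
  const unsigned int n = 10000;
  float *val;
  cudaMallocManaged(&val, n*sizeof(float));
  for (unsigned int i = 0; i < n; ++i)
    val[i] = 1.f;

  // Same formula as the patch: enough blocks so that
  // n_blocks*block_size*chunk_size >= n.
  const int n_blocks = 1 + (n-1)/(chunk_size*block_size);
  vec_scale<<<n_blocks, block_size>>>(val, 2.f, n);
  cudaDeviceSynchronize();

  std::printf("val[0] = %f, val[%u] = %f\n", val[0], n-1, val[n-1]);
  cudaFree(val);
  return 0;
}

Replacing the #define macros with constexpr int constants keeps the values typed
and scoped inside the CUDAWrappers namespace, so they can still be used in
shared-memory array bounds and compile-time branches exactly as the macros were,
without leaking unprefixed names into every translation unit that includes the
header.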