From: Daniel Arndt
Date: Wed, 27 Nov 2019 23:42:45 +0000 (-0500)
Subject: Allow using float shape functions
X-Git-Tag: v9.2.0-rc1~827^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=5b28e6dfa04392124d6c200c5da3c5e927145de1;p=dealii.git

Allow using float shape functions
---

diff --git a/include/deal.II/matrix_free/cuda_matrix_free.templates.h b/include/deal.II/matrix_free/cuda_matrix_free.templates.h
index f7ac0f58d5..9900fe10e2 100644
--- a/include/deal.II/matrix_free/cuda_matrix_free.templates.h
+++ b/include/deal.II/matrix_free/cuda_matrix_free.templates.h
@@ -46,16 +46,80 @@ namespace CUDAWrappers
   namespace internal
   {
     // These variables are stored in the device constant memory.
-    // TODO: use a template parameter
     constexpr unsigned int max_elem_degree = 10;
+
+    template <typename NumberType>
+    using DataArray = NumberType[(max_elem_degree + 1) * (max_elem_degree + 1)];
+
     __constant__ double
-      global_shape_values[(max_elem_degree + 1) * (max_elem_degree + 1)];
+      global_shape_values_d[(max_elem_degree + 1) * (max_elem_degree + 1)];
+    __constant__ float
+      global_shape_values_f[(max_elem_degree + 1) * (max_elem_degree + 1)];
+
+    template <typename Number>
+    __host__ __device__ inline DataArray<Number> &
+    get_global_shape_values();
+
+    template <>
+    __host__ __device__ inline DataArray<double> &
+    get_global_shape_values<double>()
+    {
+      return global_shape_values_d;
+    }
+
+    template <>
+    __host__ __device__ inline DataArray<float> &
+    get_global_shape_values<float>()
+    {
+      return global_shape_values_f;
+    }
+
     __constant__ double
-      global_shape_gradients[(max_elem_degree + 1) * (max_elem_degree + 1)];
+      global_shape_gradients_d[(max_elem_degree + 1) * (max_elem_degree + 1)];
+    __constant__ float
+      global_shape_gradients_f[(max_elem_degree + 1) * (max_elem_degree + 1)];
+
+    template <typename Number>
+    __host__ __device__ inline DataArray<Number> &
+    get_global_shape_gradients();
+
+    template <>
+    __host__ __device__ inline DataArray<double> &
+    get_global_shape_gradients<double>()
+    {
+      return global_shape_gradients_d;
+    }
+
+    template <>
+    __host__ __device__ inline DataArray<float> &
+    get_global_shape_gradients<float>()
+    {
+      return global_shape_gradients_f;
+    }

     // for collocation methods
-    __constant__ double
-      global_co_shape_gradients[(max_elem_degree + 1) * (max_elem_degree + 1)];
+    __constant__ double global_co_shape_gradients_d[(max_elem_degree + 1) *
+                                                    (max_elem_degree + 1)];
+    __constant__ float  global_co_shape_gradients_f[(max_elem_degree + 1) *
+                                                    (max_elem_degree + 1)];
+
+    template <typename Number>
+    __host__ __device__ inline DataArray<Number> &
+    get_global_co_shape_gradients();
+
+    template <>
+    __host__ __device__ inline DataArray<double> &
+    get_global_co_shape_gradients<double>()
+    {
+      return global_co_shape_gradients_d;
+    }
+
+    template <>
+    __host__ __device__ inline DataArray<float> &
+    get_global_co_shape_gradients<float>()
+    {
+      return global_co_shape_gradients_f;
+    }

     template <typename Number>
     using CUDAVector = ::dealii::LinearAlgebra::CUDAWrappers::Vector<Number>;
@@ -805,27 +869,30 @@ namespace CUDAWrappers
       unsigned int size_co_shape_values =
        n_q_points_1d * n_q_points_1d * sizeof(Number);

-      cudaError_t cuda_error = cudaMemcpyToSymbol(internal::global_shape_values,
-                                                  shape_info.shape_values.data(),
-                                                  size_shape_values,
-                                                  0,
-                                                  cudaMemcpyHostToDevice);
+      cudaError_t cuda_error =
+        cudaMemcpyToSymbol(internal::get_global_shape_values<Number>(),
+                           shape_info.shape_values.data(),
+                           size_shape_values,
+                           0,
+                           cudaMemcpyHostToDevice);
       AssertCuda(cuda_error);

       if (update_flags & update_gradients)
         {
-          cuda_error = cudaMemcpyToSymbol(internal::global_shape_gradients,
-                                          shape_info.shape_gradients.data(),
-                                          size_shape_values,
-                                          0,
-                                          cudaMemcpyHostToDevice);
+          cuda_error =
+            cudaMemcpyToSymbol(internal::get_global_shape_gradients<Number>(),
+                               shape_info.shape_gradients.data(),
+                               size_shape_values,
+                               0,
+                               cudaMemcpyHostToDevice);
           AssertCuda(cuda_error);

-          cuda_error = cudaMemcpyToSymbol(internal::global_co_shape_gradients,
-                                          shape_info_co.shape_gradients.data(),
-                                          size_co_shape_values,
-                                          0,
-                                          cudaMemcpyHostToDevice);
+          cuda_error =
+            cudaMemcpyToSymbol(internal::get_global_co_shape_gradients<Number>(),
+                               shape_info_co.shape_gradients.data(),
+                               size_co_shape_values,
+                               0,
+                               cudaMemcpyHostToDevice);
           AssertCuda(cuda_error);
         }
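The hunks above replace each double-only __constant__ array with a per-precision pair plus a templated accessor: __constant__ variables cannot themselves be templated, so one concretely typed symbol per precision is declared and an explicitly specialized getter selects between them at compile time. A minimal standalone sketch of the same pattern; the names constants_d, constants_f, get_constants and upload are illustrative, not deal.II symbols:

    #include <cuda_runtime.h>

    constexpr unsigned int n = 16;

    // One __constant__ buffer per precision.
    __constant__ double constants_d[n];
    __constant__ float  constants_f[n];

    template <typename Number>
    using Array = Number[n];

    // Primary template is only declared; the specializations below return a
    // reference to the buffer matching the requested precision.
    template <typename Number>
    __host__ __device__ Array<Number> &get_constants();

    template <>
    __host__ __device__ Array<double> &get_constants<double>()
    {
      return constants_d;
    }

    template <>
    __host__ __device__ Array<float> &get_constants<float>()
    {
      return constants_f;
    }

    // Host side: cudaMemcpyToSymbol() receives the device symbol itself via
    // the accessor, so upload code is written once for both precisions.
    template <typename Number>
    cudaError_t upload(const Number *host_data)
    {
      return cudaMemcpyToSymbol(get_constants<Number>(),
                                host_data,
                                n * sizeof(Number));
    }

Because the accessor is __host__ __device__, the same name serves both as the symbol argument of cudaMemcpyToSymbol() on the host and as the matrix operand inside the device kernels below.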
diff --git a/include/deal.II/matrix_free/cuda_tensor_product_kernels.h b/include/deal.II/matrix_free/cuda_tensor_product_kernels.h
index bf9a4a3d9a..a5871e1377 100644
--- a/include/deal.II/matrix_free/cuda_tensor_product_kernels.h
+++ b/include/deal.II/matrix_free/cuda_tensor_product_kernels.h
@@ -256,25 +256,37 @@ namespace CUDAWrappers
         {
           case 1:
             {
-              values<0, true, false, true>(global_shape_values, u, u);
+              values<0, true, false, true>(get_global_shape_values<Number>(),
+                                           u,
+                                           u);

               break;
             }
           case 2:
             {
-              values<0, true, false, true>(global_shape_values, u, u);
+              values<0, true, false, true>(get_global_shape_values<Number>(),
+                                           u,
+                                           u);
               __syncthreads();
-              values<1, true, false, true>(global_shape_values, u, u);
+              values<1, true, false, true>(get_global_shape_values<Number>(),
+                                           u,
+                                           u);

               break;
             }
           case 3:
             {
-              values<0, true, false, true>(global_shape_values, u, u);
+              values<0, true, false, true>(get_global_shape_values<Number>(),
+                                           u,
+                                           u);
               __syncthreads();
-              values<1, true, false, true>(global_shape_values, u, u);
+              values<1, true, false, true>(get_global_shape_values<Number>(),
+                                           u,
+                                           u);
               __syncthreads();
-              values<2, true, false, true>(global_shape_values, u, u);
+              values<2, true, false, true>(get_global_shape_values<Number>(),
+                                           u,
+                                           u);

               break;
             }
@@ -300,25 +312,37 @@ namespace CUDAWrappers
         {
           case 1:
             {
-              values<0, false, false, true>(global_shape_values, u, u);
+              values<0, false, false, true>(get_global_shape_values<Number>(),
+                                            u,
+                                            u);

               break;
             }
           case 2:
             {
-              values<0, false, false, true>(global_shape_values, u, u);
+              values<0, false, false, true>(get_global_shape_values<Number>(),
+                                            u,
+                                            u);
               __syncthreads();
-              values<1, false, false, true>(global_shape_values, u, u);
+              values<1, false, false, true>(get_global_shape_values<Number>(),
+                                            u,
+                                            u);

               break;
             }
           case 3:
             {
-              values<0, false, false, true>(global_shape_values, u, u);
+              values<0, false, false, true>(get_global_shape_values<Number>(),
+                                            u,
+                                            u);
               __syncthreads();
-              values<1, false, false, true>(global_shape_values, u, u);
+              values<1, false, false, true>(get_global_shape_values<Number>(),
+                                            u,
+                                            u);
               __syncthreads();
-              values<2, false, false, true>(global_shape_values, u, u);
+              values<2, false, false, true>(get_global_shape_values<Number>(),
+                                            u,
+                                            u);

               break;
             }
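In the evaluator above, each values<direction, ...> call contracts the 1-D shape-value matrix along one tensor direction, and __syncthreads() separates consecutive directions because every sweep reads what the previous one wrote. A greatly simplified sketch of a single such sweep; the real EvaluatorTensorProduct kernels additionally handle per-direction strides, transposition and in-place updates, and the index order here is illustrative:

    // One thread per 1-D output point: out[q] = sum_i shape[q][i] * in[i],
    // where 'shape' points to an n_rows x n_cols matrix in constant memory.
    template <typename Number, int n_rows, int n_cols>
    __device__ void
    apply_1d(const Number *shape, const Number *in, Number *out)
    {
      const int q = threadIdx.x;
      if (q < n_rows)
        {
          Number sum = Number(0.);
          for (int i = 0; i < n_cols; ++i)
            sum += shape[q * n_cols + i] * in[i];
          out[q] = sum;
        }
    }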
@@ -345,61 +369,61 @@ namespace CUDAWrappers
         {
           case 1:
             {
-              gradients<0, true, false, false>(global_shape_gradients,
-                                               u,
-                                               grad_u[0]);
+              gradients<0, true, false, false>(
+                get_global_shape_gradients<Number>(), u, grad_u[0]);

               break;
             }
           case 2:
             {
-              gradients<0, true, false, false>(global_shape_gradients,
-                                               u,
-                                               grad_u[0]);
-              values<0, true, false, false>(global_shape_values, u, grad_u[1]);
+              gradients<0, true, false, false>(
+                get_global_shape_gradients<Number>(), u, grad_u[0]);
+              values<0, true, false, false>(get_global_shape_values<Number>(),
+                                            u,
+                                            grad_u[1]);

               __syncthreads();

-              values<1, true, false, true>(global_shape_values,
+              values<1, true, false, true>(get_global_shape_values<Number>(),
                                            grad_u[0],
                                            grad_u[0]);
-              gradients<1, true, false, true>(global_shape_gradients,
-                                              grad_u[1],
-                                              grad_u[1]);
+              gradients<1, true, false, true>(
+                get_global_shape_gradients<Number>(), grad_u[1], grad_u[1]);

               break;
             }
           case 3:
             {
-              gradients<0, true, false, false>(global_shape_gradients,
-                                               u,
-                                               grad_u[0]);
-              values<0, true, false, false>(global_shape_values, u, grad_u[1]);
-              values<0, true, false, false>(global_shape_values, u, grad_u[2]);
+              gradients<0, true, false, false>(
+                get_global_shape_gradients<Number>(), u, grad_u[0]);
+              values<0, true, false, false>(get_global_shape_values<Number>(),
+                                            u,
+                                            grad_u[1]);
+              values<0, true, false, false>(get_global_shape_values<Number>(),
+                                            u,
+                                            grad_u[2]);

               __syncthreads();

-              values<1, true, false, true>(global_shape_values,
+              values<1, true, false, true>(get_global_shape_values<Number>(),
                                            grad_u[0],
                                            grad_u[0]);
-              gradients<1, true, false, true>(global_shape_gradients,
-                                              grad_u[1],
-                                              grad_u[1]);
-              values<1, true, false, true>(global_shape_values,
+              gradients<1, true, false, true>(
+                get_global_shape_gradients<Number>(), grad_u[1], grad_u[1]);
+              values<1, true, false, true>(get_global_shape_values<Number>(),
                                            grad_u[2],
                                            grad_u[2]);

               __syncthreads();

-              values<2, true, false, true>(global_shape_values,
+              values<2, true, false, true>(get_global_shape_values<Number>(),
                                            grad_u[0],
                                            grad_u[0]);
-              values<2, true, false, true>(global_shape_values,
+              values<2, true, false, true>(get_global_shape_values<Number>(),
                                            grad_u[1],
                                            grad_u[1]);
-              gradients<2, true, false, true>(global_shape_gradients,
-                                              grad_u[2],
-                                              grad_u[2]);
+              gradients<2, true, false, true>(
+                get_global_shape_gradients<Number>(), grad_u[2], grad_u[2]);

               break;
             }
@@ -427,49 +451,55 @@ namespace CUDAWrappers
         {
           case 1:
             {
-              values<0, true, false, true>(global_shape_values, u, u);
+              values<0, true, false, true>(get_global_shape_values<Number>(),
+                                           u,
+                                           u);
               __syncthreads();

-              gradients<0, true, false, false>(global_co_shape_gradients,
-                                               u,
-                                               grad_u[0]);
+              gradients<0, true, false, false>(
+                get_global_co_shape_gradients<Number>(), u, grad_u[0]);

               break;
             }
           case 2:
             {
-              values<0, true, false, true>(global_shape_values, u, u);
+              values<0, true, false, true>(get_global_shape_values<Number>(),
+                                           u,
+                                           u);
               __syncthreads();
-              values<1, true, false, true>(global_shape_values, u, u);
+              values<1, true, false, true>(get_global_shape_values<Number>(),
+                                           u,
+                                           u);
               __syncthreads();

-              gradients<0, true, false, false>(global_co_shape_gradients,
-                                               u,
-                                               grad_u[0]);
-              gradients<1, true, false, false>(global_co_shape_gradients,
-                                               u,
-                                               grad_u[1]);
+              gradients<0, true, false, false>(
+                get_global_co_shape_gradients<Number>(), u, grad_u[0]);
+              gradients<1, true, false, false>(
+                get_global_co_shape_gradients<Number>(), u, grad_u[1]);

               break;
             }
           case 3:
             {
-              values<0, true, false, true>(global_shape_values, u, u);
+              values<0, true, false, true>(get_global_shape_values<Number>(),
+                                           u,
+                                           u);
               __syncthreads();
-              values<1, true, false, true>(global_shape_values, u, u);
+              values<1, true, false, true>(get_global_shape_values<Number>(),
+                                           u,
+                                           u);
               __syncthreads();
-              values<2, true, false, true>(global_shape_values, u, u);
+              values<2, true, false, true>(get_global_shape_values<Number>(),
+                                           u,
+                                           u);
               __syncthreads();

-              gradients<0, true, false, false>(global_co_shape_gradients,
-                                               u,
-                                               grad_u[0]);
-              gradients<1, true, false, false>(global_co_shape_gradients,
-                                               u,
-                                               grad_u[1]);
-              gradients<2, true, false, false>(global_co_shape_gradients,
-                                               u,
-                                               grad_u[2]);
+              gradients<0, true, false, false>(
+                get_global_co_shape_gradients<Number>(), u, grad_u[0]);
+              gradients<1, true, false, false>(
+                get_global_co_shape_gradients<Number>(), u, grad_u[1]);
+              gradients<2, true, false, false>(
+                get_global_co_shape_gradients<Number>(), u, grad_u[2]);

               break;
             }
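In the collocation path above, nodal values are first interpolated to the quadrature points in every direction and only then differentiated with the square collocation matrix. Schematically, in 2-D and up to the ordering of directions,

    u_q       = (S ⊗ S) u
    grad_u[d] = (D_co applied along direction d) u_q

where S is the n_q_points_1d × (fe_degree + 1) shape-value matrix and D_co the n_q_points_1d × n_q_points_1d collocation derivative matrix. The rectangular matrix S is applied once, so the derivative sweeps only ever touch the square matrix D_co.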
@@ -497,64 +527,64 @@ namespace CUDAWrappers
         {
           case 1:
             {
-              gradients<0, false, add, false>(global_shape_gradients,
-                                              grad_u[dim],
-                                              u);
+              gradients<0, false, add, false>(
+                get_global_shape_gradients<Number>(), grad_u[dim], u);

               break;
             }
           case 2:
             {
-              gradients<0, false, false, true>(global_shape_gradients,
-                                               grad_u[0],
-                                               grad_u[0]);
-              values<0, false, false, true>(global_shape_values,
+              gradients<0, false, false, true>(
+                get_global_shape_gradients<Number>(), grad_u[0], grad_u[0]);
+              values<0, false, false, true>(get_global_shape_values<Number>(),
                                             grad_u[1],
                                             grad_u[1]);

               __syncthreads();

-              values<1, false, add, false>(global_shape_values, grad_u[0], u);
+              values<1, false, add, false>(get_global_shape_values<Number>(),
+                                           grad_u[0],
+                                           u);
               __syncthreads();
-              gradients<1, false, true, false>(global_shape_gradients,
-                                               grad_u[1],
-                                               u);
+              gradients<1, false, true, false>(
+                get_global_shape_gradients<Number>(), grad_u[1], u);

               break;
             }
           case 3:
             {
-              gradients<0, false, false, true>(global_shape_gradients,
-                                               grad_u[0],
-                                               grad_u[0]);
-              values<0, false, false, true>(global_shape_values,
+              gradients<0, false, false, true>(
+                get_global_shape_gradients<Number>(), grad_u[0], grad_u[0]);
+              values<0, false, false, true>(get_global_shape_values<Number>(),
                                             grad_u[1],
                                             grad_u[1]);
-              values<0, false, false, true>(global_shape_values,
+              values<0, false, false, true>(get_global_shape_values<Number>(),
                                             grad_u[2],
                                             grad_u[2]);

               __syncthreads();

-              values<1, false, false, true>(global_shape_values,
+              values<1, false, false, true>(get_global_shape_values<Number>(),
                                             grad_u[0],
                                             grad_u[0]);
-              gradients<1, false, false, true>(global_shape_gradients,
-                                               grad_u[1],
-                                               grad_u[1]);
-              values<1, false, false, true>(global_shape_values,
+              gradients<1, false, false, true>(
+                get_global_shape_gradients<Number>(), grad_u[1], grad_u[1]);
+              values<1, false, false, true>(get_global_shape_values<Number>(),
                                             grad_u[2],
                                             grad_u[2]);

               __syncthreads();

-              values<2, false, add, false>(global_shape_values, grad_u[0], u);
+              values<2, false, add, false>(get_global_shape_values<Number>(),
+                                           grad_u[0],
+                                           u);
               __syncthreads();
-              values<2, false, true, false>(global_shape_values, grad_u[1], u);
+              values<2, false, true, false>(get_global_shape_values<Number>(),
+                                            grad_u[1],
+                                            u);
               __syncthreads();
-              gradients<2, false, true, false>(global_shape_gradients,
-                                               grad_u[2],
-                                               u);
+              gradients<2, false, true, false>(
+                get_global_shape_gradients<Number>(), grad_u[2], u);

               break;
             }
@@ -582,53 +612,59 @@ namespace CUDAWrappers
         {
           case 1:
             {
-              gradients<0, false, true, false>(global_co_shape_gradients,
-                                               grad_u[0],
-                                               u);
+              gradients<0, false, true, false>(
+                get_global_co_shape_gradients<Number>(), grad_u[0], u);
               __syncthreads();

-              values<0, false, false, true>(global_shape_values, u, u);
+              values<0, false, false, true>(get_global_shape_values<Number>(),
+                                            u,
+                                            u);

               break;
             }
           case 2:
             {
-              gradients<1, false, true, false>(global_co_shape_gradients,
-                                               grad_u[1],
-                                               u);
+              gradients<1, false, true, false>(
+                get_global_co_shape_gradients<Number>(), grad_u[1], u);
               __syncthreads();
-              gradients<0, false, true, false>(global_co_shape_gradients,
-                                               grad_u[0],
-                                               u);
+              gradients<0, false, true, false>(
+                get_global_co_shape_gradients<Number>(), grad_u[0], u);
               __syncthreads();

-              values<1, false, false, true>(global_shape_values, u, u);
+              values<1, false, false, true>(get_global_shape_values<Number>(),
+                                            u,
+                                            u);
               __syncthreads();
-              values<0, false, false, true>(global_shape_values, u, u);
+              values<0, false, false, true>(get_global_shape_values<Number>(),
+                                            u,
+                                            u);
               __syncthreads();

               break;
             }
           case 3:
             {
-              gradients<2, false, true, false>(global_co_shape_gradients,
-                                               grad_u[2],
-                                               u);
+              gradients<2, false, true, false>(
+                get_global_co_shape_gradients<Number>(), grad_u[2], u);
               __syncthreads();
-              gradients<1, false, true, false>(global_co_shape_gradients,
-                                               grad_u[1],
-                                               u);
+              gradients<1, false, true, false>(
+                get_global_co_shape_gradients<Number>(), grad_u[1], u);
               __syncthreads();
-              gradients<0, false, true, false>(global_co_shape_gradients,
-                                               grad_u[0],
-                                               u);
+              gradients<0, false, true, false>(
+                get_global_co_shape_gradients<Number>(), grad_u[0], u);
               __syncthreads();

-              values<2, false, false, true>(global_shape_values, u, u);
+              values<2, false, false, true>(get_global_shape_values<Number>(),
+                                            u,
+                                            u);
               __syncthreads();
-              values<1, false, false, true>(global_shape_values, u, u);
+              values<1, false, false, true>(get_global_shape_values<Number>(),
+                                            u,
+                                            u);
               __syncthreads();
-              values<0, false, false, true>(global_shape_values, u, u);
+              values<0, false, false, true>(get_global_shape_values<Number>(),
+                                            u,
+                                            u);
               __syncthreads();

               break;
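With both precisions resident in constant memory, the host-side matrix-free framework can be instantiated for Number = float as well as double. A usage sketch, assuming the 9.2-era CUDAWrappers::MatrixFree::reinit() overload that takes a mapping; the surrounding objects and the choice of update flags are illustrative placeholders:

    #include <deal.II/base/quadrature_lib.h>
    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/fe/mapping_q_generic.h>
    #include <deal.II/lac/affine_constraints.h>
    #include <deal.II/matrix_free/cuda_matrix_free.h>

    template <int dim, int fe_degree>
    void
    setup_float_matrix_free(const dealii::DoFHandler<dim> &         dof_handler,
                            const dealii::AffineConstraints<float> &constraints)
    {
      using MF = dealii::CUDAWrappers::MatrixFree<dim, float>;
      typename MF::AdditionalData additional_data;
      additional_data.mapping_update_flags =
        dealii::update_values | dealii::update_gradients |
        dealii::update_JxW_values;

      MF matrix_free;
      // reinit() triggers the cudaMemcpyToSymbol() transfer shown earlier,
      // which now fills the float constant-memory buffers.
      matrix_free.reinit(dealii::MappingQGeneric<dim>(fe_degree),
                         dof_handler,
                         constraints,
                         dealii::QGauss<1>(fe_degree + 1),
                         additional_data);
    }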
diff --git a/tests/cuda/cuda_evaluate_1d_shape.cu b/tests/cuda/cuda_evaluate_1d_shape.cu
index 23a7ba1c4f..e6e0a2fc8f 100644
--- a/tests/cuda/cuda_evaluate_1d_shape.cu
+++ b/tests/cuda/cuda_evaluate_1d_shape.cu
@@ -46,10 +46,10 @@ evaluate_tensor_product(double *dst, double *src)

   if (type == 0)
     evaluator.template values<0, dof_to_quad, add, false>(
-      CUDAWrappers::internal::global_shape_values, src, dst);
+      CUDAWrappers::internal::get_global_shape_values<double>(), src, dst);
   if (type == 1)
     evaluator.template gradients<0, dof_to_quad, add, false>(
-      CUDAWrappers::internal::global_shape_values, src, dst);
+      CUDAWrappers::internal::get_global_shape_values<double>(), src, dst);
 }

 template <int M, int N, int type, bool add>
@@ -98,20 +98,20 @@ test()

   unsigned int size_shape_values = M * N * sizeof(double);

-  cudaError_t cuda_error =
-    cudaMemcpyToSymbol(CUDAWrappers::internal::global_shape_values,
-                       shape_host.begin(),
-                       size_shape_values,
-                       0,
-                       cudaMemcpyHostToDevice);
+  cudaError_t cuda_error = cudaMemcpyToSymbol(
+    CUDAWrappers::internal::get_global_shape_values<double>(),
+    shape_host.begin(),
+    size_shape_values,
+    0,
+    cudaMemcpyHostToDevice);
   AssertCuda(cuda_error);

-  cuda_error =
-    cudaMemcpyToSymbol(CUDAWrappers::internal::global_shape_gradients,
-                       shape_host.begin(),
-                       size_shape_values,
-                       0,
-                       cudaMemcpyHostToDevice);
+  cuda_error = cudaMemcpyToSymbol(
+    CUDAWrappers::internal::get_global_shape_gradients<double>(),
+    shape_host.begin(),
+    size_shape_values,
+    0,
+    cudaMemcpyHostToDevice);
   AssertCuda(cuda_error);

   // Launch the kernel
diff --git a/tests/cuda/cuda_evaluate_2d_shape.cu b/tests/cuda/cuda_evaluate_2d_shape.cu
index 24a8d992d1..b424baceb8 100644
--- a/tests/cuda/cuda_evaluate_2d_shape.cu
+++ b/tests/cuda/cuda_evaluate_2d_shape.cu
@@ -47,18 +47,18 @@ evaluate_tensor_product(double *dst, double *src)
   if (type == 0)
     {
       evaluator.template values<0, dof_to_quad, false, false>(
-        CUDAWrappers::internal::global_shape_values, src, src);
+        CUDAWrappers::internal::get_global_shape_values<double>(), src, src);
       __syncthreads();
       evaluator.template values<1, dof_to_quad, add, false>(
-        CUDAWrappers::internal::global_shape_values, src, dst);
+        CUDAWrappers::internal::get_global_shape_values<double>(), src, dst);
     }
   if (type == 1)
     {
       evaluator.template gradients<0, dof_to_quad, false, false>(
-        CUDAWrappers::internal::global_shape_values, src, src);
+        CUDAWrappers::internal::get_global_shape_values<double>(), src, src);
       __syncthreads();
       evaluator.template gradients<1, dof_to_quad, add, false>(
-        CUDAWrappers::internal::global_shape_values, src, dst);
+        CUDAWrappers::internal::get_global_shape_values<double>(), src, dst);
     }
 }
@@ -123,20 +123,20 @@ test()

   unsigned int size_shape_values = M * N * sizeof(double);

-  cudaError_t cuda_error =
-    cudaMemcpyToSymbol(CUDAWrappers::internal::global_shape_values,
-                       shape_host.begin(),
-                       size_shape_values,
-                       0,
-                       cudaMemcpyHostToDevice);
+  cudaError_t cuda_error = cudaMemcpyToSymbol(
+    CUDAWrappers::internal::get_global_shape_values<double>(),
+    shape_host.begin(),
+    size_shape_values,
+    0,
+    cudaMemcpyHostToDevice);
   AssertCuda(cuda_error);

-  cuda_error =
-    cudaMemcpyToSymbol(CUDAWrappers::internal::global_shape_gradients,
-                       shape_host.begin(),
-                       size_shape_values,
-                       0,
-                       cudaMemcpyHostToDevice);
+  cuda_error = cudaMemcpyToSymbol(
+    CUDAWrappers::internal::get_global_shape_gradients<double>(),
+    shape_host.begin(),
+    size_shape_values,
+    0,
+    cudaMemcpyHostToDevice);
   AssertCuda(cuda_error);

   // Launch the kernel
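Both tests still drive only the double specialization. A float variant of the upload above would merely switch the accessor and the element size; an illustrative (hypothetical, not part of this commit) fragment, where shape_host_f is assumed to hold float shape values:

    unsigned int size_shape_values_f = M * N * sizeof(float);

    // Hypothetical float analogue of the upload above: the same call shape,
    // but targeting the float constant-memory buffer.
    cuda_error = cudaMemcpyToSymbol(
      CUDAWrappers::internal::get_global_shape_values<float>(),
      shape_host_f.begin(),
      size_shape_values_f,
      0,
      cudaMemcpyHostToDevice);
    AssertCuda(cuda_error);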