From: Peter Munch
Date: Mon, 23 Sep 2019 13:38:40 +0000 (+0200)
Subject: Merge computation of values and gradients in the matrix-free CUDA code
X-Git-Tag: v9.2.0-rc1~1025^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=4dbc095ed8e91419e2a6ba6f9276e337052654a2;p=dealii.git

Merge computation of values and gradients in the matrix-free CUDA code
---

diff --git a/doc/news/changes/minor/20190924PeterMunch-1 b/doc/news/changes/minor/20190924PeterMunch-1
new file mode 100644
index 0000000000..f317f2e2d0
--- /dev/null
+++ b/doc/news/changes/minor/20190924PeterMunch-1
@@ -0,0 +1,4 @@
+Improved: Evaluate/integrate the gradients in CUDAWrappers::EvaluatorTensorProduct
+with the collocation method if both values and gradients are requested.
+
+(Peter Munch, 2019/09/24)
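
The following note is background, not part of the commit. The collocation
trick rests on the 1D identity D = D_co * S: S (shape_values) interpolates
nodal coefficients to the quadrature points, D (shape_gradients)
differentiates the nodal basis at the quadrature points, and D_co is the
derivative matrix of the Lagrange basis supported on the quadrature points
themselves, which is what the new global_co_shape_gradients symbol below
stores. Once u_q = S u is known, every gradient component costs only one
additional 1D sweep with D_co. A self-contained C++ check of the identity for
degree 1 and two-point Gauss quadrature (all names are local to this sketch):

    #include <cmath>
    #include <cstdio>

    int main()
    {
      // two-point Gauss quadrature on [0, 1]
      const double q[2] = {0.5 - 0.5 / std::sqrt(3.0),
                           0.5 + 0.5 / std::sqrt(3.0)};

      // S(i,j): value of the nodal (linear Lagrange) basis function j at q[i]
      const double S[2][2] = {{1.0 - q[0], q[0]}, {1.0 - q[1], q[1]}};
      // D(i,j): derivative of the nodal basis function j at q[i]
      const double D[2][2] = {{-1.0, 1.0}, {-1.0, 1.0}};
      // Dco(i,j): derivative at q[i] of the Lagrange polynomial that is one
      // at q[j] and zero at the other quadrature point
      const double c         = 1.0 / (q[0] - q[1]);
      const double Dco[2][2] = {{c, -c}, {c, -c}};

      // verify D == Dco * S entry by entry
      for (int i = 0; i < 2; ++i)
        for (int j = 0; j < 2; ++j)
          {
            const double dcs = Dco[i][0] * S[0][j] + Dco[i][1] * S[1][j];
            std::printf("D(%d,%d) = %+.6f   (Dco*S)(%d,%d) = %+.6f\n",
                        i, j, D[i][j], i, j, dcs);
          }
      return 0;
    }
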
diff --git a/include/deal.II/matrix_free/cuda_fe_evaluation.h b/include/deal.II/matrix_free/cuda_fe_evaluation.h
index b2bb935ccf..1305e379fb 100644
--- a/include/deal.II/matrix_free/cuda_fe_evaluation.h
+++ b/include/deal.II/matrix_free/cuda_fe_evaluation.h
@@ -314,13 +314,18 @@ namespace CUDAWrappers
                            n_q_points_1d,
                            Number>
       evaluator_tensor_product;
-    if (evaluate_grad == true)
+    if (evaluate_val == true && evaluate_grad == true)
+      {
+        evaluator_tensor_product.value_and_gradient_at_quad_pts(values,
+                                                                gradients);
+        __syncthreads();
+      }
+    else if (evaluate_grad == true)
       {
         evaluator_tensor_product.gradient_at_quad_pts(values, gradients);
         __syncthreads();
       }
-
-    if (evaluate_val == true)
+    else if (evaluate_val == true)
       {
         evaluator_tensor_product.value_at_quad_pts(values);
         __syncthreads();
@@ -346,16 +351,15 @@ namespace CUDAWrappers
                            n_q_points_1d,
                            Number>
       evaluator_tensor_product;
-    if (integrate_val == true)
+    if (integrate_val == true && integrate_grad == true)
+      {
+        evaluator_tensor_product.integrate_value_and_gradient(values,
+                                                              gradients);
+      }
+    else if (integrate_val == true)
       {
         evaluator_tensor_product.integrate_value(values);
         __syncthreads();
-        if (integrate_grad == true)
-          {
-            evaluator_tensor_product.integrate_gradient(values,
-                                                        gradients);
-            __syncthreads();
-          }
       }
     else if (integrate_grad == true)
       {
diff --git a/include/deal.II/matrix_free/cuda_matrix_free.templates.h b/include/deal.II/matrix_free/cuda_matrix_free.templates.h
index ace2e804f7..cca2c7e758 100644
--- a/include/deal.II/matrix_free/cuda_matrix_free.templates.h
+++ b/include/deal.II/matrix_free/cuda_matrix_free.templates.h
@@ -26,6 +26,7 @@
 #  include
 
+#  include <deal.II/fe/fe_dgq.h>
 #  include
 
 #  include
@@ -52,6 +53,10 @@ namespace CUDAWrappers
     __constant__ double
       global_shape_gradients[(max_elem_degree + 1) * (max_elem_degree + 1)];
 
+    // for collocation methods
+    __constant__ double
+      global_co_shape_gradients[(max_elem_degree + 1) * (max_elem_degree + 1)];
+
     template <typename Number>
     using CUDAVector = ::dealii::LinearAlgebra::CUDAWrappers::Vector<Number>;
@@ -835,6 +840,13 @@ namespace CUDAWrappers
       unsigned int size_shape_values = n_dofs_1d * n_q_points_1d * sizeof(Number);
 
+      FE_DGQArbitraryNodes<1> fe_quad_co(quad);
+      const ::dealii::internal::MatrixFreeFunctions::ShapeInfo<Number>
+        shape_info_co(quad, fe_quad_co);
+
+      unsigned int size_co_shape_values =
+        n_q_points_1d * n_q_points_1d * sizeof(Number);
+
       cudaError_t cuda_error =
         cudaMemcpyToSymbol(internal::global_shape_values,
                            shape_info.shape_values.data(),
                            size_shape_values,
@@ -850,6 +862,13 @@ namespace CUDAWrappers
                            0,
                            cudaMemcpyHostToDevice);
       AssertCuda(cuda_error);
+
+      cuda_error = cudaMemcpyToSymbol(internal::global_co_shape_gradients,
+                                      shape_info_co.shape_gradients.data(),
+                                      size_co_shape_values,
+                                      0,
+                                      cudaMemcpyHostToDevice);
+      AssertCuda(cuda_error);
     }
 
     // Setup the number of cells per CUDA thread block
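
An aside on the storage choice (the kernel below is an invented miniature, not
deal.II code): the 1D matrices live in __constant__ memory because during a
tensor-product sweep all threads of a warp read the same matrix entry at the
same time, and constant memory broadcasts such uniform loads to the whole
warp. Compiled with nvcc, this sketch applies the two-point collocation
derivative matrix from the earlier note to the values (1, 2):

    #include <cstdio>

    // a 2x2 matrix in constant memory, here a collocation derivative matrix
    __constant__ double d_matrix[4];

    __global__ void apply_1d(const double *in, double *out)
    {
      // thread i computes row i of the matrix-vector product; at each step
      // of the loop the whole warp reads the same d_matrix entry (broadcast)
      const int i   = threadIdx.x;
      double    sum = 0.;
      for (int j = 0; j < 2; ++j)
        sum += d_matrix[i * 2 + j] * in[j];
      out[i] = sum;
    }

    int main()
    {
      // D_co for two-point Gauss on [0, 1]; rows are (-sqrt(3), +sqrt(3))
      const double h_matrix[4] = {-1.7320508, 1.7320508,
                                  -1.7320508, 1.7320508};
      cudaMemcpyToSymbol(d_matrix, h_matrix, sizeof(h_matrix));

      double *in, *out;
      cudaMallocManaged(&in, 2 * sizeof(double));
      cudaMallocManaged(&out, 2 * sizeof(double));
      in[0] = 1.;
      in[1] = 2.;
      apply_1d<<<1, 2>>>(in, out);
      cudaDeviceSynchronize();
      // the linear function through (q0, 1) and (q1, 2) has slope sqrt(3),
      // so both entries print 1.732051
      std::printf("%f %f\n", out[0], out[1]);
      cudaFree(in);
      cudaFree(out);
      return 0;
    }
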
diff --git a/include/deal.II/matrix_free/cuda_tensor_product_kernels.h b/include/deal.II/matrix_free/cuda_tensor_product_kernels.h
index 5c5b55acd3..bf9a4a3d9a 100644
--- a/include/deal.II/matrix_free/cuda_tensor_product_kernels.h
+++ b/include/deal.II/matrix_free/cuda_tensor_product_kernels.h
@@ -90,7 +90,7 @@ namespace CUDAWrappers
        */
       template <int direction, bool dof_to_quad, bool add, bool in_place>
       __device__ void
-      values(const Number *in, Number *out) const;
+      values(Number shape_values[], const Number *in, Number *out) const;
 
       /**
        * Evaluate the gradient of a finite element function at the quadrature
@@ -98,7 +98,7 @@ namespace CUDAWrappers
        */
       template <int direction, bool dof_to_quad, bool add, bool in_place>
       __device__ void
-      gradients(const Number *in, Number *out) const;
+      gradients(Number shape_gradients[], const Number *in, Number *out) const;
 
       /**
        * Helper function for values() and gradients().
@@ -126,6 +126,13 @@ namespace CUDAWrappers
       __device__ void
       gradient_at_quad_pts(const Number *const u, Number *grad_u[dim]);
 
+      /**
+       * Evaluate the values and the gradients of the finite element function at
+       * the quadrature points.
+       */
+      __device__ void
+      value_and_gradient_at_quad_pts(Number *const u, Number *grad_u[dim]);
+
       /**
        * Helper function for integrate(). Integrate the gradients of the finite
        * element function.
@@ -133,6 +140,13 @@ namespace CUDAWrappers
       template <bool add>
       __device__ void
       integrate_gradient(Number *u, Number *grad_u[dim]);
+
+      /**
+       * Helper function for integrate(). Integrate the values and the gradients
+       * of the finite element function.
+       */
+      __device__ void
+      integrate_value_and_gradient(Number *u, Number *grad_u[dim]);
     };
 
 
@@ -155,11 +169,11 @@ namespace CUDAWrappers
                            dim,
                            fe_degree,
                            n_q_points_1d,
-                           Number>::values(const Number *in, Number *out) const
+                           Number>::values(Number shape_values[],
+                                           const Number *in,
+                                           Number *      out) const
     {
-      apply<direction, dof_to_quad, add, in_place>(global_shape_values,
-                                                   in,
-                                                   out);
+      apply<direction, dof_to_quad, add, in_place>(shape_values, in, out);
     }
 
 
@@ -171,12 +185,11 @@ namespace CUDAWrappers
                            dim,
                            fe_degree,
                            n_q_points_1d,
-                           Number>::gradients(const Number *in,
+                           Number>::gradients(Number shape_gradients[],
+                                              const Number *in,
                                               Number *      out) const
     {
-      apply<direction, dof_to_quad, add, in_place>(global_shape_gradients,
-                                                   in,
-                                                   out);
+      apply<direction, dof_to_quad, add, in_place>(shape_gradients, in, out);
     }
 
 
@@ -243,25 +256,25 @@ namespace CUDAWrappers
         {
           case 1:
             {
-              values<0, true, false, true>(u, u);
+              values<0, true, false, true>(global_shape_values, u, u);
 
               break;
             }
           case 2:
             {
-              values<0, true, false, true>(u, u);
+              values<0, true, false, true>(global_shape_values, u, u);
               __syncthreads();
-              values<1, true, false, true>(u, u);
+              values<1, true, false, true>(global_shape_values, u, u);
 
               break;
             }
           case 3:
             {
-              values<0, true, false, true>(u, u);
+              values<0, true, false, true>(global_shape_values, u, u);
               __syncthreads();
-              values<1, true, false, true>(u, u);
+              values<1, true, false, true>(global_shape_values, u, u);
               __syncthreads();
-              values<2, true, false, true>(u, u);
+              values<2, true, false, true>(global_shape_values, u, u);
 
               break;
             }
@@ -287,25 +300,25 @@ namespace CUDAWrappers
         {
           case 1:
             {
-              values<0, false, false, true>(u, u);
+              values<0, false, false, true>(global_shape_values, u, u);
 
               break;
             }
           case 2:
             {
-              values<0, false, false, true>(u, u);
+              values<0, false, false, true>(global_shape_values, u, u);
               __syncthreads();
-              values<1, false, false, true>(u, u);
+              values<1, false, false, true>(global_shape_values, u, u);
 
               break;
             }
           case 3:
             {
-              values<0, false, false, true>(u, u);
+              values<0, false, false, true>(global_shape_values, u, u);
               __syncthreads();
-              values<1, false, false, true>(u, u);
+              values<1, false, false, true>(global_shape_values, u, u);
               __syncthreads();
-              values<2, false, false, true>(u, u);
+              values<2, false, false, true>(global_shape_values, u, u);
 
               break;
             }
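
For orientation: each values<direction, ...> or gradients<direction, ...> call
above is one sum-factorization sweep, i.e. the 1D matrix is applied along a
single direction of the tensor-product coefficient array, so dim sweeps
realize the full tensor-product operator in O(dim * n^(dim+1)) operations
instead of O(n^(2*dim)). A plain-C++ miniature for dim = 2 follows; it is an
illustration of the idea, not the library's apply() implementation, and all
names are local to the sketch:

    #include <cstdio>

    constexpr int n = 3; // points per direction

    // apply the 1D matrix M along 'direction' of the n x n array 'in'
    template <int direction>
    void sweep(const double M[n][n], const double *in, double *out)
    {
      for (int i = 0; i < n; ++i)
        for (int j = 0; j < n; ++j)
          {
            double sum = 0.;
            for (int k = 0; k < n; ++k)
              sum += (direction == 0) ? M[i][k] * in[k * n + j]  // x sweep
                                      : M[j][k] * in[i * n + k]; // y sweep
            out[i * n + j] = sum;
          }
    }

    int main()
    {
      const double M[n][n] = {{1, 0, 0}, {0, 2, 0}, {0, 0, 3}};
      double       u[n * n], tmp[n * n], uq[n * n];
      for (int i = 0; i < n * n; ++i)
        u[i] = 1.0;

      sweep<0>(M, u, tmp);  // like values<0, ...>(...)
      sweep<1>(M, tmp, uq); // like values<1, ...>(...): uq = (M x M) u

      for (int i = 0; i < n * n; ++i)
        std::printf("%g%c", uq[i], (i % n == n - 1) ? '\n' : ' ');
      return 0;
    }
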
@@ -332,39 +345,131 @@ namespace CUDAWrappers
         {
           case 1:
             {
-              gradients<0, true, false, false>(u, grad_u[0]);
+              gradients<0, true, false, false>(global_shape_gradients,
+                                               u,
+                                               grad_u[0]);
 
               break;
             }
           case 2:
             {
-              gradients<0, true, false, false>(u, grad_u[0]);
-              values<0, true, false, false>(u, grad_u[1]);
+              gradients<0, true, false, false>(global_shape_gradients,
+                                               u,
+                                               grad_u[0]);
+              values<0, true, false, false>(global_shape_values, u, grad_u[1]);
 
               __syncthreads();
 
-              values<1, true, false, true>(grad_u[0], grad_u[0]);
-              gradients<1, true, false, true>(grad_u[1], grad_u[1]);
+              values<1, true, false, true>(global_shape_values,
+                                           grad_u[0],
+                                           grad_u[0]);
+              gradients<1, true, false, true>(global_shape_gradients,
+                                              grad_u[1],
+                                              grad_u[1]);
 
               break;
             }
           case 3:
             {
-              gradients<0, true, false, false>(u, grad_u[0]);
-              values<0, true, false, false>(u, grad_u[1]);
-              values<0, true, false, false>(u, grad_u[2]);
+              gradients<0, true, false, false>(global_shape_gradients,
+                                               u,
+                                               grad_u[0]);
+              values<0, true, false, false>(global_shape_values, u, grad_u[1]);
+              values<0, true, false, false>(global_shape_values, u, grad_u[2]);
+
+              __syncthreads();
+
+              values<1, true, false, true>(global_shape_values,
+                                           grad_u[0],
+                                           grad_u[0]);
+              gradients<1, true, false, true>(global_shape_gradients,
+                                              grad_u[1],
+                                              grad_u[1]);
+              values<1, true, false, true>(global_shape_values,
+                                           grad_u[2],
+                                           grad_u[2]);
+
+              __syncthreads();
+
+              values<2, true, false, true>(global_shape_values,
+                                           grad_u[0],
+                                           grad_u[0]);
+              values<2, true, false, true>(global_shape_values,
+                                           grad_u[1],
+                                           grad_u[1]);
+              gradients<2, true, false, true>(global_shape_gradients,
+                                              grad_u[2],
+                                              grad_u[2]);
+
+              break;
+            }
+          default:
+            {
+              // Do nothing. We should throw but we can't from a __device__
+              // function.
+            }
+        }
+    }
+
+
+
+    template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+    inline __device__ void
+    EvaluatorTensorProduct<
+      evaluate_general,
+      dim,
+      fe_degree,
+      n_q_points_1d,
+      Number>::value_and_gradient_at_quad_pts(Number *const u,
+                                              Number *      grad_u[dim])
+    {
+      switch (dim)
+        {
+          case 1:
+            {
+              values<0, true, false, true>(global_shape_values, u, u);
+              __syncthreads();
+              gradients<0, true, false, false>(global_co_shape_gradients,
+                                               u,
+                                               grad_u[0]);
+
+              break;
+            }
+          case 2:
+            {
+              values<0, true, false, true>(global_shape_values, u, u);
+              __syncthreads();
+              values<1, true, false, true>(global_shape_values, u, u);
               __syncthreads();
 
-              values<1, true, false, true>(grad_u[0], grad_u[0]);
-              gradients<1, true, false, true>(grad_u[1], grad_u[1]);
-              values<1, true, false, true>(grad_u[2], grad_u[2]);
+              gradients<0, true, false, false>(global_co_shape_gradients,
+                                               u,
+                                               grad_u[0]);
+              gradients<1, true, false, false>(global_co_shape_gradients,
+                                               u,
+                                               grad_u[1]);
 
+              break;
+            }
+          case 3:
+            {
+              values<0, true, false, true>(global_shape_values, u, u);
+              __syncthreads();
+              values<1, true, false, true>(global_shape_values, u, u);
+              __syncthreads();
+              values<2, true, false, true>(global_shape_values, u, u);
               __syncthreads();
 
-              values<2, true, false, true>(grad_u[0], grad_u[0]);
-              values<2, true, false, true>(grad_u[1], grad_u[1]);
-              gradients<2, true, false, true>(grad_u[2], grad_u[2]);
+              gradients<0, true, false, false>(global_co_shape_gradients,
+                                               u,
+                                               grad_u[0]);
+              gradients<1, true, false, false>(global_co_shape_gradients,
+                                               u,
+                                               grad_u[1]);
+              gradients<2, true, false, false>(global_co_shape_gradients,
+                                               u,
+                                               grad_u[2]);
 
               break;
             }
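
A quick operation count behind the merged path (my arithmetic, not text from
the commit): every values<d, ...> or gradients<d, ...> call above is one 1D
sweep. In 3D, the old code needed 3 sweeps for the values plus 3 x 3 = 9
sweeps for the three gradient components evaluated from the nodal basis, i.e.
12 sweeps when both were requested. value_and_gradient_at_quad_pts needs the
3 value sweeps plus a single collocation-derivative sweep per component:

    3 + 3 = 6 sweeps instead of 3 + 9 = 12,

which is exactly what its case 3 body shows: three values<d, ...> calls on
global_shape_values followed by three gradients<d, ...> calls on
global_co_shape_gradients.
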
@@ -392,42 +497,139 @@ namespace CUDAWrappers
         {
           case 1:
             {
-              gradients<0, false, add, false>(grad_u[dim], u);
+              gradients<0, false, add, false>(global_shape_gradients,
+                                              grad_u[dim],
+                                              u);
 
               break;
             }
           case 2:
             {
-              gradients<0, false, false, true>(grad_u[0], grad_u[0]);
-              values<0, false, false, true>(grad_u[1], grad_u[1]);
+              gradients<0, false, false, true>(global_shape_gradients,
+                                               grad_u[0],
+                                               grad_u[0]);
+              values<0, false, false, true>(global_shape_values,
+                                            grad_u[1],
+                                            grad_u[1]);
 
               __syncthreads();
 
-              values<1, false, add, false>(grad_u[0], u);
+              values<1, false, add, false>(global_shape_values, grad_u[0], u);
               __syncthreads();
-              gradients<1, false, true, false>(grad_u[1], u);
+              gradients<1, false, true, false>(global_shape_gradients,
+                                               grad_u[1],
+                                               u);
 
               break;
             }
           case 3:
             {
-              gradients<0, false, false, true>(grad_u[0], grad_u[0]);
-              values<0, false, false, true>(grad_u[1], grad_u[1]);
-              values<0, false, false, true>(grad_u[2], grad_u[2]);
+              gradients<0, false, false, true>(global_shape_gradients,
+                                               grad_u[0],
+                                               grad_u[0]);
+              values<0, false, false, true>(global_shape_values,
+                                            grad_u[1],
+                                            grad_u[1]);
+              values<0, false, false, true>(global_shape_values,
+                                            grad_u[2],
+                                            grad_u[2]);
 
               __syncthreads();
 
-              values<1, false, false, true>(grad_u[0], grad_u[0]);
-              gradients<1, false, false, true>(grad_u[1], grad_u[1]);
-              values<1, false, false, true>(grad_u[2], grad_u[2]);
+              values<1, false, false, true>(global_shape_values,
+                                            grad_u[0],
+                                            grad_u[0]);
+              gradients<1, false, false, true>(global_shape_gradients,
+                                               grad_u[1],
+                                               grad_u[1]);
+              values<1, false, false, true>(global_shape_values,
+                                            grad_u[2],
+                                            grad_u[2]);
 
               __syncthreads();
 
-              values<2, false, add, false>(grad_u[0], u);
+              values<2, false, add, false>(global_shape_values, grad_u[0], u);
+              __syncthreads();
+              values<2, false, true, false>(global_shape_values, grad_u[1], u);
+              __syncthreads();
+              gradients<2, false, true, false>(global_shape_gradients,
+                                               grad_u[2],
+                                               u);
+
+              break;
+            }
+          default:
+            {
+              // Do nothing. We should throw but we can't from a __device__
+              // function.
+            }
+        }
+    }
+
+
+
+    template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+    inline __device__ void
+    EvaluatorTensorProduct<evaluate_general,
+                           dim,
+                           fe_degree,
+                           n_q_points_1d,
+                           Number>::integrate_value_and_gradient(Number *u,
+                                                                 Number
+                                                                   *grad_u[dim])
+    {
+      switch (dim)
+        {
+          case 1:
+            {
+              gradients<0, false, true, false>(global_co_shape_gradients,
+                                               grad_u[0],
+                                               u);
+              __syncthreads();
+
+              values<0, false, false, true>(global_shape_values, u, u);
+
+              break;
+            }
+          case 2:
+            {
+              gradients<1, false, true, false>(global_co_shape_gradients,
+                                               grad_u[1],
+                                               u);
+              __syncthreads();
+              gradients<0, false, true, false>(global_co_shape_gradients,
+                                               grad_u[0],
+                                               u);
+              __syncthreads();
+
+              values<1, false, false, true>(global_shape_values, u, u);
+              __syncthreads();
+              values<0, false, false, true>(global_shape_values, u, u);
+              __syncthreads();
+
+              break;
+            }
+          case 3:
+            {
+              gradients<2, false, true, false>(global_co_shape_gradients,
+                                               grad_u[2],
+                                               u);
+              __syncthreads();
+              gradients<1, false, true, false>(global_co_shape_gradients,
+                                               grad_u[1],
+                                               u);
+              __syncthreads();
+              gradients<0, false, true, false>(global_co_shape_gradients,
+                                               grad_u[0],
+                                               u);
+              __syncthreads();
+
+              values<2, false, false, true>(global_shape_values, u, u);
+              __syncthreads();
+              values<1, false, false, true>(global_shape_values, u, u);
               __syncthreads();
-              values<2, false, true, false>(grad_u[1], u);
+              values<0, false, false, true>(global_shape_values, u, u);
               __syncthreads();
-              gradients<2, false, true, false>(grad_u[2], u);
 
               break;
             }
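
One closing observation on the algebra (a sketch, not from the commit):
integration is the transpose of evaluation, which explains the order of
operations in integrate_value_and_gradient. If evaluation computes u_q = S u
and g_d = D_co^(d) u_q, then testing with values and gradients accumulates

    u <- S^T (u_q + sum_d (D_co^(d))^T g_d),

so each case body first sums the transposed collocation derivatives into u
(the gradients<d, false, true, false> calls on global_co_shape_gradients) and
only afterwards applies the transposed value interpolation back to the nodal
basis (the values<d, false, false, true> calls on global_shape_values).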