From: Bruno Turcksin Date: Fri, 31 Mar 2023 00:18:59 +0000 (-0400) Subject: Move __shared__ variables to Kokkos::View X-Git-Tag: v9.5.0-rc1~349^2~3 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=b73dd4153109c63a646c6c4e882cc9e3c570602e;p=dealii.git Move __shared__ variables to Kokkos::View --- diff --git a/include/deal.II/matrix_free/cuda_fe_evaluation.h b/include/deal.II/matrix_free/cuda_fe_evaluation.h index 420db4e13d..e32a9b2f28 100644 --- a/include/deal.II/matrix_free/cuda_fe_evaluation.h +++ b/include/deal.II/matrix_free/cuda_fe_evaluation.h @@ -290,8 +290,14 @@ namespace CUDAWrappers constraint_weights; // Internal buffer - Number *values; - Number *gradients[dim]; + Kokkos::Subview, + Kokkos::pair> + values; + Kokkos::Subview< + Kokkos::View, + Kokkos::pair, + Kokkos::pair> + gradients; }; @@ -329,10 +335,8 @@ namespace CUDAWrappers , co_shape_gradients(data->co_shape_gradients) , constraint_weights(data->constraint_weights) , values(shdata->values) - { - for (unsigned int i = 0; i < dim; ++i) - gradients[i] = shdata->gradients[i]; - } + , gradients(shdata->gradients) + {} @@ -350,9 +354,8 @@ namespace CUDAWrappers const unsigned int idx = internal::compute_index(); const types::global_dof_index src_idx = local_to_global[idx]; - // Use the read-only data cache. - KOKKOS_IF_ON_DEVICE(values[idx] = __ldg(&src[src_idx]); __syncthreads();) - KOKKOS_IF_ON_HOST(values[idx] = src[src_idx];) + values[idx] = src[src_idx]; + KOKKOS_IF_ON_DEVICE(__syncthreads();) internal::resolve_hanging_nodes(constraint_weights, constraint_mask, @@ -559,7 +562,7 @@ namespace CUDAWrappers { Number tmp = 0.; for (unsigned int d_2 = 0; d_2 < dim; ++d_2) - tmp += inv_jac(q_point, d_2, d_1) * gradients[d_2][q_point]; + tmp += inv_jac(q_point, d_2, d_1) * gradients(q_point, d_2); grad[d_1] = tmp; } @@ -584,7 +587,7 @@ namespace CUDAWrappers Number tmp = 0.; for (unsigned int d_2 = 0; d_2 < dim; ++d_2) tmp += inv_jac(q_point, d_1, d_2) * grad_in[d_2]; - gradients[d_1][q_point] = tmp * JxW[q_point]; + gradients(q_point, d_1) = tmp * JxW[q_point]; } } diff --git a/include/deal.II/matrix_free/cuda_hanging_nodes_internal.h b/include/deal.II/matrix_free/cuda_hanging_nodes_internal.h index 8ed76db905..8b040a1970 100644 --- a/include/deal.II/matrix_free/cuda_hanging_nodes_internal.h +++ b/include/deal.II/matrix_free/cuda_hanging_nodes_internal.h @@ -61,8 +61,10 @@ namespace CUDAWrappers Kokkos::View constraint_weights, const dealii::internal::MatrixFreeFunctions::ConstraintKinds - constraint_mask, - Number *values) + constraint_mask, + Kokkos::Subview< + Kokkos::View, + Kokkos::pair> values) { const unsigned int x_idx = threadIdx.x % (fe_degree + 1); const unsigned int y_idx = threadIdx.y; @@ -169,8 +171,10 @@ namespace CUDAWrappers Kokkos::View constraint_weights, const dealii::internal::MatrixFreeFunctions::ConstraintKinds - constraint_mask, - Number *values) + constraint_mask, + Kokkos::Subview< + Kokkos::View, + Kokkos::pair> values) { const unsigned int x_idx = threadIdx.x % (fe_degree + 1); const unsigned int y_idx = threadIdx.y; @@ -320,8 +324,10 @@ namespace CUDAWrappers Kokkos::View constraint_weights, const dealii::internal::MatrixFreeFunctions::ConstraintKinds - constraint_mask, - Number *values) + constraint_mask, + Kokkos::Subview< + Kokkos::View, + Kokkos::pair> values) { if (dim == 2) { diff --git a/include/deal.II/matrix_free/cuda_matrix_free.h b/include/deal.II/matrix_free/cuda_matrix_free.h index e7be918ef4..4eb44bd061 100644 --- 
a/include/deal.II/matrix_free/cuda_matrix_free.h +++ b/include/deal.II/matrix_free/cuda_matrix_free.h @@ -630,8 +630,7 @@ namespace CUDAWrappers - // TODO find a better place to put these things - + // TODO We should rework this to use scratch memory /** * Structure to pass the shared memory into a general user function. */ @@ -639,27 +638,20 @@ namespace CUDAWrappers struct SharedData { /** - * Constructor. - */ - DEAL_II_HOST_DEVICE - SharedData(Number *vd, Number *gq[dim]) - : values(vd) - { - for (unsigned int d = 0; d < dim; ++d) - gradients[d] = gq[d]; - } - - /** - * Shared memory for dof and quad values. + * Memory for dof and quad values. */ - Number *values; + Kokkos::Subview, + Kokkos::pair> + values; /** - * Shared memory for computed gradients in reference coordinate system. - * The gradient in each direction is saved in a struct-of-array - * format, i.e. first, all gradients in the x-direction come... + * Memory for computed gradients in reference coordinate system. */ - Number *gradients[dim]; + Kokkos::Subview< + Kokkos::View, + Kokkos::pair, + Kokkos::pair> + gradients; }; diff --git a/include/deal.II/matrix_free/cuda_matrix_free.templates.h b/include/deal.II/matrix_free/cuda_matrix_free.templates.h index 18f330ee54..21a891060d 100644 --- a/include/deal.II/matrix_free/cuda_matrix_free.templates.h +++ b/include/deal.II/matrix_free/cuda_matrix_free.templates.h @@ -374,35 +374,36 @@ namespace CUDAWrappers template __global__ void - apply_kernel_shmem(Functor func, - const typename MatrixFree::Data gpu_data, - const Number * src, - Number * dst) + apply_kernel_shmem( + Functor func, + const typename MatrixFree::Data gpu_data, + Kokkos::View values, + Kokkos::View gradients, + Number *const src, + Number * dst) { constexpr unsigned int cells_per_block = cells_per_block_shmem(dim, Functor::n_dofs_1d - 1); - constexpr unsigned int n_dofs_per_block = - cells_per_block * Functor::n_local_dofs; - constexpr unsigned int n_q_points_per_block = - cells_per_block * Functor::n_q_points; - // TODO make use of dynamically allocated shared memory - __shared__ Number values[n_dofs_per_block]; - __shared__ Number gradients[dim][n_q_points_per_block]; - const unsigned int local_cell = threadIdx.x / Functor::n_dofs_1d; const unsigned int cell = local_cell + cells_per_block * (blockIdx.x + gridDim.x * blockIdx.y); - Number *gq[dim]; - for (unsigned int d = 0; d < dim; ++d) - gq[d] = &gradients[d][local_cell * Functor::n_q_points]; - - SharedData shared_data( - &values[local_cell * Functor::n_local_dofs], gq); - if (cell < gpu_data.n_cells) - func(cell, &gpu_data, &shared_data, src, dst); + { + SharedData shared_data( + {Kokkos::subview( + values, + Kokkos::pair(cell * Functor::n_local_dofs, + (cell + 1) * Functor::n_local_dofs)), + Kokkos::subview(gradients, + Kokkos::pair(cell * Functor::n_q_points, + (cell + 1) * + Functor::n_q_points), + Kokkos::pair(0, dim))}); + + func(cell, &gpu_data, &shared_data, src, dst); + } } @@ -1007,17 +1008,38 @@ namespace CUDAWrappers const VectorType &src, VectorType & dst) const { + std::vector> + values_colors(n_colors); + std::vector> + gradients_colors(n_colors); // Execute the loop on the cells for (unsigned int i = 0; i < n_colors; ++i) if (n_cells[i] > 0) { + const unsigned int size = + (grid_dim[i].x * grid_dim[i].y * grid_dim[i].z) * cells_per_block * + Functor::n_local_dofs; + // std::cout << "color " << i << " " << size << std::endl; + values_colors[i] = + Kokkos::View( + Kokkos::view_alloc("values_" + std::to_string(i), + 
Kokkos::WithoutInitializing), + size); + gradients_colors[i] = + Kokkos::View( + Kokkos::view_alloc("gradients_" + std::to_string(i), + Kokkos::WithoutInitializing), + size); internal::apply_kernel_shmem <<>>(func, get_data(i), + values_colors[i], + gradients_colors[i], src.get_values(), dst.get_values()); AssertCudaKernel(); } + cudaDeviceSynchronize(); } @@ -1039,13 +1061,30 @@ namespace CUDAWrappers if (overlap_communication_computation) { src.update_ghost_values_start(0); + + Kokkos::View values; + Kokkos::View + gradients; // In parallel, it's possible that some processors do not own any // cells. if (n_cells[0] > 0) { + const unsigned int size = + (grid_dim[0].x * grid_dim[0].y * grid_dim[0].z) * + cells_per_block * Functor::n_local_dofs; + values = + Kokkos::View( + Kokkos::view_alloc("values", Kokkos::WithoutInitializing), + size); + gradients = Kokkos::View( + Kokkos::view_alloc("gradients", Kokkos::WithoutInitializing), + size); internal::apply_kernel_shmem <<>>(func, get_data(0), + values, + gradients, src.get_values(), dst.get_values()); AssertCudaKernel(); @@ -1056,9 +1095,22 @@ namespace CUDAWrappers // cells if (n_cells[1] > 0) { + const unsigned int size = + (grid_dim[1].x * grid_dim[1].y * grid_dim[1].z) * + cells_per_block * Functor::n_local_dofs; + values = + Kokkos::View( + Kokkos::view_alloc("values", Kokkos::WithoutInitializing), + size); + gradients = Kokkos::View( + Kokkos::view_alloc("gradients", Kokkos::WithoutInitializing), + size); internal::apply_kernel_shmem <<>>(func, get_data(1), + values, + gradients, src.get_values(), dst.get_values()); AssertCudaKernel(); @@ -1073,9 +1125,22 @@ namespace CUDAWrappers // not own any cells if (n_cells[2] > 0) { + const unsigned int size = + (grid_dim[2].x * grid_dim[2].y * grid_dim[2].z) * + cells_per_block * Functor::n_local_dofs; + values = + Kokkos::View( + Kokkos::view_alloc("values", Kokkos::WithoutInitializing), + size); + gradients = Kokkos::View( + Kokkos::view_alloc("gradients", Kokkos::WithoutInitializing), + size); internal::apply_kernel_shmem <<>>(func, get_data(2), + values, + gradients, src.get_values(), dst.get_values()); AssertCudaKernel(); @@ -1085,14 +1150,36 @@ namespace CUDAWrappers else { src.update_ghost_values(); + std::vector< + Kokkos::View> + values_colors(n_colors); + std::vector< + Kokkos::View> + gradients_colors(n_colors); // Execute the loop on the cells for (unsigned int i = 0; i < n_colors; ++i) if (n_cells[i] > 0) { + const unsigned int size = + (grid_dim[i].x * grid_dim[i].y * grid_dim[i].z) * + cells_per_block * Functor::n_local_dofs; + values_colors[i] = + Kokkos::View( + Kokkos::view_alloc("values_" + std::to_string(i), + Kokkos::WithoutInitializing), + size); + gradients_colors[i] = + Kokkos::View( + Kokkos::view_alloc("gradients_" + std::to_string(i), + Kokkos::WithoutInitializing), + size); internal::apply_kernel_shmem <<>>(func, get_data(i), + values_colors[i], + gradients_colors[i], src.get_values(), dst.get_values()); } @@ -1110,13 +1197,33 @@ namespace CUDAWrappers ghosted_src = src; ghosted_dst = dst; + std::vector> + values_colors(n_colors); + std::vector< + Kokkos::View> + gradients_colors(n_colors); // Execute the loop on the cells for (unsigned int i = 0; i < n_colors; ++i) if (n_cells[i] > 0) { + const unsigned int size = + (grid_dim[i].x * grid_dim[i].y * grid_dim[i].z) * + cells_per_block * Functor::n_local_dofs; + values_colors[i] = + Kokkos::View( + Kokkos::view_alloc("values_" + std::to_string(i), + Kokkos::WithoutInitializing), + size); + gradients_colors[i] = + 
Kokkos::View( + Kokkos::view_alloc("gradients_" + std::to_string(i), + Kokkos::WithoutInitializing), + size); internal::apply_kernel_shmem <<>>(func, get_data(i), + values_colors[i], + gradients_colors[i], ghosted_src.get_values(), ghosted_dst.get_values()); AssertCudaKernel(); diff --git a/include/deal.II/matrix_free/cuda_tensor_product_kernels.h b/include/deal.II/matrix_free/cuda_tensor_product_kernels.h index f2547950ea..583cd7962c 100644 --- a/include/deal.II/matrix_free/cuda_tensor_product_kernels.h +++ b/include/deal.II/matrix_free/cuda_tensor_product_kernels.h @@ -55,11 +55,14 @@ namespace CUDAWrappers int direction, bool dof_to_quad, bool add, - bool in_place> + bool in_place, + typename ViewTypeIn, + typename ViewTypeOut> DEAL_II_HOST_DEVICE void - apply(Kokkos::View shape_data, - const Number * in, - Number * out) + apply(const Kokkos::View + shape_data, + const ViewTypeIn in, + ViewTypeOut out) { KOKKOS_IF_ON_DEVICE( const unsigned int i = (dim == 1) ? 0 : threadIdx.x % n_q_points_1d; @@ -89,7 +92,7 @@ namespace CUDAWrappers (direction == 1) ? (i + n_q_points_1d * (q + n_q_points_1d * j)) : (i + n_q_points_1d * (j + n_q_points_1d * q)); - if (add) out[destination_idx] += t; + if (add) Kokkos::atomic_add(&out[destination_idx], t); else out[destination_idx] = t;) } @@ -134,74 +137,101 @@ namespace CUDAWrappers * Evaluate the values of a finite element function at the quadrature * points. */ - template + template DEAL_II_HOST_DEVICE void - values(const Number *in, Number *out) const; + values(const ViewTypeIn in, ViewTypeOut out) const; /** * Evaluate the gradient of a finite element function at the quadrature * points for a given @p direction. */ - template + template DEAL_II_HOST_DEVICE void - gradients(const Number *in, Number *out) const; + gradients(const ViewTypeIn in, ViewTypeOut out) const; /** - * TODO + * Evaluate the gradient of a finite element function at the quadrature + * points for a given @p direction for collocation methods. */ - template + template DEAL_II_HOST_DEVICE void - co_gradients(const Number *in, Number *out) const; + co_gradients(const ViewTypeIn in, ViewTypeOut out) const; /** * Evaluate the finite element function at the quadrature points. */ + template DEAL_II_HOST_DEVICE void - value_at_quad_pts(Number *u); + value_at_quad_pts(ViewType u); /** * Helper function for integrate(). Integrate the finite element function. */ + template DEAL_II_HOST_DEVICE void - integrate_value(Number *u); + integrate_value(ViewType u); /** * Evaluate the gradients of the finite element function at the quadrature * points. */ + template DEAL_II_HOST_DEVICE void - gradient_at_quad_pts(const Number *const u, Number *grad_u[dim]); + gradient_at_quad_pts(const ViewTypeIn u, ViewTypeOut grad_u); /** * Evaluate the values and the gradients of the finite element function at * the quadrature points. */ + template DEAL_II_HOST_DEVICE void - value_and_gradient_at_quad_pts(Number *const u, Number *grad_u[dim]); + value_and_gradient_at_quad_pts(ViewType1 u, ViewType2 grad_u); /** * Helper function for integrate(). Integrate the gradients of the finite * element function. */ - template + template DEAL_II_HOST_DEVICE void - integrate_gradient(Number *u, Number *grad_u[dim]); + integrate_gradient(ViewType1 u, ViewType2 grad_u); /** * Helper function for integrate(). Integrate the values and the gradients * of the finite element function. 
*/ + template DEAL_II_HOST_DEVICE void - integrate_value_and_gradient(Number *u, Number *grad_u[dim]); + integrate_value_and_gradient(ViewType1 u, ViewType2 grad_u); - // TODO shape values + /** + * Values of the shape functions. + */ Kokkos::View shape_values; - // TODO shape gradients + /** + * Values of the shape function gradients. + */ Kokkos::View shape_gradients; - // TODO shape gradients for collocation methods + /** + * Values of the shape function gradients for collocation methods. + */ Kokkos::View co_shape_gradients; }; @@ -229,13 +259,19 @@ namespace CUDAWrappers template - template + template DEAL_II_HOST_DEVICE void EvaluatorTensorProduct::values(const Number *in, Number *out) const + Number>::values(const ViewTypeIn in, + ViewTypeOut out) const { apply( shape_values, in, out); @@ -244,14 +280,19 @@ namespace CUDAWrappers template - template + template DEAL_II_HOST_DEVICE void EvaluatorTensorProduct::gradients(const Number *in, - Number * out) const + Number>::gradients(const ViewTypeIn in, + ViewTypeOut out) const { apply( shape_gradients, in, out); @@ -260,14 +301,19 @@ namespace CUDAWrappers template - template + template DEAL_II_HOST_DEVICE void EvaluatorTensorProduct::co_gradients(const Number *in, - Number * out) const + Number>::co_gradients(const ViewTypeIn in, + ViewTypeOut out) const { apply( co_shape_gradients, in, out); @@ -276,12 +322,13 @@ namespace CUDAWrappers template + template DEAL_II_HOST_DEVICE inline void EvaluatorTensorProduct::value_at_quad_pts(Number *u) + Number>::value_at_quad_pts(ViewType u) { switch (dim) { @@ -320,12 +367,13 @@ namespace CUDAWrappers template + template DEAL_II_HOST_DEVICE inline void EvaluatorTensorProduct::integrate_value(Number *u) + Number>::integrate_value(ViewType u) { switch (dim) { @@ -364,51 +412,74 @@ namespace CUDAWrappers template + template DEAL_II_HOST_DEVICE inline void EvaluatorTensorProduct::gradient_at_quad_pts(const Number *const u, - Number *grad_u[dim]) + Number>::gradient_at_quad_pts(const ViewTypeIn u, + ViewTypeOut grad_u) { switch (dim) { case 1: { - gradients<0, true, false, false>(u, grad_u[0]); + gradients<0, true, false, false>( + u, Kokkos::subview(grad_u, Kokkos::ALL, 0)); break; } case 2: { - gradients<0, true, false, false>(u, grad_u[0]); - values<0, true, false, false>(u, grad_u[1]); + gradients<0, true, false, false>( + u, Kokkos::subview(grad_u, Kokkos::ALL, 0)); + values<0, true, false, false>( + u, Kokkos::subview(grad_u, Kokkos::ALL, 1)); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<1, true, false, true>(grad_u[0], grad_u[0]); - gradients<1, true, false, true>(grad_u[1], grad_u[1]); + values<1, true, false, true>( + Kokkos::subview(grad_u, Kokkos::ALL, 0), + Kokkos::subview(grad_u, Kokkos::ALL, 0)); + gradients<1, true, false, true>( + Kokkos::subview(grad_u, Kokkos::ALL, 1), + Kokkos::subview(grad_u, Kokkos::ALL, 1)); break; } case 3: { - gradients<0, true, false, false>(u, grad_u[0]); - values<0, true, false, false>(u, grad_u[1]); - values<0, true, false, false>(u, grad_u[2]); + gradients<0, true, false, false>( + u, Kokkos::subview(grad_u, Kokkos::ALL, 0)); + values<0, true, false, false>( + u, Kokkos::subview(grad_u, Kokkos::ALL, 1)); + values<0, true, false, false>( + u, Kokkos::subview(grad_u, Kokkos::ALL, 2)); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<1, true, false, true>(grad_u[0], grad_u[0]); - gradients<1, true, false, true>(grad_u[1], grad_u[1]); - values<1, true, false, true>(grad_u[2], grad_u[2]); + values<1, true, false, true>( + Kokkos::subview(grad_u, Kokkos::ALL, 
0), + Kokkos::subview(grad_u, Kokkos::ALL, 0)); + gradients<1, true, false, true>( + Kokkos::subview(grad_u, Kokkos::ALL, 1), + Kokkos::subview(grad_u, Kokkos::ALL, 1)); + values<1, true, false, true>( + Kokkos::subview(grad_u, Kokkos::ALL, 2), + Kokkos::subview(grad_u, Kokkos::ALL, 2)); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<2, true, false, true>(grad_u[0], grad_u[0]); - values<2, true, false, true>(grad_u[1], grad_u[1]); - gradients<2, true, false, true>(grad_u[2], grad_u[2]); + values<2, true, false, true>( + Kokkos::subview(grad_u, Kokkos::ALL, 0), + Kokkos::subview(grad_u, Kokkos::ALL, 0)); + values<2, true, false, true>( + Kokkos::subview(grad_u, Kokkos::ALL, 1), + Kokkos::subview(grad_u, Kokkos::ALL, 1)); + gradients<2, true, false, true>( + Kokkos::subview(grad_u, Kokkos::ALL, 2), + Kokkos::subview(grad_u, Kokkos::ALL, 2)); break; } @@ -423,14 +494,15 @@ namespace CUDAWrappers template + template DEAL_II_HOST_DEVICE inline void - EvaluatorTensorProduct< - evaluate_general, - dim, - fe_degree, - n_q_points_1d, - Number>::value_and_gradient_at_quad_pts(Number *const u, - Number * grad_u[dim]) + EvaluatorTensorProduct::value_and_gradient_at_quad_pts(ViewType1 u, + ViewType2 + grad_u) { switch (dim) { @@ -439,7 +511,8 @@ namespace CUDAWrappers values<0, true, false, true>(u, u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - co_gradients<0, true, false, false>(u, grad_u[0]); + co_gradients<0, true, false, false>( + u, Kokkos::subview(grad_u, Kokkos::ALL, 0)); break; } @@ -450,8 +523,10 @@ namespace CUDAWrappers values<1, true, false, true>(u, u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - co_gradients<0, true, false, false>(u, grad_u[0]); - co_gradients<1, true, false, false>(u, grad_u[1]); + co_gradients<0, true, false, false>( + u, Kokkos::subview(grad_u, Kokkos::ALL, 0)); + co_gradients<1, true, false, false>( + u, Kokkos::subview(grad_u, Kokkos::ALL, 1)); break; } @@ -464,9 +539,12 @@ namespace CUDAWrappers values<2, true, false, true>(u, u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - co_gradients<0, true, false, false>(u, grad_u[0]); - co_gradients<1, true, false, false>(u, grad_u[1]); - co_gradients<2, true, false, false>(u, grad_u[2]); + co_gradients<0, true, false, false>( + u, Kokkos::subview(grad_u, Kokkos::ALL, 0)); + co_gradients<1, true, false, false>( + u, Kokkos::subview(grad_u, Kokkos::ALL, 1)); + co_gradients<2, true, false, false>( + u, Kokkos::subview(grad_u, Kokkos::ALL, 2)); break; } @@ -481,57 +559,77 @@ namespace CUDAWrappers template - template + template DEAL_II_HOST_DEVICE inline void EvaluatorTensorProduct::integrate_gradient(Number *u, - Number *grad_u[dim]) + Number>::integrate_gradient(ViewType1 u, + ViewType2 grad_u) { switch (dim) { case 1: { gradients<0, false, add, false>( - - grad_u[dim], u); + Kokkos::subview(grad_u, Kokkos::ALL, dim), u); break; } case 2: { - gradients<0, false, false, true>(grad_u[0], grad_u[0]); - values<0, false, false, true>(grad_u[1], grad_u[1]); + gradients<0, false, false, true>( + Kokkos::subview(grad_u, Kokkos::ALL, 0), + Kokkos::subview(grad_u, Kokkos::ALL, 0)); + values<0, false, false, true>( + Kokkos::subview(grad_u, Kokkos::ALL, 1), + Kokkos::subview(grad_u, Kokkos::ALL, 1)); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<1, false, add, false>(grad_u[0], u); + values<1, false, add, false>( + Kokkos::subview(grad_u, Kokkos::ALL, 0), u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - gradients<1, false, true, false>(grad_u[1], u); + gradients<1, false, true, false>( + Kokkos::subview(grad_u, Kokkos::ALL, 1), u); break; } case 3: { - 
gradients<0, false, false, true>(grad_u[0], grad_u[0]); - values<0, false, false, true>(grad_u[1], grad_u[1]); - values<0, false, false, true>(grad_u[2], grad_u[2]); + gradients<0, false, false, true>( + Kokkos::subview(grad_u, Kokkos::ALL, 0), + Kokkos::subview(grad_u, Kokkos::ALL, 0)); + values<0, false, false, true>( + Kokkos::subview(grad_u, Kokkos::ALL, 1), + Kokkos::subview(grad_u, Kokkos::ALL, 1)); + values<0, false, false, true>( + Kokkos::subview(grad_u, Kokkos::ALL, 2), + Kokkos::subview(grad_u, Kokkos::ALL, 2)); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<1, false, false, true>(grad_u[0], grad_u[0]); - gradients<1, false, false, true>(grad_u[1], grad_u[1]); - values<1, false, false, true>(grad_u[2], grad_u[2]); + values<1, false, false, true>( + Kokkos::subview(grad_u, Kokkos::ALL, 0), + Kokkos::subview(grad_u, Kokkos::ALL, 0)); + gradients<1, false, false, true>( + Kokkos::subview(grad_u, Kokkos::ALL, 1), + Kokkos::subview(grad_u, Kokkos::ALL, 1)); + values<1, false, false, true>( + Kokkos::subview(grad_u, Kokkos::ALL, 2), + Kokkos::subview(grad_u, Kokkos::ALL, 2)); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<2, false, add, false>(grad_u[0], u); + values<2, false, add, false>( + Kokkos::subview(grad_u, Kokkos::ALL, 0), u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<2, false, true, false>(grad_u[1], u); + values<2, false, true, false>( + Kokkos::subview(grad_u, Kokkos::ALL, 1), u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - gradients<2, false, true, false>(grad_u[2], u); + gradients<2, false, true, false>( + Kokkos::subview(grad_u, Kokkos::ALL, 2), u); break; } @@ -546,20 +644,22 @@ namespace CUDAWrappers template + template DEAL_II_HOST_DEVICE inline void EvaluatorTensorProduct::integrate_value_and_gradient(Number *u, - Number - *grad_u[dim]) + Number>::integrate_value_and_gradient(ViewType1 u, + ViewType2 + grad_u) { switch (dim) { case 1: { - co_gradients<0, false, true, false>(grad_u[0], u); + co_gradients<0, false, true, false>( + Kokkos::subview(grad_u, Kokkos::ALL, 0), u); KOKKOS_IF_ON_DEVICE(__syncthreads();) values<0, false, false, true>(u, u); @@ -568,9 +668,11 @@ namespace CUDAWrappers } case 2: { - co_gradients<1, false, true, false>(grad_u[1], u); + co_gradients<1, false, true, false>( + Kokkos::subview(grad_u, Kokkos::ALL, 1), u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - co_gradients<0, false, true, false>(grad_u[0], u); + co_gradients<0, false, true, false>( + Kokkos::subview(grad_u, Kokkos::ALL, 0), u); KOKKOS_IF_ON_DEVICE(__syncthreads();) values<1, false, false, true>(u, u); @@ -582,11 +684,14 @@ namespace CUDAWrappers } case 3: { - co_gradients<2, false, true, false>(grad_u[2], u); + co_gradients<2, false, true, false>( + Kokkos::subview(grad_u, Kokkos::ALL, 2), u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - co_gradients<1, false, true, false>(grad_u[1], u); + co_gradients<1, false, true, false>( + Kokkos::subview(grad_u, Kokkos::ALL, 1), u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - co_gradients<0, false, true, false>(grad_u[0], u); + co_gradients<0, false, true, false>( + Kokkos::subview(grad_u, Kokkos::ALL, 0), u); KOKKOS_IF_ON_DEVICE(__syncthreads();) values<2, false, false, true>(u, u);
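The whole patch applies one pattern, which can be reduced to a small standalone sketch (this is not deal.II code): instead of declaring fixed-size __shared__ arrays inside the __global__ kernel, the caller allocates an uninitialized Kokkos::View in device memory and every thread block works on a Kokkos::subview slice of it, which is what SharedData now stores and what apply_kernel_shmem now receives as extra View arguments. All names below (cell_kernel, scratch, n_dofs_per_block, n_blocks) are illustrative only, and the sketch assumes Kokkos built with the CUDA backend and compilation with nvcc.

// Standalone sketch (not deal.II code) of the pattern used in this patch:
// the per-block __shared__ buffers are replaced by one uninitialized
// Kokkos::View in device memory, and each thread block works on a
// Kokkos::subview slice of it. All names are illustrative only.
#include <Kokkos_Core.hpp>
#include <cuda_runtime.h>
#include <iostream>

constexpr unsigned int n_dofs_per_block = 64;

__global__ void
cell_kernel(Kokkos::View<double *, Kokkos::CudaSpace> scratch, double *dst)
{
  // Replacement for "__shared__ double values[n_dofs_per_block];":
  // each block takes its own slice of the caller-provided buffer.
  const unsigned int block = blockIdx.x;
  auto values = Kokkos::subview(
    scratch,
    Kokkos::pair<unsigned int, unsigned int>(block * n_dofs_per_block,
                                             (block + 1) * n_dofs_per_block));

  values[threadIdx.x] = static_cast<double>(threadIdx.x); // toy workload
  __syncthreads();

  if (threadIdx.x == 0)
    {
      double sum = 0.;
      for (unsigned int i = 0; i < n_dofs_per_block; ++i)
        sum += values[i];
      dst[block] = sum;
    }
}

int
main(int argc, char **argv)
{
  Kokkos::initialize(argc, argv);
  {
    const unsigned int n_blocks = 16;

    // Allocated by the caller, uninitialized, one slab per block -- the same
    // role the values_colors/gradients_colors Views play in cell_loop().
    Kokkos::View<double *, Kokkos::CudaSpace> scratch(
      Kokkos::view_alloc("scratch", Kokkos::WithoutInitializing),
      n_blocks * n_dofs_per_block);
    Kokkos::View<double *, Kokkos::CudaSpace> dst("dst", n_blocks);

    cell_kernel<<<n_blocks, n_dofs_per_block>>>(scratch, dst.data());
    cudaDeviceSynchronize();

    auto dst_host =
      Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, dst);
    std::cout << "block 0 sum: " << dst_host(0) << std::endl;
  }
  Kokkos::finalize();
}

The explicit per-launch buffers (one pair of Views per color in cell_loop() and distributed_cell_loop()) trade CUDA-specific __shared__ storage for ordinary device memory; the TODO added to SharedData notes that Kokkos scratch (team shared) memory is the intended longer-term replacement for these buffers.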