From: Bruno Turcksin
Date: Tue, 8 Aug 2023 21:57:21 +0000 (+0000)
Subject: Use Kokkos constructs in tensor product kernels when using Kokkos 4.0 or later
X-Git-Tag: relicensing~587^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F15863%2Fhead;p=dealii.git

Use Kokkos constructs in tensor product kernels when using Kokkos 4.0 or later
---

diff --git a/include/deal.II/matrix_free/cuda_tensor_product_kernels.h b/include/deal.II/matrix_free/cuda_tensor_product_kernels.h
index d169e658da..74bb8be7c6 100644
--- a/include/deal.II/matrix_free/cuda_tensor_product_kernels.h
+++ b/include/deal.II/matrix_free/cuda_tensor_product_kernels.h
@@ -44,6 +44,202 @@ namespace CUDAWrappers
       evaluate_evenodd
     };
 
+
+
+#if KOKKOS_VERSION >= 40000
+    /**
+     * Helper function for values() and gradients() in 1D.
+     */
+    template <int n_q_points_1d,
+              typename Number,
+              int direction,
+              bool dof_to_quad,
+              bool add,
+              bool in_place,
+              typename ViewTypeIn,
+              typename ViewTypeOut>
+    DEAL_II_HOST_DEVICE void
+    apply_1d(const Kokkos::TeamPolicy<
+               MemorySpace::Default::kokkos_space::execution_space>::member_type
+               &team_member,
+             const Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+               shape_data,
+             const ViewTypeIn in,
+             ViewTypeOut      out)
+    {
+      Number t[n_q_points_1d];
+      Kokkos::parallel_for(Kokkos::TeamThreadRange(team_member, n_q_points_1d),
+                           [&](const int &q) {
+                             t[q] = 0;
+                             // This loop simply multiplies the shape function
+                             // at the quadrature point by the value of the
+                             // finite element coefficient.
+                             // FIXME check why using parallel_reduce with
+                             // ThreadVectorRange is slower
+                             for (int k = 0; k < n_q_points_1d; ++k)
+                               {
+                                 const unsigned int shape_idx =
+                                   dof_to_quad ? (q + k * n_q_points_1d) :
+                                                 (k + q * n_q_points_1d);
+                                 const unsigned int source_idx = k;
+                                 t[q] += shape_data[shape_idx] * in(source_idx);
+                               }
+                           });
+
+      if constexpr (in_place)
+        team_member.team_barrier();
+
+      Kokkos::parallel_for(Kokkos::TeamThreadRange(team_member, n_q_points_1d),
+                           [&](const int &q) {
+                             const unsigned int destination_idx = q;
+                             if constexpr (add)
+                               Kokkos::atomic_add(&out(destination_idx), t[q]);
+                             else
+                               out(destination_idx) = t[q];
+                           });
+    }
+
+
+
+    /**
+     * Helper function for values() and gradients() in 2D.
+     */
+    template <int n_q_points_1d,
+              typename Number,
+              int direction,
+              bool dof_to_quad,
+              bool add,
+              bool in_place,
+              typename ViewTypeIn,
+              typename ViewTypeOut>
+    DEAL_II_HOST_DEVICE void
+    apply_2d(const Kokkos::TeamPolicy<
+               MemorySpace::Default::kokkos_space::execution_space>::member_type
+               &team_member,
+             const Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+               shape_data,
+             const ViewTypeIn in,
+             ViewTypeOut      out)
+    {
+      using TeamType = Kokkos::TeamPolicy<
+        MemorySpace::Default::kokkos_space::execution_space>::member_type;
+      constexpr unsigned int n_q_points = Utilities::pow(n_q_points_1d, 2);
+
+      Number t[n_q_points];
+      auto   thread_policy =
+        Kokkos::TeamThreadMDRange<Kokkos::Rank<2>, TeamType>(team_member,
+                                                             n_q_points_1d,
+                                                             n_q_points_1d);
+      Kokkos::parallel_for(thread_policy, [&](const int i, const int j) {
+        int q_point = i + j * n_q_points_1d;
+        t[q_point]  = 0;
+
+        // This loop simply multiplies the shape function at the quadrature
+        // point by the value of the finite element coefficient.
+        // FIXME check why using parallel_reduce with ThreadVectorRange is
+        // slower
+        for (int k = 0; k < n_q_points_1d; ++k)
+          {
+            const unsigned int shape_idx =
+              dof_to_quad ? (j + k * n_q_points_1d) : (k + j * n_q_points_1d);
+            const unsigned int source_idx = (direction == 0) ?
+                                              (k + n_q_points_1d * i) :
+                                              (i + n_q_points_1d * k);
+            t[q_point] += shape_data[shape_idx] * in(source_idx);
+          }
+      });
+
+      if (in_place)
+        team_member.team_barrier();
+
+      Kokkos::parallel_for(thread_policy, [&](const int i, const int j) {
+        const int          q_point = i + j * n_q_points_1d;
+        const unsigned int destination_idx =
+          (direction == 0) ? (j + n_q_points_1d * i) : (i + n_q_points_1d * j);
+
+        if (add)
+          Kokkos::atomic_add(&out(destination_idx), t[q_point]);
+        else
+          out(destination_idx) = t[q_point];
+      });
+    }
+
+
+
+    /**
+     * Helper function for values() and gradients() in 3D.
+     */
+    template <int n_q_points_1d,
+              typename Number,
+              int direction,
+              bool dof_to_quad,
+              bool add,
+              bool in_place,
+              typename ViewTypeIn,
+              typename ViewTypeOut>
+    DEAL_II_HOST_DEVICE void
+    apply_3d(const Kokkos::TeamPolicy<
+               MemorySpace::Default::kokkos_space::execution_space>::member_type
+               &team_member,
+             const Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+               shape_data,
+             const ViewTypeIn in,
+             ViewTypeOut      out)
+    {
+      using TeamType = Kokkos::TeamPolicy<
+        MemorySpace::Default::kokkos_space::execution_space>::member_type;
+      constexpr unsigned int n_q_points = Utilities::pow(n_q_points_1d, 3);
+
+      Number t[n_q_points];
+      auto   thread_policy = Kokkos::TeamThreadMDRange<Kokkos::Rank<3>, TeamType>(
+        team_member, n_q_points_1d, n_q_points_1d, n_q_points_1d);
+      Kokkos::parallel_for(
+        thread_policy, [&](const int i, const int j, const int q) {
+          const int q_point =
+            i + j * n_q_points_1d + q * n_q_points_1d * n_q_points_1d;
+          t[q_point] = 0;
+
+          // This loop simply multiplies the shape function at the quadrature
+          // point by the value of the finite element coefficient.
+          // FIXME check why using parallel_reduce with ThreadVectorRange is
+          // slower
+          for (int k = 0; k < n_q_points_1d; ++k)
+            {
+              const unsigned int shape_idx =
+                dof_to_quad ? (q + k * n_q_points_1d) : (k + q * n_q_points_1d);
+              const unsigned int source_idx =
+                (direction == 0) ?
+                  (k + n_q_points_1d * (i + n_q_points_1d * j)) :
+                (direction == 1) ?
+                  (i + n_q_points_1d * (k + n_q_points_1d * j)) :
+                  (i + n_q_points_1d * (j + n_q_points_1d * k));
+              t[q_point] += shape_data[shape_idx] * in(source_idx);
+            }
+        });
+
+      if (in_place)
+        team_member.team_barrier();
+
+      Kokkos::parallel_for(
+        thread_policy, [&](const int i, const int j, const int q) {
+          const int q_point =
+            i + j * n_q_points_1d + q * n_q_points_1d * n_q_points_1d;
+          const unsigned int destination_idx =
+            (direction == 0) ? (q + n_q_points_1d * (i + n_q_points_1d * j)) :
+            (direction == 1) ? (i + n_q_points_1d * (q + n_q_points_1d * j)) :
+                               (i + n_q_points_1d * (j + n_q_points_1d * q));
+
+          if (add)
+            Kokkos::atomic_add(&out(destination_idx), t[q_point]);
+          else
+            out(destination_idx) = t[q_point];
+        });
+    }
+#endif
+
+
     /**
      * Helper function for values() and gradients().
      */
@@ -65,6 +261,17 @@ namespace CUDAWrappers
           const ViewTypeIn in,
           ViewTypeOut      out)
     {
+#if KOKKOS_VERSION >= 40000
+      if constexpr (dim == 1)
+        apply_1d<n_q_points_1d, Number, direction, dof_to_quad, add, in_place>(
+          team_member, shape_data, in, out);
+      if constexpr (dim == 2)
+        apply_2d<n_q_points_1d, Number, direction, dof_to_quad, add, in_place>(
+          team_member, shape_data, in, out);
+      if constexpr (dim == 3)
+        apply_3d<n_q_points_1d, Number, direction, dof_to_quad, add, in_place>(
+          team_member, shape_data, in, out);
+#else
       constexpr unsigned int n_q_points = Utilities::pow(n_q_points_1d, dim);
 
       Number t[n_q_points];
@@ -121,6 +328,7 @@ namespace CUDAWrappers
                              else
                                out(destination_idx) = t[q_point];
                            });
+#endif
     }
 
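
The patch relies on the hierarchical team-level construct Kokkos::TeamThreadMDRange, which is only available from Kokkos 4.0 onward (hence the KOKKOS_VERSION guard). The following standalone sketch is not part of the patch; it only illustrates, assuming Kokkos >= 4.0 and with made-up names (the view v, the extent n), how a 2D index range is distributed over the threads of one team, which is the same pattern apply_2d() uses for its quadrature-point loops.

// Standalone sketch (not from the patch): TeamThreadMDRange in Kokkos >= 4.0.
#include <Kokkos_Core.hpp>

int main(int argc, char *argv[])
{
  Kokkos::initialize(argc, argv);
  {
    constexpr int n = 4; // stand-in for n_q_points_1d

    // A small 2D view to write into, one entry per (i,j) index pair.
    Kokkos::View<double **> v("v", n, n);

    using team_policy = Kokkos::TeamPolicy<>;
    using team_member = team_policy::member_type;

    Kokkos::parallel_for(
      "one_team", team_policy(1, Kokkos::AUTO),
      KOKKOS_LAMBDA(const team_member &team) {
        // Distribute the n x n index space over the threads of this team,
        // the construct apply_2d() uses for its two inner parallel_for calls.
        Kokkos::parallel_for(
          Kokkos::TeamThreadMDRange<Kokkos::Rank<2>, team_member>(team, n, n),
          [&](const int i, const int j) { v(i, j) = i + j * n; });
      });
  }
  Kokkos::finalize();
  return 0;
}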