From: Bruno Turcksin Date: Thu, 11 May 2023 13:47:06 +0000 (+0000) Subject: Use Kokkos to launch kernels in the CUDAWrappers::MatrixFree framework X-Git-Tag: v9.5.0-rc1~209^2~2 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=efe4cd6d03f0cc83613e2c06cf3fe9f222cfc0b3;p=dealii.git Use Kokkos to launch kernels in the CUDAWrappers::MatrixFree framework --- diff --git a/examples/step-64/step-64.cc b/examples/step-64/step-64.cc index d59e3feebc..0c99de0a16 100644 --- a/examples/step-64/step-64.cc +++ b/examples/step-64/step-64.cc @@ -61,8 +61,8 @@ namespace Step64 // an object of this type to a CUDAWrappers::MatrixFree // object that expects the class to have an `operator()` that fills the // values provided in the constructor for a given cell. This operator - // needs to run on the device, so it needs to be marked as `__device__` - // for the compiler. + // needs to run on the device, so it needs to be marked as + // `DEAL_II_HOST_DEVICE` for the compiler. template class VaryingCoefficientFunctor { @@ -71,13 +71,15 @@ namespace Step64 : coef(coefficient) {} - __device__ void operator()( + DEAL_II_HOST_DEVICE void operator()( + const typename CUDAWrappers::MatrixFree::Data *gpu_data, const unsigned int cell, - const typename CUDAWrappers::MatrixFree::Data *gpu_data); + const unsigned int q) const; // Since CUDAWrappers::MatrixFree::Data doesn't know about the size of its - // arrays, we need to store the number of quadrature points and the numbers - // of degrees of freedom in this class to do necessary index conversions. + // arrays, we need to store the number of quadrature points and the + // numbers of degrees of freedom in this class to do necessary index + // conversions. static const unsigned int n_dofs_1d = fe_degree + 1; static const unsigned int n_local_dofs = Utilities::pow(n_dofs_1d, dim); static const unsigned int n_q_points = Utilities::pow(n_dofs_1d, dim); @@ -92,16 +94,14 @@ namespace Step64 // the introduction that we have defined it as $a(\mathbf // x)=\frac{10}{0.05 + 2\|\mathbf x\|^2}$ template - __device__ void VaryingCoefficientFunctor::operator()( + DEAL_II_HOST_DEVICE void + VaryingCoefficientFunctor::operator()( + const typename CUDAWrappers::MatrixFree::Data *gpu_data, const unsigned int cell, - const typename CUDAWrappers::MatrixFree::Data *gpu_data) + const unsigned int q) const { - const unsigned int pos = CUDAWrappers::local_q_point_id( - cell, gpu_data, n_dofs_1d, n_q_points); - const Point q_point = - CUDAWrappers::get_quadrature_point(cell, - gpu_data, - n_dofs_1d); + const unsigned int pos = gpu_data->local_q_point_id(cell, n_q_points, q); + const Point q_point = gpu_data->get_quadrature_point(cell, q); double p_square = 0.; for (unsigned int i = 0; i < dim; ++i) @@ -121,22 +121,31 @@ namespace Step64 // step-37. In contrast to there, the actual quadrature point // index is treated implicitly by converting the current thread // index. As before, the functions of this class need to run on - // the device, so need to be marked as `__device__` for the + // the device, so need to be marked as `DEAL_II_HOST_DEVICE` for the // compiler. 
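// A minimal sketch of what such a host/device annotation macro can expand
// to; the actual DEAL_II_HOST_DEVICE definition lives in deal.II's
// configuration headers and may differ. EXAMPLE_HOST_DEVICE and ScaleByTwo
// are illustrative names, not deal.II API:
#if defined(__CUDACC__) || defined(__HIPCC__)
#  define EXAMPLE_HOST_DEVICE __host__ __device__
#else
#  define EXAMPLE_HOST_DEVICE
#endif

struct ScaleByTwo
{
  // the same call operator is usable from host code and device kernels
  EXAMPLE_HOST_DEVICE double
  operator()(const double x) const
  {
    return 2. * x;
  }
};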
template class HelmholtzOperatorQuad { public: - __device__ HelmholtzOperatorQuad(double coef) + DEAL_II_HOST_DEVICE HelmholtzOperatorQuad( + const typename CUDAWrappers::MatrixFree::Data *gpu_data, + double * coef, + int cell) : coef(coef) {} - __device__ void operator()( + DEAL_II_HOST_DEVICE void operator()( CUDAWrappers::FEEvaluation - *fe_eval) const; + * fe_eval, + const int q_point) const; + + static const unsigned int n_q_points = + dealii::Utilities::pow(fe_degree + 1, dim); private: - double coef; + const typename CUDAWrappers::MatrixFree::Data *gpu_data; + double * coef; + int cell; }; @@ -148,12 +157,16 @@ namespace Step64 // the two terms on the left-hand side correspond to the two function calls // here: template - __device__ void HelmholtzOperatorQuad::operator()( + DEAL_II_HOST_DEVICE void HelmholtzOperatorQuad::operator()( CUDAWrappers::FEEvaluation - *fe_eval) const + * fe_eval, + const int q_point) const { - fe_eval->submit_value(coef * fe_eval->get_value()); - fe_eval->submit_gradient(fe_eval->get_gradient()); + const unsigned int pos = + gpu_data->local_q_point_id(cell, n_q_points, q_point); + + fe_eval->submit_value(coef[pos] * fe_eval->get_value(q_point), q_point); + fe_eval->submit_gradient(fe_eval->get_gradient(q_point), q_point); } @@ -166,24 +179,26 @@ namespace Step64 class LocalHelmholtzOperator { public: + // Again, the CUDAWrappers::MatrixFree object doesn't know about the number + // of degrees of freedom and the number of quadrature points so we need + // to store these for index calculations in the call operator. + static constexpr unsigned int n_dofs_1d = fe_degree + 1; + static constexpr unsigned int n_local_dofs = + Utilities::pow(fe_degree + 1, dim); + static constexpr unsigned int n_q_points = + Utilities::pow(fe_degree + 1, dim); + LocalHelmholtzOperator(double *coefficient) : coef(coefficient) {} - __device__ void operator()( + DEAL_II_HOST_DEVICE void operator()( const unsigned int cell, const typename CUDAWrappers::MatrixFree::Data *gpu_data, CUDAWrappers::SharedData * shared_data, const double * src, double * dst) const; - // Again, the CUDAWrappers::MatrixFree object doesn't know about the number - // of degrees of freedom and the number of quadrature points so we need - // to store these for index calculations in the call operator. - static const unsigned int n_dofs_1d = fe_degree + 1; - static const unsigned int n_local_dofs = Utilities::pow(fe_degree + 1, dim); - static const unsigned int n_q_points = Utilities::pow(fe_degree + 1, dim); - private: double *coef; }; @@ -195,22 +210,19 @@ namespace Step64 // vector and we write value and gradient information to the destination // vector. 
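// Before the implementation, a short aside: under the new interface
// sketched above, a user-side quadrature-point functor receives the
// quadrature-point index as an explicit argument instead of recovering it
// from threadIdx. A hedged sketch for a plain mass operator
// (MassOperatorQuad is an illustrative name, not part of this patch; the
// FEEvaluation template arguments follow the declarations above):
template <int dim, int fe_degree>
class MassOperatorQuad
{
public:
  DEAL_II_HOST_DEVICE void
  operator()(CUDAWrappers::FEEvaluation<dim, fe_degree> *fe_eval,
             const int q_point) const
  {
    // only a value term, no gradient term
    fe_eval->submit_value(fe_eval->get_value(q_point), q_point);
  }
};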
template - __device__ void LocalHelmholtzOperator::operator()( + DEAL_II_HOST_DEVICE void LocalHelmholtzOperator::operator()( const unsigned int cell, const typename CUDAWrappers::MatrixFree::Data *gpu_data, CUDAWrappers::SharedData * shared_data, const double * src, double * dst) const { - const unsigned int pos = CUDAWrappers::local_q_point_id( - cell, gpu_data, n_dofs_1d, n_q_points); - CUDAWrappers::FEEvaluation - fe_eval(cell, gpu_data, shared_data); + fe_eval(gpu_data, shared_data); fe_eval.read_dof_values(src); fe_eval.evaluate(true, true); fe_eval.apply_for_each_quad_point( - HelmholtzOperatorQuad(coef[pos])); + HelmholtzOperatorQuad(gpu_data, coef, cell)); fe_eval.integrate(true, true); fe_eval.distribute_local_to_global(dst); } diff --git a/include/deal.II/matrix_free/cuda_fe_evaluation.h b/include/deal.II/matrix_free/cuda_fe_evaluation.h index fe1a96f101..b106f850b4 100644 --- a/include/deal.II/matrix_free/cuda_fe_evaluation.h +++ b/include/deal.II/matrix_free/cuda_fe_evaluation.h @@ -41,27 +41,6 @@ DEAL_II_NAMESPACE_OPEN */ namespace CUDAWrappers { - namespace internal - { - /** - * Compute the dof/quad index for a given thread id, dimension, and - * number of points in each space dimensions. - */ - template - DEAL_II_HOST_DEVICE inline unsigned int - compute_index() - { - KOKKOS_IF_ON_DEVICE( - return (dim == 1 ? - threadIdx.x % n_points_1d : - dim == 2 ? - threadIdx.x % n_points_1d + n_points_1d * threadIdx.y : - threadIdx.x % n_points_1d + - n_points_1d * (threadIdx.y + n_points_1d * threadIdx.z));) - KOKKOS_IF_ON_HOST(return 0;) - } - } // namespace internal - /** * This class provides all the functions necessary to evaluate functions at * quadrature points and cell integrations. In functionality, this class is @@ -136,9 +115,7 @@ namespace CUDAWrappers * Constructor. */ DEAL_II_HOST_DEVICE - FEEvaluation(const unsigned int cell_id, - const data_type * data, - SharedData *shdata); + FEEvaluation(const data_type *data, SharedData *shdata); /** * For the vector @p src, read out the values on the degrees of freedom of @@ -185,42 +162,42 @@ namespace CUDAWrappers * id. */ DEAL_II_HOST_DEVICE value_type - get_value() const; + get_value(int q_point) const; /** * Same as above, except that the local dof index is computed from the * thread id. */ DEAL_II_HOST_DEVICE value_type - get_dof_value() const; + get_dof_value(int q_point) const; /** * Same as above, except that the quadrature point is computed from the * thread id. */ DEAL_II_HOST_DEVICE void - submit_value(const value_type &val_in); + submit_value(const value_type &val_in, int q_point); /** * Same as above, except that the local dof index is computed from the * thread id. */ DEAL_II_HOST_DEVICE void - submit_dof_value(const value_type &val_in); + submit_dof_value(const value_type &val_in, int q_point); /** * Same as above, except that the quadrature point is computed from the * thread id. */ DEAL_II_HOST_DEVICE gradient_type - get_gradient() const; + get_gradient(int q_point) const; /** * Same as above, except that the quadrature point is computed from the * thread id. */ DEAL_II_HOST_DEVICE void - submit_gradient(const gradient_type &grad_in); + submit_gradient(const gradient_type &grad_in, int q_point); // clang-format off /** @@ -239,64 +216,9 @@ namespace CUDAWrappers apply_for_each_quad_point(const Functor &func); private: - // FIXME We would like to use - // Kokkos::Subview, int, decltype(Kokkos::ALL)> - // but we get error: incomplete type is not allowed. I cannot reproduce - // outside of deal.II. 
Need to investigate more. - Kokkos::Subview, - int, - Kokkos::pair> - local_to_global; - unsigned int n_cells; - unsigned int padding_length; - - const dealii::internal::MatrixFreeFunctions::ConstraintKinds - constraint_mask; - - const bool use_coloring; - - // FIXME We would like to use - // Kokkos::Subview, int, decltype(Kokkos::ALL), - // decltype(Kokkos::ALL), decltype(Kokkos::ALL)> but we get error: - // incomplete type is not allowed. I cannot reproduce outside of deal.II. - // Need to investigate more. - Kokkos::Subview< - Kokkos::View, - int, - Kokkos::pair, - Kokkos::pair, - Kokkos::pair> - inv_jac; - // FIXME We would like to use - // Kokkos::Subview, int, decltype(Kokkos::ALL)> - // but we get error: incomplete type is not allowed. I cannot reproduce - // outside of deal.II. Need to investigate more. - Kokkos::Subview, - int, - Kokkos::pair> - JxW; - - // Data shared by multiple cells - Kokkos::View shape_values; - Kokkos::View shape_gradients; - Kokkos::View - co_shape_gradients; - Kokkos::View - constraint_weights; - - // Internal buffer - Kokkos::Subview, - Kokkos::pair> - values; - Kokkos::Subview< - Kokkos::View, - Kokkos::pair, - Kokkos::pair> - gradients; + const data_type * data; + SharedData *shared_data; + int cell_id; }; @@ -308,33 +230,10 @@ namespace CUDAWrappers typename Number> DEAL_II_HOST_DEVICE FEEvaluation:: - FEEvaluation(const unsigned int cell_id, - const data_type * data, - SharedData *shdata) - : local_to_global(Kokkos::subview( - data->local_to_global, - cell_id, - Kokkos::pair(0, Utilities::pow(n_q_points_1d, dim)))) - , n_cells(data->n_cells) - , padding_length(data->padding_length) - , constraint_mask(data->constraint_mask[cell_id]) - , use_coloring(data->use_coloring) - , inv_jac(Kokkos::subview( - data->inv_jacobian, - cell_id, - Kokkos::pair(0, Utilities::pow(n_q_points_1d, dim)), - Kokkos::pair(0, dim), - Kokkos::pair(0, dim))) - , JxW(Kokkos::subview( - data->JxW, - cell_id, - Kokkos::pair(0, Utilities::pow(n_q_points_1d, dim)))) - , shape_values(data->shape_values) - , shape_gradients(data->shape_gradients) - , co_shape_gradients(data->co_shape_gradients) - , constraint_weights(data->constraint_weights) - , values(shdata->values) - , gradients(shdata->gradients) + FEEvaluation(const data_type *data, SharedData *shdata) + : data(data) + , shared_data(shdata) + , cell_id(shared_data->team_member.league_rank()) {} @@ -350,15 +249,20 @@ namespace CUDAWrappers { static_assert(n_components_ == 1, "This function only supports FE with one \ components"); - const unsigned int idx = internal::compute_index(); - - const types::global_dof_index src_idx = local_to_global[idx]; - values[idx] = src[src_idx]; - KOKKOS_IF_ON_DEVICE(__syncthreads();) - - internal::resolve_hanging_nodes(constraint_weights, - constraint_mask, - values); + // Populate the scratch memory + Kokkos::parallel_for(Kokkos::TeamThreadRange(shared_data->team_member, + n_q_points), + [&](const int &i) { + shared_data->values(i) = + src[data->local_to_global(cell_id, i)]; + }); + shared_data->team_member.team_barrier(); + + internal::resolve_hanging_nodes( + shared_data->team_member, + data->constraint_weights, + data->constraint_mask(cell_id), + shared_data->values); } @@ -374,18 +278,31 @@ namespace CUDAWrappers { static_assert(n_components_ == 1, "This function only supports FE with one \ components"); - internal::resolve_hanging_nodes(constraint_weights, - constraint_mask, - values); - - const unsigned int idx = internal::compute_index(); - const types::global_dof_index destination_idx = 
local_to_global[idx]; + internal::resolve_hanging_nodes( + shared_data->team_member, + data->constraint_weights, + data->constraint_mask(cell_id), + shared_data->values); - if (use_coloring) - dst[destination_idx] += values[idx]; + if (data->use_coloring) + { + Kokkos::parallel_for(Kokkos::TeamThreadRange(shared_data->team_member, + n_q_points), + [&](const int &i) { + dst[data->local_to_global(cell_id, i)] += + shared_data->values(i); + }); + } else - atomicAdd(&dst[destination_idx], values[idx]); + { + Kokkos::parallel_for( + Kokkos::TeamThreadRange(shared_data->team_member, n_q_points), + [&](const int &i) { + Kokkos::atomic_add(&dst[data->local_to_global(cell_id, i)], + shared_data->values(i)); + }); + } } @@ -408,24 +325,26 @@ namespace CUDAWrappers fe_degree, n_q_points_1d, Number> - evaluator_tensor_product(shape_values, - shape_gradients, - co_shape_gradients); + evaluator_tensor_product(shared_data->team_member, + data->shape_values, + data->shape_gradients, + data->co_shape_gradients); if (evaluate_val == true && evaluate_grad == true) { - evaluator_tensor_product.value_and_gradient_at_quad_pts(values, - gradients); - KOKKOS_IF_ON_DEVICE(__syncthreads();) + evaluator_tensor_product.value_and_gradient_at_quad_pts( + shared_data->values, shared_data->gradients); + shared_data->team_member.team_barrier(); } else if (evaluate_grad == true) { - evaluator_tensor_product.gradient_at_quad_pts(values, gradients); - KOKKOS_IF_ON_DEVICE(__syncthreads();) + evaluator_tensor_product.gradient_at_quad_pts(shared_data->values, + shared_data->gradients); + shared_data->team_member.team_barrier(); } else if (evaluate_val == true) { - evaluator_tensor_product.value_at_quad_pts(values); - KOKKOS_IF_ON_DEVICE(__syncthreads();) + evaluator_tensor_product.value_at_quad_pts(shared_data->values); + shared_data->team_member.team_barrier(); } } @@ -447,24 +366,25 @@ namespace CUDAWrappers fe_degree, n_q_points_1d, Number> - evaluator_tensor_product(shape_values, - shape_gradients, - co_shape_gradients); + evaluator_tensor_product(shared_data->team_member, + data->shape_values, + data->shape_gradients, + data->co_shape_gradients); if (integrate_val == true && integrate_grad == true) { - evaluator_tensor_product.integrate_value_and_gradient(values, - gradients); + evaluator_tensor_product.integrate_value_and_gradient( + shared_data->values, shared_data->gradients); } else if (integrate_val == true) { - evaluator_tensor_product.integrate_value(values); - KOKKOS_IF_ON_DEVICE(__syncthreads();) + evaluator_tensor_product.integrate_value(shared_data->values); + shared_data->team_member.team_barrier(); } else if (integrate_grad == true) { - evaluator_tensor_product.template integrate_gradient(values, - gradients); - KOKKOS_IF_ON_DEVICE(__syncthreads();) + evaluator_tensor_product.template integrate_gradient( + shared_data->values, shared_data->gradients); + shared_data->team_member.team_barrier(); } } @@ -480,11 +400,10 @@ namespace CUDAWrappers n_q_points_1d, n_components_, Number>::value_type - FEEvaluation:: - get_value() const + FEEvaluation::get_value( + int q_point) const { - const unsigned int q_point = internal::compute_index(); - return values[q_point]; + return shared_data->values(q_point); } @@ -500,10 +419,9 @@ namespace CUDAWrappers n_components_, Number>::value_type FEEvaluation:: - get_dof_value() const + get_dof_value(int q_point) const { - const unsigned int dof = internal::compute_index(); - return values[dof]; + return shared_data->values(q_point); } @@ -515,10 +433,9 @@ namespace CUDAWrappers 
typename Number> DEAL_II_HOST_DEVICE void FEEvaluation:: - submit_value(const value_type &val_in) + submit_value(const value_type &val_in, int q_point) { - const unsigned int q_point = internal::compute_index(); - values[q_point] = val_in * JxW[q_point]; + shared_data->values(q_point) = val_in * data->JxW(cell_id, q_point); } @@ -530,10 +447,9 @@ namespace CUDAWrappers typename Number> DEAL_II_HOST_DEVICE void FEEvaluation:: - submit_dof_value(const value_type &val_in) + submit_dof_value(const value_type &val_in, int q_point) { - const unsigned int dof = internal::compute_index(); - values[dof] = val_in; + shared_data->values(q_point) = val_in; } @@ -549,19 +465,18 @@ namespace CUDAWrappers n_components_, Number>::gradient_type FEEvaluation:: - get_gradient() const + get_gradient(int q_point) const { static_assert(n_components_ == 1, "This function only supports FE with one \ components"); - // TODO optimize if the mesh is uniform - const unsigned int q_point = internal::compute_index(); - gradient_type grad; + gradient_type grad; for (unsigned int d_1 = 0; d_1 < dim; ++d_1) { Number tmp = 0.; for (unsigned int d_2 = 0; d_2 < dim; ++d_2) - tmp += inv_jac(q_point, d_2, d_1) * gradients(q_point, d_2); + tmp += data->inv_jacobian(cell_id, q_point, d_2, d_1) * + shared_data->gradients(q_point, d_2); grad[d_1] = tmp; } @@ -577,16 +492,15 @@ namespace CUDAWrappers typename Number> DEAL_II_HOST_DEVICE void FEEvaluation:: - submit_gradient(const gradient_type &grad_in) + submit_gradient(const gradient_type &grad_in, int q_point) { - // TODO optimize if the mesh is uniform - const unsigned int q_point = internal::compute_index(); for (unsigned int d_1 = 0; d_1 < dim; ++d_1) { Number tmp = 0.; for (unsigned int d_2 = 0; d_2 < dim; ++d_2) - tmp += inv_jac(q_point, d_1, d_2) * grad_in[d_2]; - gradients(q_point, d_1) = tmp * JxW[q_point]; + tmp += data->inv_jacobian(cell_id, q_point, d_1, d_2) * grad_in[d_2]; + shared_data->gradients(q_point, d_1) = + tmp * data->JxW(cell_id, q_point); } } @@ -602,9 +516,10 @@ namespace CUDAWrappers FEEvaluation:: apply_for_each_quad_point(const Functor &func) { - func(this); - - KOKKOS_IF_ON_DEVICE(__syncthreads();) + Kokkos::parallel_for(Kokkos::TeamThreadRange(shared_data->team_member, + n_q_points), + [&](const int &i) { func(this, i); }); + shared_data->team_member.team_barrier(); } } // namespace CUDAWrappers diff --git a/include/deal.II/matrix_free/cuda_hanging_nodes_internal.h b/include/deal.II/matrix_free/cuda_hanging_nodes_internal.h index 8b040a1970..8063eee3bb 100644 --- a/include/deal.II/matrix_free/cuda_hanging_nodes_internal.h +++ b/include/deal.II/matrix_free/cuda_hanging_nodes_internal.h @@ -52,31 +52,96 @@ namespace CUDAWrappers + template + DEAL_II_HOST_DEVICE inline bool + is_constrained_dof( + const dealii::internal::MatrixFreeFunctions::ConstraintKinds + & constraint_mask, + const unsigned int x_idx, + const unsigned int y_idx) + { + return ((direction == 0) && + (((constraint_mask & dealii::internal::MatrixFreeFunctions:: + ConstraintKinds::subcell_y) != + dealii::internal::MatrixFreeFunctions::ConstraintKinds:: + unconstrained) ? + (y_idx == 0) : + (y_idx == fe_degree))) || + ((direction == 1) && + (((constraint_mask & dealii::internal::MatrixFreeFunctions:: + ConstraintKinds::subcell_x) != + dealii::internal::MatrixFreeFunctions::ConstraintKinds:: + unconstrained) ? 
+ (x_idx == 0) : + (x_idx == fe_degree))); + } + + template + DEAL_II_HOST_DEVICE inline bool + is_constrained_dof( + const dealii::internal::MatrixFreeFunctions::ConstraintKinds + & constraint_mask, + const unsigned int x_idx, + const unsigned int y_idx, + const unsigned int z_idx, + const dealii::internal::MatrixFreeFunctions::ConstraintKinds face1_type, + const dealii::internal::MatrixFreeFunctions::ConstraintKinds face2_type, + const dealii::internal::MatrixFreeFunctions::ConstraintKinds face1, + const dealii::internal::MatrixFreeFunctions::ConstraintKinds face2, + const dealii::internal::MatrixFreeFunctions::ConstraintKinds edge) + { + const unsigned int face1_idx = (direction == 0) ? y_idx : + (direction == 1) ? z_idx : + x_idx; + const unsigned int face2_idx = (direction == 0) ? z_idx : + (direction == 1) ? x_idx : + y_idx; + + const bool on_face1 = ((constraint_mask & face1_type) != + dealii::internal::MatrixFreeFunctions:: + ConstraintKinds::unconstrained) ? + (face1_idx == 0) : + (face1_idx == fe_degree); + const bool on_face2 = ((constraint_mask & face2_type) != + dealii::internal::MatrixFreeFunctions:: + ConstraintKinds::unconstrained) ? + (face2_idx == 0) : + (face2_idx == fe_degree); + return ( + (((constraint_mask & face1) != dealii::internal::MatrixFreeFunctions:: + ConstraintKinds::unconstrained) && + on_face1) || + (((constraint_mask & face2) != dealii::internal::MatrixFreeFunctions:: + ConstraintKinds::unconstrained) && + on_face2) || + (((constraint_mask & edge) != dealii::internal::MatrixFreeFunctions:: + ConstraintKinds::unconstrained) && + on_face1 && on_face2)); + } + + + template DEAL_II_HOST_DEVICE inline void interpolate_boundary_2d( + const Kokkos::TeamPolicy< + MemorySpace::Default::kokkos_space::execution_space>::member_type + &team_member, Kokkos::View constraint_weights, const dealii::internal::MatrixFreeFunctions::ConstraintKinds - constraint_mask, - Kokkos::Subview< - Kokkos::View, - Kokkos::pair> values) + & constraint_mask, + Kokkos::View> values) { - const unsigned int x_idx = threadIdx.x % (fe_degree + 1); - const unsigned int y_idx = threadIdx.y; - - const auto this_type = - (direction == 0) ? - dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_x : - dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_y; - - const unsigned int interp_idx = (direction == 0) ? x_idx : y_idx; + constexpr unsigned int n_q_points_1d = fe_degree + 1; + constexpr unsigned int n_q_points = Utilities::pow(n_q_points_1d, 2); - Number t = 0; // Flag is true if dof is constrained for the given direction and the // given face. const bool constrained_face = @@ -91,73 +156,88 @@ namespace CUDAWrappers unconstrained))) != dealii::internal::MatrixFreeFunctions::ConstraintKinds::unconstrained; - // Flag is true if for the given direction, the dof is constrained with - // the right type and is on the correct side (left (= 0) or right (= - // fe_degree)) - const bool constrained_dof = - ((direction == 0) && - (((constraint_mask & dealii::internal::MatrixFreeFunctions:: - ConstraintKinds::subcell_y) != - dealii::internal::MatrixFreeFunctions::ConstraintKinds:: - unconstrained) ? - (y_idx == 0) : - (y_idx == fe_degree))) || - ((direction == 1) && - (((constraint_mask & dealii::internal::MatrixFreeFunctions:: - ConstraintKinds::subcell_x) != - dealii::internal::MatrixFreeFunctions::ConstraintKinds:: - unconstrained) ? 
- (x_idx == 0) : - (x_idx == fe_degree))); - - if (constrained_face && constrained_dof) - { - const bool type = (constraint_mask & this_type) != - dealii::internal::MatrixFreeFunctions:: - ConstraintKinds::unconstrained; - - if (type) + Number t[n_q_points]; + Kokkos::parallel_for( + Kokkos::TeamThreadRange(team_member, n_q_points), + [&](const int &q_point) { + const unsigned int x_idx = q_point % n_q_points_1d; + const unsigned int y_idx = q_point / n_q_points_1d; + + const auto this_type = + (direction == 0) ? + dealii::internal::MatrixFreeFunctions::ConstraintKinds:: + subcell_x : + dealii::internal::MatrixFreeFunctions::ConstraintKinds::subcell_y; + + const unsigned int interp_idx = (direction == 0) ? x_idx : y_idx; + t[q_point] = 0; + + // Flag is true if for the given direction, the dof is constrained + // with the right type and is on the correct side (left (= 0) or right + // (= fe_degree)) + const bool constrained_dof = + is_constrained_dof(constraint_mask, + x_idx, + y_idx); + + if (constrained_face && constrained_dof) { - for (unsigned int i = 0; i <= fe_degree; ++i) + const bool type = (constraint_mask & this_type) != + dealii::internal::MatrixFreeFunctions:: + ConstraintKinds::unconstrained; + + if (type) { - const unsigned int real_idx = - (direction == 0) ? index2(i, y_idx) : - index2(x_idx, i); - - const Number w = - transpose ? - constraint_weights[i * (fe_degree + 1) + interp_idx] : - constraint_weights[interp_idx * (fe_degree + 1) + i]; - t += w * values[real_idx]; + for (unsigned int i = 0; i <= fe_degree; ++i) + { + const unsigned int real_idx = + (direction == 0) ? index2(i, y_idx) : + index2(x_idx, i); + + const Number w = + transpose ? + constraint_weights[i * n_q_points_1d + interp_idx] : + constraint_weights[interp_idx * n_q_points_1d + i]; + t[q_point] += w * values[real_idx]; + } } - } - else - { - for (unsigned int i = 0; i <= fe_degree; ++i) + else { - const unsigned int real_idx = - (direction == 0) ? index2(i, y_idx) : - index2(x_idx, i); - - const Number w = - transpose ? - constraint_weights[(fe_degree - i) * (fe_degree + 1) + - fe_degree - interp_idx] : - constraint_weights[(fe_degree - interp_idx) * - (fe_degree + 1) + - fe_degree - i]; - t += w * values[real_idx]; + for (unsigned int i = 0; i <= fe_degree; ++i) + { + const unsigned int real_idx = + (direction == 0) ? index2(i, y_idx) : + index2(x_idx, i); + + const Number w = + transpose ? + constraint_weights[(fe_degree - i) * n_q_points_1d + + fe_degree - interp_idx] : + constraint_weights[(fe_degree - interp_idx) * + n_q_points_1d + + fe_degree - i]; + t[q_point] += w * values[real_idx]; + } } } - } + }); // The synchronization is done for all the threads in one block with // each block being assigned to one element. 
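// The rewrite here follows a two-phase pattern that replaces the CUDA
// one-thread-per-dof model: accumulate into a temporary over a
// TeamThreadRange, synchronize the team, and only then write back, so no
// thread overwrites an entry of 'values' that another thread still needs
// to read. It relies on an identical TeamThreadRange mapping the same
// iterations to the same thread in both loops. A sketch of the idea with
// generic names (not deal.II API; assumes <Kokkos_Core.hpp> is included
// and n <= 64):
template <typename TeamType, typename ViewType>
KOKKOS_FUNCTION void
reverse_in_place(const TeamType &team, ViewType values, const int n)
{
  double t[64];
  Kokkos::parallel_for(Kokkos::TeamThreadRange(team, n),
                       [&](const int i) { t[i] = values(n - 1 - i); });
  team.team_barrier(); // all reads done before any write starts
  Kokkos::parallel_for(Kokkos::TeamThreadRange(team, n),
                       [&](const int i) { values(i) = t[i]; });
  team.team_barrier();
}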
- KOKKOS_IF_ON_DEVICE(__syncthreads();) - if (constrained_face && constrained_dof) - values[index2(x_idx, y_idx)] = t; - - KOKKOS_IF_ON_DEVICE(__syncthreads();) + team_member.team_barrier(); + Kokkos::parallel_for(Kokkos::TeamThreadRange(team_member, n_q_points), + [&](const int &q_point) { + const unsigned int x_idx = q_point % n_q_points_1d; + const unsigned int y_idx = q_point / n_q_points_1d; + const bool constrained_dof = + is_constrained_dof( + constraint_mask, x_idx, y_idx); + if (constrained_face && constrained_dof) + values[index2(x_idx, y_idx)] = + t[q_point]; + }); + + team_member.team_barrier(); } @@ -168,17 +248,20 @@ namespace CUDAWrappers typename Number> DEAL_II_HOST_DEVICE inline void interpolate_boundary_3d( + const Kokkos::TeamPolicy< + MemorySpace::Default::kokkos_space::execution_space>::member_type + &team_member, Kokkos::View constraint_weights, const dealii::internal::MatrixFreeFunctions::ConstraintKinds - constraint_mask, - Kokkos::Subview< - Kokkos::View, - Kokkos::pair> values) + constraint_mask, + Kokkos::View> values) { - const unsigned int x_idx = threadIdx.x % (fe_degree + 1); - const unsigned int y_idx = threadIdx.y; - const unsigned int z_idx = threadIdx.z; + constexpr unsigned int n_q_points_1d = fe_degree + 1; + constexpr unsigned int n_q_points = Utilities::pow(n_q_points_1d, 3); const auto this_type = (direction == 0) ? @@ -221,92 +304,104 @@ namespace CUDAWrappers dealii::internal::MatrixFreeFunctions::ConstraintKinds::edge_z; const auto constrained_face = constraint_mask & (face1 | face2 | edge); - const unsigned int interp_idx = (direction == 0) ? x_idx : - (direction == 1) ? y_idx : - z_idx; - const unsigned int face1_idx = (direction == 0) ? y_idx : - (direction == 1) ? z_idx : - x_idx; - const unsigned int face2_idx = (direction == 0) ? z_idx : - (direction == 1) ? x_idx : - y_idx; - - Number t = 0; - const bool on_face1 = ((constraint_mask & face1_type) != - dealii::internal::MatrixFreeFunctions:: - ConstraintKinds::unconstrained) ? - (face1_idx == 0) : - (face1_idx == fe_degree); - const bool on_face2 = ((constraint_mask & face2_type) != - dealii::internal::MatrixFreeFunctions:: - ConstraintKinds::unconstrained) ? - (face2_idx == 0) : - (face2_idx == fe_degree); - const bool constrained_dof = - ((((constraint_mask & face1) != dealii::internal::MatrixFreeFunctions:: - ConstraintKinds::unconstrained) && - on_face1) || - (((constraint_mask & face2) != dealii::internal::MatrixFreeFunctions:: - ConstraintKinds::unconstrained) && - on_face2) || - (((constraint_mask & edge) != dealii::internal::MatrixFreeFunctions:: - ConstraintKinds::unconstrained) && - on_face1 && on_face2)); - - if ((constrained_face != dealii::internal::MatrixFreeFunctions:: - ConstraintKinds::unconstrained) && - constrained_dof) - { - const bool type = (constraint_mask & this_type) != - dealii::internal::MatrixFreeFunctions:: - ConstraintKinds::unconstrained; - if (type) + Number t[n_q_points]; + Kokkos::parallel_for( + Kokkos::TeamThreadRange(team_member, n_q_points), + [&](const int &q_point) { + const unsigned int x_idx = q_point % n_q_points_1d; + const unsigned int y_idx = (q_point / n_q_points_1d) % n_q_points_1d; + const unsigned int z_idx = q_point / (n_q_points_1d * n_q_points_1d); + + const unsigned int interp_idx = (direction == 0) ? x_idx : + (direction == 1) ? 
y_idx : + z_idx; + const bool constrained_dof = + is_constrained_dof(constraint_mask, + x_idx, + y_idx, + z_idx, + face1_type, + face2_type, + face1, + face2, + edge); + t[q_point] = 0; + if ((constrained_face != dealii::internal::MatrixFreeFunctions:: + ConstraintKinds::unconstrained) && + constrained_dof) { - for (unsigned int i = 0; i <= fe_degree; ++i) + const bool type = (constraint_mask & this_type) != + dealii::internal::MatrixFreeFunctions:: + ConstraintKinds::unconstrained; + if (type) { - const unsigned int real_idx = - (direction == 0) ? index3(i, y_idx, z_idx) : - (direction == 1) ? index3(x_idx, i, z_idx) : - index3(x_idx, y_idx, i); - - const Number w = - transpose ? - constraint_weights[i * (fe_degree + 1) + interp_idx] : - constraint_weights[interp_idx * (fe_degree + 1) + i]; - t += w * values[real_idx]; + for (unsigned int i = 0; i <= fe_degree; ++i) + { + const unsigned int real_idx = + (direction == 0) ? + index3(i, y_idx, z_idx) : + (direction == 1) ? + index3(x_idx, i, z_idx) : + index3(x_idx, y_idx, i); + + const Number w = + transpose ? + constraint_weights[i * n_q_points_1d + interp_idx] : + constraint_weights[interp_idx * n_q_points_1d + i]; + t[q_point] += w * values[real_idx]; + } } - } - else - { - for (unsigned int i = 0; i <= fe_degree; ++i) + else { - const unsigned int real_idx = - (direction == 0) ? index3(i, y_idx, z_idx) : - (direction == 1) ? index3(x_idx, i, z_idx) : - index3(x_idx, y_idx, i); - - const Number w = - transpose ? - constraint_weights[(fe_degree - i) * (fe_degree + 1) + - fe_degree - interp_idx] : - constraint_weights[(fe_degree - interp_idx) * - (fe_degree + 1) + - fe_degree - i]; - t += w * values[real_idx]; + for (unsigned int i = 0; i <= fe_degree; ++i) + { + const unsigned int real_idx = + (direction == 0) ? + index3(i, y_idx, z_idx) : + (direction == 1) ? + index3(x_idx, i, z_idx) : + index3(x_idx, y_idx, i); + + const Number w = + transpose ? + constraint_weights[(fe_degree - i) * n_q_points_1d + + fe_degree - interp_idx] : + constraint_weights[(fe_degree - interp_idx) * + n_q_points_1d + + fe_degree - i]; + t[q_point] += w * values[real_idx]; + } } } - } + }); // The synchronization is done for all the threads in one block with // each block being assigned to one element. 
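// The 3d loops here recover (x_idx, y_idx, z_idx) from the flat
// lexicographic quadrature-point index. A small standalone check of that
// decomposition, with n points per direction (n = 3 chosen arbitrarily):
#include <cassert>

int main()
{
  constexpr int n = 3;
  for (int q = 0; q < n * n * n; ++q)
    {
      const int x = q % n;
      const int y = (q / n) % n;
      const int z = q / (n * n);
      assert(x + n * (y + n * z) == q); // matches index3(x, y, z)
    }
}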
- KOKKOS_IF_ON_DEVICE(__syncthreads();) + team_member.team_barrier(); + + Kokkos::parallel_for( + Kokkos::TeamThreadRange(team_member, n_q_points), + [&](const int &q_point) { + const unsigned int x_idx = q_point % n_q_points_1d; + const unsigned int y_idx = (q_point / n_q_points_1d) % n_q_points_1d; + const unsigned int z_idx = q_point / (n_q_points_1d * n_q_points_1d); + const bool constrained_dof = + is_constrained_dof(constraint_mask, + x_idx, + y_idx, + z_idx, + face1_type, + face2_type, + face1, + face2, + edge); + if ((constrained_face != dealii::internal::MatrixFreeFunctions:: + ConstraintKinds::unconstrained) && + constrained_dof) + values[index3(x_idx, y_idx, z_idx)] = t[q_point]; + }); + + team_member.team_barrier(); } @@ -321,36 +416,45 @@ namespace CUDAWrappers template DEAL_II_HOST_DEVICE void resolve_hanging_nodes( + const Kokkos::TeamPolicy< + MemorySpace::Default::kokkos_space::execution_space>::member_type + &team_member, Kokkos::View constraint_weights, const dealii::internal::MatrixFreeFunctions::ConstraintKinds - constraint_mask, - Kokkos::Subview< - Kokkos::View, - Kokkos::pair> values) + constraint_mask, + Kokkos::View> values) { if (dim == 2) { - interpolate_boundary_2d(constraint_weights, + interpolate_boundary_2d(team_member, + constraint_weights, constraint_mask, values); - interpolate_boundary_2d(constraint_weights, + interpolate_boundary_2d(team_member, + constraint_weights, constraint_mask, values); } else if (dim == 3) { // Interpolate y and z faces (x-direction) - interpolate_boundary_3d(constraint_weights, + interpolate_boundary_3d(team_member, + constraint_weights, constraint_mask, values); // Interpolate x and z faces (y-direction) - interpolate_boundary_3d(constraint_weights, + interpolate_boundary_3d(team_member, + constraint_weights, constraint_mask, values); // Interpolate x and y faces (z-direction) - interpolate_boundary_3d(constraint_weights, + interpolate_boundary_3d(team_member, + constraint_weights, constraint_mask, values); } diff --git a/include/deal.II/matrix_free/cuda_matrix_free.h b/include/deal.II/matrix_free/cuda_matrix_free.h index 1cef0d1580..0a4d68f764 100644 --- a/include/deal.II/matrix_free/cuda_matrix_free.h +++ b/include/deal.II/matrix_free/cuda_matrix_free.h @@ -235,6 +235,30 @@ namespace CUDAWrappers * the destination vector. Otherwise, use atomic operations. */ bool use_coloring; + + /** + * Return the local quadrature point index. The index is + * unique only within a given MPI process. + */ + DEAL_II_HOST_DEVICE unsigned int + local_q_point_id(const unsigned int cell, + const unsigned int n_q_points, + const unsigned int q_point) const + { + return (row_start / padding_length + cell) * n_q_points + q_point; + } + + + /** + * Return the quadrature point. + */ + DEAL_II_HOST_DEVICE + typename CUDAWrappers::MatrixFree::point_type & + get_quadrature_point(const unsigned int cell, + const unsigned int q_point) const + { + return q_points(cell, q_point); + } }; /** @@ -585,22 +609,6 @@ namespace CUDAWrappers */ std::shared_ptr partitioner; - /** - * Cells per block (determined by the function cells_per_block_shmem() ). - */ - unsigned int cells_per_block; - - /** - * Grid dimensions used to launch the CUDA kernels - * in *_constrained_values-operations. 
- */ - dim3 constraint_grid_dim; - - /** - * Block dimensions used to launch the CUDA kernels - * in *_constrained_values-operations. - */ - dim3 constraint_block_dim; /** * Length of the padding (closest power of two larger than or equal to @@ -628,114 +636,48 @@ namespace CUDAWrappers - // TODO We should rework this to use scratch memory - /** - * Structure to pass the shared memory into a general user function. - */ template struct SharedData { + using TeamHandle = Kokkos::TeamPolicy< + MemorySpace::Default::kokkos_space::execution_space>::member_type; + + using SharedView1D = Kokkos::View< + Number *, + MemorySpace::Default::kokkos_space::execution_space::scratch_memory_space, + Kokkos::MemoryTraits>; + using SharedView2D = Kokkos::View< + Number *[dim], + MemorySpace::Default::kokkos_space::execution_space::scratch_memory_space, + Kokkos::MemoryTraits>; + + DEAL_II_HOST_DEVICE + SharedData(const TeamHandle &team_member, + SharedView1D values, + SharedView2D gradients) + : team_member(team_member) + , values(values) + , gradients(gradients) + {} + + /** + * TeamPolicy handle. + */ + TeamHandle team_member; + /** * Memory for dof and quad values. */ - Kokkos::Subview, - Kokkos::pair> - values; + SharedView1D values; /** * Memory for computed gradients in reference coordinate system. */ - Kokkos::Subview< - Kokkos::View, - Kokkos::pair, - Kokkos::pair> - gradients; + SharedView2D gradients; }; - // This function determines the number of cells per block, possibly at compile - // time (by virtue of being 'constexpr') - // TODO this function should be rewritten using meta-programming - DEAL_II_HOST_DEVICE constexpr unsigned int - cells_per_block_shmem(int dim, int fe_degree) - { - /* clang-format off */ - // We are limiting the number of threads according to the - // following formulas: - // - in 2d: `threads = cells * (k+1)^d <= 4*CUDAWrappers::warp_size` - // - in 3d: `threads = cells * (k+1)^d <= 2*CUDAWrappers::warp_size` - return dim==2 ? (fe_degree==1 ? CUDAWrappers::warp_size : // 128 - fe_degree==2 ? CUDAWrappers::warp_size/4 : // 72 - fe_degree==3 ? CUDAWrappers::warp_size/8 : // 64 - fe_degree==4 ? CUDAWrappers::warp_size/8 : // 100 - 1) : - dim==3 ? (fe_degree==1 ? CUDAWrappers::warp_size/4 : // 64 - fe_degree==2 ? CUDAWrappers::warp_size/16 : // 54 - 1) : 1; - /* clang-format on */ - } - - - /*----------------------- Helper functions ---------------------------------*/ - /** - * Compute the quadrature point index in the local cell of a given thread. - * - * @relates CUDAWrappers::MatrixFree - */ - template - DEAL_II_HOST_DEVICE inline unsigned int - q_point_id_in_cell(const unsigned int n_q_points_1d) - { - KOKKOS_IF_ON_DEVICE( - return (dim == 1 ? - threadIdx.x % n_q_points_1d : - dim == 2 ? - threadIdx.x % n_q_points_1d + n_q_points_1d * threadIdx.y : - threadIdx.x % n_q_points_1d + - n_q_points_1d * (threadIdx.y + n_q_points_1d * threadIdx.z));) - - KOKKOS_IF_ON_HOST(AssertThrow(false, ExcInternalError()); return 0;) - } - - - - /** - * Return the quadrature point index local of a given thread. The index is - * only unique for a given MPI process. 
* - * @relates CUDAWrappers::MatrixFree */ template - DEAL_II_HOST_DEVICE inline unsigned int - local_q_point_id( - const unsigned int cell, - const typename CUDAWrappers::MatrixFree::Data *data, - const unsigned int n_q_points_1d, - const unsigned int n_q_points) - { - return (data->row_start / data->padding_length + cell) * n_q_points + - q_point_id_in_cell(n_q_points_1d); - } - - - - /** - * Return the quadrature point associated with a given thread. - * - * @relates CUDAWrappers::MatrixFree - */ - template - DEAL_II_HOST_DEVICE inline - typename CUDAWrappers::MatrixFree::point_type & - get_quadrature_point( - const unsigned int cell, - const typename CUDAWrappers::MatrixFree::Data *data, - const unsigned int n_q_points_1d) - { - return data->q_points(cell, q_point_id_in_cell(n_q_points_1d)); - } /** * Structure which is passed to the kernel. It is used to pass all the * necessary information from the CPU to the GPU. */ @@ -798,6 +740,31 @@ namespace CUDAWrappers * the destination vector. Otherwise, use atomic operations. */ bool use_coloring; + + + + /** + * This function is the host version of local_q_point_id(). + */ + unsigned int + local_q_point_id(const unsigned int cell, + const unsigned int n_q_points, + const unsigned int q_point) const + { + return (row_start / padding_length + cell) * n_q_points + q_point; + } + + + + /** + * This function is the host version of get_quadrature_point(). + */ + Point + get_quadrature_point(const unsigned int cell, + const unsigned int q_point) const + { + return q_points(cell, q_point); + } }; @@ -851,41 +818,6 @@ namespace CUDAWrappers } - - /** - * This function is the host version of local_q_point_id(). - * - * @relates CUDAWrappers::MatrixFree - */ - template - inline unsigned int - local_q_point_id_host(const unsigned int cell, - const DataHost &data, - const unsigned int n_q_points, - const unsigned int i) - { - return (data.row_start / data.padding_length + cell) * n_q_points + i; - } - - - - /** - * This function is the host version of get_quadrature_point(). It assumes - * that the data in MatrixFree::Data has been copied to the host - * using copy_mf_data_to_host(). - * - * @relates CUDAWrappers::MatrixFree - */ - template - inline Point - get_quadrature_point_host(const unsigned int cell, - const DataHost &data, - const unsigned int i) - { - return data.q_points(cell, i); - } - - /*----------------------- Inline functions ---------------------------------*/ # ifndef DOXYGEN diff --git a/include/deal.II/matrix_free/cuda_matrix_free.templates.h b/include/deal.II/matrix_free/cuda_matrix_free.templates.h index 656b73cecf..8a4d44309f 100644 --- a/include/deal.II/matrix_free/cuda_matrix_free.templates.h +++ b/include/deal.II/matrix_free/cuda_matrix_free.templates.h @@ -172,30 +172,7 @@ namespace CUDAWrappers void ReinitHelper::setup_cell_arrays(const unsigned int color) { - const unsigned int n_cells = data->n_cells[color]; - const unsigned int cells_per_block = data->cells_per_block; - - // Setup kernel parameters - const double apply_n_blocks = std::ceil( - static_cast(n_cells) / static_cast(cells_per_block)); - const auto apply_x_n_blocks = - static_cast(std::round(std::sqrt(apply_n_blocks))); - const auto apply_y_n_blocks = static_cast( - std::ceil(apply_n_blocks / static_cast(apply_x_n_blocks))); - - data->grid_dim[color] = dim3(apply_x_n_blocks, apply_y_n_blocks); - - // TODO this should be a templated parameter. 
- const unsigned int n_dofs_1d = fe_degree + 1; - - if (dim == 1) - data->block_dim[color] = dim3(n_dofs_1d * cells_per_block); - else if (dim == 2) - data->block_dim[color] = dim3(n_dofs_1d * cells_per_block, n_dofs_1d); - else - data->block_dim[color] = - dim3(n_dofs_1d * cells_per_block, n_dofs_1d, n_dofs_1d); - + const unsigned int n_cells = data->n_cells[color]; local_to_global = Kokkos::View( @@ -369,60 +346,6 @@ namespace CUDAWrappers - template - __global__ void - apply_kernel_shmem( - Functor func, - const typename MatrixFree::Data gpu_data, - Kokkos::View values, - Kokkos::View gradients, - Number *const src, - Number * dst) - { - constexpr unsigned int cells_per_block = - cells_per_block_shmem(dim, Functor::n_dofs_1d - 1); - - const unsigned int local_cell = threadIdx.x / Functor::n_dofs_1d; - const unsigned int cell = - local_cell + cells_per_block * (blockIdx.x + gridDim.x * blockIdx.y); - - if (cell < gpu_data.n_cells) - { - SharedData shared_data( - {Kokkos::subview( - values, - Kokkos::pair(cell * Functor::n_local_dofs, - (cell + 1) * Functor::n_local_dofs)), - Kokkos::subview(gradients, - Kokkos::pair(cell * Functor::n_q_points, - (cell + 1) * - Functor::n_q_points), - Kokkos::pair(0, dim))}); - - func(cell, &gpu_data, &shared_data, src, dst); - } - } - - - - template - __global__ void - evaluate_coeff(Functor func, - const typename MatrixFree::Data gpu_data) - { - constexpr unsigned int cells_per_block = - cells_per_block_shmem(dim, Functor::n_dofs_1d - 1); - - const unsigned int local_cell = threadIdx.x / Functor::n_dofs_1d; - const unsigned int cell = - local_cell + cells_per_block * (blockIdx.x + gridDim.x * blockIdx.y); - - if (cell < gpu_data.n_cells) - func(cell, &gpu_data); - } - - - template struct VectorLocalSize { @@ -452,6 +375,63 @@ namespace CUDAWrappers return vec.size(); } }; + + + + template + struct ApplyKernel + { + using TeamHandle = Kokkos::TeamPolicy< + MemorySpace::Default::kokkos_space::execution_space>::member_type; + using SharedView1D = + Kokkos::View>; + using SharedView2D = + Kokkos::View>; + + ApplyKernel(Functor func, + const typename MatrixFree::Data gpu_data, + Number *const src, + Number * dst) + : func(func) + , gpu_data(gpu_data) + , src(src) + , dst(dst) + {} + + Functor func; + const typename MatrixFree::Data gpu_data; + Number *const src; + Number * dst; + + + // Provide the shared memory capacity. This function takes the team_size + // as an argument, which allows team_size dependent allocations. 
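// Before the definition continues, a self-contained illustration of this
// scratch-memory protocol: a functor advertises its per-team requirement
// via team_shmem_size() and carves unmanaged Views out of team_shmem()
// inside operator(). ScratchSketch and n_local_dofs are generic names for
// this sketch, not the deal.II kernel itself; it assumes <Kokkos_Core.hpp>
// is included and Kokkos is initialized:
struct ScratchSketch
{
  using member_type = Kokkos::TeamPolicy<>::member_type;
  using scratch_view =
    Kokkos::View<double *,
                 Kokkos::DefaultExecutionSpace::scratch_memory_space,
                 Kokkos::MemoryTraits<Kokkos::Unmanaged>>;

  int n_local_dofs;

  std::size_t
  team_shmem_size(int /*team_size*/) const
  {
    return scratch_view::shmem_size(n_local_dofs);
  }

  KOKKOS_FUNCTION void
  operator()(const member_type &team) const
  {
    // allocate the team's scratch values and zero them cooperatively
    scratch_view values(team.team_shmem(), n_local_dofs);
    Kokkos::parallel_for(Kokkos::TeamThreadRange(team, n_local_dofs),
                         [&](const int i) { values(i) = 0.; });
    team.team_barrier();
  }
};
// Launched with one team per cell, mirroring the cell loops further down:
//   Kokkos::parallel_for(Kokkos::TeamPolicy<>(n_cells, Kokkos::AUTO),
//                        ScratchSketch{n_local_dofs});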
+ size_t + team_shmem_size(int /*team_size*/) const + { + return SharedView1D::shmem_size(Functor::n_local_dofs) + + SharedView2D::shmem_size(Functor::n_local_dofs); + } + + + DEAL_II_HOST_DEVICE + void + operator()(const TeamHandle &team_member) const + { + // Get the scratch memory + SharedView1D values(team_member.team_shmem(), Functor::n_local_dofs); + SharedView2D gradients(team_member.team_shmem(), Functor::n_local_dofs); + + SharedData shared_data(team_member, values, gradients); + func(team_member.league_rank(), &gpu_data, &shared_data, src, dst); + } + }; } // namespace internal @@ -680,9 +660,16 @@ namespace CUDAWrappers for (unsigned int i = 0; i < n_colors; ++i) if (n_cells[i] > 0) { - internal::evaluate_coeff - <<>>(func, get_data(i)); - AssertCudaKernel(); + MemorySpace::Default::kokkos_space::execution_space exec; + auto color_data = get_data(i); + Kokkos::parallel_for( + "dealii::MatrixFree::evaluate_coeff", + Kokkos::MDRangePolicy< + MemorySpace::Default::kokkos_space::execution_space, + Kokkos::Rank<2>>(exec, {0, 0}, {n_cells[i], Functor::n_q_points}), + KOKKOS_LAMBDA(const int cell, const int q) { + func(&color_data, cell, q); + }); } } @@ -808,9 +795,6 @@ namespace CUDAWrappers Kokkos::deep_copy(co_shape_gradients, co_shape_gradients_host); } - // Setup the number of cells per CUDA thread block - cells_per_block = cells_per_block_shmem(dim, fe_degree); - internal::ReinitHelper helper( this, mapping, fe, quad, shape_info, *dof_handler, update_flags); @@ -937,19 +921,6 @@ namespace CUDAWrappers if (n_constrained_dofs != 0) { - const auto constraint_n_blocks = static_cast( - std::ceil(static_cast(n_constrained_dofs) / - static_cast(block_size))); - const auto constraint_x_n_blocks = - static_cast(std::round(std::sqrt(constraint_n_blocks))); - const auto constraint_y_n_blocks = static_cast( - std::ceil(static_cast(constraint_n_blocks) / - static_cast(constraint_x_n_blocks))); - - constraint_grid_dim = - dim3(constraint_x_n_blocks, constraint_y_n_blocks); - constraint_block_dim = dim3(block_size); - std::vector constrained_dofs_host( n_constrained_dofs); @@ -1006,37 +977,23 @@ namespace CUDAWrappers const VectorType &src, VectorType & dst) const { - std::vector> - values_colors(n_colors); - std::vector> - gradients_colors(n_colors); // Execute the loop on the cells - for (unsigned int i = 0; i < n_colors; ++i) - if (n_cells[i] > 0) + for (unsigned int color = 0; color < n_colors; ++color) + if (n_cells[color] > 0) { - const unsigned int size = - (grid_dim[i].x * grid_dim[i].y * grid_dim[i].z) * cells_per_block * - Functor::n_local_dofs; - values_colors[i] = - Kokkos::View( - Kokkos::view_alloc("values_" + std::to_string(i), - Kokkos::WithoutInitializing), - size); - gradients_colors[i] = - Kokkos::View( - Kokkos::view_alloc("gradients_" + std::to_string(i), - Kokkos::WithoutInitializing), - size); - internal::apply_kernel_shmem - <<>>(func, - get_data(i), - values_colors[i], - gradients_colors[i], - src.get_values(), - dst.get_values()); - AssertCudaKernel(); + MemorySpace::Default::kokkos_space::execution_space exec; + Kokkos::TeamPolicy< + MemorySpace::Default::kokkos_space::execution_space> + team_policy(exec, n_cells[color], Kokkos::AUTO); + + internal::ApplyKernel apply_kernel( + func, get_data(color), src.get_values(), dst.get_values()); + + Kokkos::parallel_for("dealii::MatrixFree::serial_cell_loop", + team_policy, + apply_kernel); } - cudaDeviceSynchronize(); + Kokkos::fence(); } @@ -1049,6 +1006,8 @@ namespace CUDAWrappers const LinearAlgebra::distributed::Vector 
&src, LinearAlgebra::distributed::Vector &dst) const { + MemorySpace::Default::kokkos_space::execution_space exec; + // in case we have compatible partitioners, we can simply use the provided // vectors if (src.get_partitioner().get() == partitioner.get() && @@ -1059,32 +1018,21 @@ namespace CUDAWrappers { src.update_ghost_values_start(0); - Kokkos::View values; - Kokkos::View - gradients; // In parallel, it's possible that some processors do not own any // cells. if (n_cells[0] > 0) { - const unsigned int size = - (grid_dim[0].x * grid_dim[0].y * grid_dim[0].z) * - cells_per_block * Functor::n_local_dofs; - values = - Kokkos::View( - Kokkos::view_alloc("values", Kokkos::WithoutInitializing), - size); - gradients = Kokkos::View( - Kokkos::view_alloc("gradients", Kokkos::WithoutInitializing), - size); - internal::apply_kernel_shmem - <<>>(func, - get_data(0), - values, - gradients, - src.get_values(), - dst.get_values()); - AssertCudaKernel(); + Kokkos::TeamPolicy< + MemorySpace::Default::kokkos_space::execution_space> + team_policy(exec, n_cells[0], Kokkos::AUTO); + + internal::ApplyKernel apply_kernel( + func, get_data(0), src.get_values(), dst.get_values()); + + Kokkos::parallel_for( + "dealii::MatrixFree::distributed_cell_loop_0", + team_policy, + apply_kernel); } src.update_ghost_values_finish(); @@ -1092,29 +1040,22 @@ namespace CUDAWrappers // cells if (n_cells[1] > 0) { - const unsigned int size = - (grid_dim[1].x * grid_dim[1].y * grid_dim[1].z) * - cells_per_block * Functor::n_local_dofs; - values = - Kokkos::View( - Kokkos::view_alloc("values", Kokkos::WithoutInitializing), - size); - gradients = Kokkos::View( - Kokkos::view_alloc("gradients", Kokkos::WithoutInitializing), - size); - internal::apply_kernel_shmem - <<>>(func, - get_data(1), - values, - gradients, - src.get_values(), - dst.get_values()); - AssertCudaKernel(); + Kokkos::TeamPolicy< + MemorySpace::Default::kokkos_space::execution_space> + team_policy(exec, n_cells[1], Kokkos::AUTO); + + internal::ApplyKernel apply_kernel( + func, get_data(1), src.get_values(), dst.get_values()); + + Kokkos::parallel_for( + "dealii::MatrixFree::distributed_cell_loop_1", + team_policy, + apply_kernel); + // We need a synchronization point because we don't want // CUDA-aware MPI to start the MPI communication until the // kernel is done. 
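// Kokkos::fence() below is the portable counterpart of
// cudaDeviceSynchronize(): it blocks until all asynchronously launched
// kernels in the execution space have finished, so device buffers are safe
// to hand to (GPU-aware) MPI afterwards. A sketch with generic names
// (fill_then_fence is not deal.II API; assumes <Kokkos_Core.hpp> is
// included):
void
fill_then_fence(Kokkos::View<double *> buf)
{
  Kokkos::parallel_for(
    "fill", buf.extent(0), KOKKOS_LAMBDA(const int i) { buf(i) = i; });
  Kokkos::fence(); // kernel finished; MPI may now read 'buf'
}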
- cudaDeviceSynchronize(); + Kokkos::fence(); } dst.compress_start(0, VectorOperation::add); @@ -1122,25 +1063,17 @@ namespace CUDAWrappers // not own any cells if (n_cells[2] > 0) { - const unsigned int size = - (grid_dim[2].x * grid_dim[2].y * grid_dim[2].z) * - cells_per_block * Functor::n_local_dofs; - values = - Kokkos::View( - Kokkos::view_alloc("values", Kokkos::WithoutInitializing), - size); - gradients = Kokkos::View( - Kokkos::view_alloc("gradients", Kokkos::WithoutInitializing), - size); - internal::apply_kernel_shmem - <<>>(func, - get_data(2), - values, - gradients, - src.get_values(), - dst.get_values()); - AssertCudaKernel(); + Kokkos::TeamPolicy< + MemorySpace::Default::kokkos_space::execution_space> + team_policy(exec, n_cells[2], Kokkos::AUTO); + + internal::ApplyKernel apply_kernel( + func, get_data(2), src.get_values(), dst.get_values()); + + Kokkos::parallel_for( + "dealii::MatrixFree::distributed_cell_loop_2", + team_policy, + apply_kernel); } dst.compress_finish(VectorOperation::add); } @@ -1158,27 +1091,18 @@ namespace CUDAWrappers for (unsigned int i = 0; i < n_colors; ++i) if (n_cells[i] > 0) { - const unsigned int size = - (grid_dim[i].x * grid_dim[i].y * grid_dim[i].z) * - cells_per_block * Functor::n_local_dofs; - values_colors[i] = - Kokkos::View( - Kokkos::view_alloc("values_" + std::to_string(i), - Kokkos::WithoutInitializing), - size); - gradients_colors[i] = - Kokkos::View( - Kokkos::view_alloc("gradients_" + std::to_string(i), - Kokkos::WithoutInitializing), - size); - internal::apply_kernel_shmem - <<>>(func, - get_data(i), - values_colors[i], - gradients_colors[i], - src.get_values(), - dst.get_values()); + Kokkos::TeamPolicy< + MemorySpace::Default::kokkos_space::execution_space> + team_policy(exec, n_cells[i], Kokkos::AUTO); + + internal::ApplyKernel apply_kernel( + func, get_data(i), src.get_values(), dst.get_values()); + + Kokkos::parallel_for( + "dealii::MatrixFree::distributed_cell_loop_" + + std::to_string(i), + team_policy, + apply_kernel); } dst.compress(VectorOperation::add); } @@ -1194,36 +1118,22 @@ namespace CUDAWrappers ghosted_src = src; ghosted_dst = dst; - std::vector> - values_colors(n_colors); - std::vector< - Kokkos::View> - gradients_colors(n_colors); // Execute the loop on the cells for (unsigned int i = 0; i < n_colors; ++i) if (n_cells[i] > 0) { - const unsigned int size = - (grid_dim[i].x * grid_dim[i].y * grid_dim[i].z) * - cells_per_block * Functor::n_local_dofs; - values_colors[i] = - Kokkos::View( - Kokkos::view_alloc("values_" + std::to_string(i), - Kokkos::WithoutInitializing), - size); - gradients_colors[i] = - Kokkos::View( - Kokkos::view_alloc("gradients_" + std::to_string(i), - Kokkos::WithoutInitializing), - size); - internal::apply_kernel_shmem - <<>>(func, - get_data(i), - values_colors[i], - gradients_colors[i], - ghosted_src.get_values(), - ghosted_dst.get_values()); - AssertCudaKernel(); + Kokkos::TeamPolicy< + MemorySpace::Default::kokkos_space::execution_space> + team_policy(exec, n_cells[i], Kokkos::AUTO); + + internal::ApplyKernel apply_kernel( + func, get_data(i), src.get_values(), dst.get_values()); + + Kokkos::parallel_for( + "dealii::MatrixFree::distributed_cell_loop_" + + std::to_string(i), + team_policy, + apply_kernel); } // Add the ghosted values diff --git a/include/deal.II/matrix_free/cuda_tensor_product_kernels.h b/include/deal.II/matrix_free/cuda_tensor_product_kernels.h index e72becc238..b191a0cf5b 100644 --- a/include/deal.II/matrix_free/cuda_tensor_product_kernels.h +++ 
b/include/deal.II/matrix_free/cuda_tensor_product_kernels.h @@ -59,41 +59,70 @@ namespace CUDAWrappers typename ViewTypeIn, typename ViewTypeOut> DEAL_II_HOST_DEVICE void - apply(const Kokkos::View + apply(const Kokkos::TeamPolicy< + MemorySpace::Default::kokkos_space::execution_space>::member_type + &team_member, + const Kokkos::View shape_data, const ViewTypeIn in, ViewTypeOut out) { - KOKKOS_IF_ON_DEVICE( - const unsigned int i = (dim == 1) ? 0 : threadIdx.x % n_q_points_1d; - const unsigned int j = (dim == 3) ? threadIdx.y : 0; - const unsigned int q = (dim == 1) ? (threadIdx.x % n_q_points_1d) : - (dim == 2) ? threadIdx.y : - threadIdx.z; - - // This loop simply multiplies the shape function at the quadrature - // point by the value finite element coefficient. - Number t = 0; - for (int k = 0; k < n_q_points_1d; ++k) { - const unsigned int shape_idx = - dof_to_quad ? (q + k * n_q_points_1d) : (k + q * n_q_points_1d); - const unsigned int source_idx = - (direction == 0) ? (k + n_q_points_1d * (i + n_q_points_1d * j)) : - (direction == 1) ? (i + n_q_points_1d * (k + n_q_points_1d * j)) : - (i + n_q_points_1d * (j + n_q_points_1d * k)); - t += shape_data[shape_idx] * - (in_place ? out[source_idx] : in[source_idx]); - } - - if (in_place) __syncthreads(); - - const unsigned int destination_idx = - (direction == 0) ? (q + n_q_points_1d * (i + n_q_points_1d * j)) : - (direction == 1) ? (i + n_q_points_1d * (q + n_q_points_1d * j)) : - (i + n_q_points_1d * (j + n_q_points_1d * q)); - - if (add) Kokkos::atomic_add(&out[destination_idx], t); - else out[destination_idx] = t;) + constexpr unsigned int n_q_points = Utilities::pow(n_q_points_1d, dim); + + Number t[n_q_points]; + Kokkos::parallel_for( + Kokkos::TeamThreadRange(team_member, n_q_points), + [&](const int &q_point) { + const unsigned int i = (dim == 1) ? 0 : q_point % n_q_points_1d; + const unsigned int j = + (dim == 3) ? (q_point / n_q_points_1d) % n_q_points_1d : 0; + const unsigned int q = + (dim == 1) ? q_point : + (dim == 2) ? (q_point / n_q_points_1d) % n_q_points_1d : + q_point / (n_q_points_1d * n_q_points_1d); + + // This loop simply multiplies the shape function at the quadrature + // point by the value finite element coefficient. + t[q_point] = 0; + for (int k = 0; k < n_q_points_1d; ++k) + { + const unsigned int shape_idx = + dof_to_quad ? (q + k * n_q_points_1d) : (k + q * n_q_points_1d); + const unsigned int source_idx = + (direction == 0) ? + (k + n_q_points_1d * (i + n_q_points_1d * j)) : + (direction == 1) ? + (i + n_q_points_1d * (k + n_q_points_1d * j)) : + (i + n_q_points_1d * (j + n_q_points_1d * k)); + t[q_point] += shape_data[shape_idx] * + (in_place ? out(source_idx) : in(source_idx)); + } + }); + + if (in_place) + team_member.team_barrier(); + + Kokkos::parallel_for( + Kokkos::TeamThreadRange(team_member, n_q_points), + [&](const int &q_point) { + const unsigned int i = (dim == 1) ? 0 : q_point % n_q_points_1d; + const unsigned int j = + (dim == 3) ? (q_point / n_q_points_1d) % n_q_points_1d : 0; + const unsigned int q = + (dim == 1) ? q_point : + (dim == 2) ? (q_point / n_q_points_1d) % n_q_points_1d : + q_point / (n_q_points_1d * n_q_points_1d); + + const unsigned int destination_idx = + (direction == 0) ? (q + n_q_points_1d * (i + n_q_points_1d * j)) : + (direction == 1) ? 
@@ -125,8 +154,12 @@ namespace CUDAWrappers
                           n_q_points_1d,
                           Number>
    {
+      using TeamHandle = Kokkos::TeamPolicy<
+        MemorySpace::Default::kokkos_space::execution_space>::member_type;
+
      DEAL_II_HOST_DEVICE
      EvaluatorTensorProduct(
+        const TeamHandle &team_member,
        Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
          shape_values,
        Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
          shape_gradients,
@@ -218,6 +251,11 @@ namespace CUDAWrappers
      DEAL_II_HOST_DEVICE void
      integrate_value_and_gradient(ViewType1 u, ViewType2 grad_u);

+      /**
+       * TeamPolicy handle.
+       */
+      const TeamHandle &team_member;
+
      /**
       * Values of the shape functions.
       */
@@ -246,12 +284,14 @@ namespace CUDAWrappers
                           n_q_points_1d,
                           Number>::
    EvaluatorTensorProduct(
+      const TeamHandle &team_member,
      Kokkos::View<Number *, MemorySpace::Default::kokkos_space> shape_values,
      Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
        shape_gradients,
      Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
        co_shape_gradients)
-      : shape_values(shape_values)
+      : team_member(team_member)
+      , shape_values(shape_values)
      , shape_gradients(shape_gradients)
      , co_shape_gradients(co_shape_gradients)
    {}
@@ -274,7 +314,7 @@ namespace CUDAWrappers
                ViewTypeOut out) const
    {
      apply<dim, n_q_points_1d, Number, direction, dof_to_quad, add, in_place>(
-        shape_values, in, out);
+        team_member, shape_values, in, out);
    }
@@ -295,7 +335,7 @@ namespace CUDAWrappers
                ViewTypeOut out) const
    {
      apply<dim, n_q_points_1d, Number, direction, dof_to_quad, add, in_place>(
-        shape_gradients, in, out);
+        team_member, shape_gradients, in, out);
    }
@@ -316,7 +356,7 @@ namespace CUDAWrappers
                ViewTypeOut out) const
    {
      apply<dim, n_q_points_1d, Number, direction, dof_to_quad, add, in_place>(
-        co_shape_gradients, in, out);
+        team_member, co_shape_gradients, in, out);
    }
@@ -341,7 +381,7 @@ namespace CUDAWrappers
          case 2:
            {
              values<0, true, false, true>(u, u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();
              values<1, true, false, true>(u, u);

              break;
@@ -349,9 +389,9 @@ namespace CUDAWrappers
          case 3:
            {
              values<0, true, false, true>(u, u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();
              values<1, true, false, true>(u, u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();
              values<2, true, false, true>(u, u);

              break;
@@ -386,7 +426,7 @@ namespace CUDAWrappers
          case 2:
            {
              values<0, false, false, true>(u, u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();
              values<1, false, false, true>(u, u);

              break;
@@ -394,9 +434,9 @@ namespace CUDAWrappers
          case 3:
            {
              values<0, false, false, true>(u, u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();
              values<1, false, false, true>(u, u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();
              values<2, false, false, true>(u, u);

              break;
@@ -437,7 +477,7 @@ namespace CUDAWrappers
              values<0, true, false, false>(
                u, Kokkos::subview(grad_u, Kokkos::ALL, 1));

-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();

              values<1, true, false, true>(
                Kokkos::subview(grad_u, Kokkos::ALL, 0),
@@ -457,7 +497,7 @@ namespace CUDAWrappers
              values<0, true, false, false>(
                u, Kokkos::subview(grad_u, Kokkos::ALL, 2));

-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();

              values<1, true, false, true>(
                Kokkos::subview(grad_u, Kokkos::ALL, 0),
@@ -469,7 +509,7 @@ namespace CUDAWrappers
                Kokkos::subview(grad_u, Kokkos::ALL, 2),
                Kokkos::subview(grad_u, Kokkos::ALL, 2));

-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();

              values<2, true, false, true>(
                Kokkos::subview(grad_u, Kokkos::ALL, 0),
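Passing the team handle to the constructor once, storing it as a reference member, and forwarding it to apply() from values(), gradients(), and co_gradients() keeps every call site synchronized on the same team. A stripped-down sketch of that construction; MiniEvaluator, weights, and scale() are hypothetical names, not deal.II API:

    #include <Kokkos_Core.hpp>

    using TeamHandle = Kokkos::TeamPolicy<>::member_type;

    // Hypothetical mini evaluator: the team handle is injected once and
    // reused by every member function, as EvaluatorTensorProduct now does.
    struct MiniEvaluator
    {
      const TeamHandle &           team_member;
      Kokkos::View<const double *> weights;

      KOKKOS_FUNCTION
      MiniEvaluator(const TeamHandle &team_member,
                    Kokkos::View<const double *> weights)
        : team_member(team_member)
        , weights(weights)
      {}

      template <typename ViewType>
      KOKKOS_FUNCTION void
      scale(ViewType u) const
      {
        Kokkos::parallel_for(Kokkos::TeamThreadRange(team_member, u.extent(0)),
                             [&](const int i) { u(i) *= weights(i); });
        team_member.team_barrier();
      }
    };

    int main(int argc, char *argv[])
    {
      Kokkos::ScopeGuard     guard(argc, argv);
      Kokkos::View<double *> u("u", 16);
      Kokkos::View<double *> w("w", 16);
      Kokkos::deep_copy(u, 1.);
      Kokkos::deep_copy(w, 2.);

      Kokkos::parallel_for(
        "evaluator_demo",
        Kokkos::TeamPolicy<>(1, Kokkos::AUTO),
        KOKKOS_LAMBDA(const TeamHandle &team) {
          MiniEvaluator eval(team, w);
          eval.scale(u);
        });
      Kokkos::fence();
    }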
@@ -509,7 +549,7 @@ namespace CUDAWrappers
          case 1:
            {
              values<0, true, false, true>(u, u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();

              co_gradients<0, true, false, false>(
                u, Kokkos::subview(grad_u, Kokkos::ALL, 0));
@@ -519,9 +559,9 @@ namespace CUDAWrappers
          case 2:
            {
              values<0, true, false, true>(u, u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();
              values<1, true, false, true>(u, u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();

              co_gradients<0, true, false, false>(
                u, Kokkos::subview(grad_u, Kokkos::ALL, 0));
@@ -533,11 +573,11 @@ namespace CUDAWrappers
          case 3:
            {
              values<0, true, false, true>(u, u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();
              values<1, true, false, true>(u, u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();
              values<2, true, false, true>(u, u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();

              co_gradients<0, true, false, false>(
                u, Kokkos::subview(grad_u, Kokkos::ALL, 0));
@@ -586,11 +626,11 @@ namespace CUDAWrappers
                Kokkos::subview(grad_u, Kokkos::ALL, 1),
                Kokkos::subview(grad_u, Kokkos::ALL, 1));

-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();

              values<1, false, add, false>(
                Kokkos::subview(grad_u, Kokkos::ALL, 0), u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();
              gradients<1, false, true, false>(
                Kokkos::subview(grad_u, Kokkos::ALL, 1), u);
@@ -608,7 +648,7 @@ namespace CUDAWrappers
                Kokkos::subview(grad_u, Kokkos::ALL, 2),
                Kokkos::subview(grad_u, Kokkos::ALL, 2));

-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();

              values<1, false, false, true>(
                Kokkos::subview(grad_u, Kokkos::ALL, 0),
@@ -620,14 +660,14 @@ namespace CUDAWrappers
                Kokkos::subview(grad_u, Kokkos::ALL, 2),
                Kokkos::subview(grad_u, Kokkos::ALL, 2));

-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();

              values<2, false, add, false>(
                Kokkos::subview(grad_u, Kokkos::ALL, 0), u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();
              values<2, false, true, false>(
                Kokkos::subview(grad_u, Kokkos::ALL, 1), u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();
              gradients<2, false, true, false>(
                Kokkos::subview(grad_u, Kokkos::ALL, 2), u);
@@ -660,7 +700,7 @@ namespace CUDAWrappers
            {
              co_gradients<0, false, true, false>(
                Kokkos::subview(grad_u, Kokkos::ALL, 0), u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();

              values<0, false, false, true>(u, u);

@@ -670,15 +710,15 @@ namespace CUDAWrappers
            {
              co_gradients<1, false, true, false>(
                Kokkos::subview(grad_u, Kokkos::ALL, 1), u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();
              co_gradients<0, false, true, false>(
                Kokkos::subview(grad_u, Kokkos::ALL, 0), u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();

              values<1, false, false, true>(u, u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();
              values<0, false, false, true>(u, u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();

              break;
            }
@@ -686,20 +726,20 @@ namespace CUDAWrappers
            {
              co_gradients<2, false, true, false>(
                Kokkos::subview(grad_u, Kokkos::ALL, 2), u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();
              co_gradients<1, false, true, false>(
                Kokkos::subview(grad_u, Kokkos::ALL, 1), u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();
              co_gradients<0, false, true, false>(
                Kokkos::subview(grad_u, Kokkos::ALL, 0), u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();

              values<2, false, false, true>(u, u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();
              values<1, false, false, true>(u, u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();
              values<0, false, false, true>(u, u);
-              KOKKOS_IF_ON_DEVICE(__syncthreads();)
+              team_member.team_barrier();

              break;
            }
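All of the barrier hunks above also rely on Kokkos::subview(grad_u, Kokkos::ALL, d) to address one gradient component at a time. A subview is a zero-copy slice: the rank-1 column aliases the rank-2 view's memory, so it can be handed to the rank-1 values()/gradients() kernels and any write through it lands in grad_u. A small sketch (names and sizes invented):

    #include <Kokkos_Core.hpp>

    int main(int argc, char *argv[])
    {
      Kokkos::ScopeGuard guard(argc, argv);
      const int          n_local_dofs = 27;

      // One column per gradient component, as for grad_u above.
      Kokkos::View<double *[3]> grad_u("grad_u", n_local_dofs);

      // Rank-1 slice of column 1; no data is copied, the slice aliases
      // grad_u's memory.
      auto component_1 = Kokkos::subview(grad_u, Kokkos::ALL, 1);

      Kokkos::parallel_for(
        "fill_component_1",
        n_local_dofs,
        KOKKOS_LAMBDA(const int i) { component_1(i) = i; });
      Kokkos::fence();
    }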
diff --git a/tests/cuda/coefficient_eval.cc b/tests/cuda/coefficient_eval.cc
index 09529e932f..b2da859b17 100644
--- a/tests/cuda/coefficient_eval.cc
+++ b/tests/cuda/coefficient_eval.cc
@@ -59,16 +59,20 @@ DEAL_II_HOST_DEVICE void
 DummyOperator<dim, fe_degree>::operator()(
   const unsigned int                                          cell,
   const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data,
-  CUDAWrappers::SharedData<dim, double> *,
+  CUDAWrappers::SharedData<dim, double> *shared_data,
   const double *,
   double *dst) const
 {
-  const unsigned int pos = CUDAWrappers::local_q_point_id<dim, double>(
-    cell, gpu_data, n_dofs_1d, n_q_points);
-  auto point = CUDAWrappers::get_quadrature_point<dim, double>(cell,
-                                                               gpu_data,
-                                                               fe_degree + 1);
-  dst[pos] = dim == 2 ? point(0) + point(1) : point(0) + point(1) + point(2);
+  Kokkos::parallel_for(
+    Kokkos::TeamThreadRange(shared_data->team_member, n_q_points),
+    [&](const int q_point) {
+      const unsigned int pos =
+        gpu_data->local_q_point_id(cell, n_q_points, q_point);
+
+      auto point = gpu_data->get_quadrature_point(cell, q_point);
+      dst[pos] =
+        dim == 2 ? point(0) + point(1) : point(0) + point(1) + point(2);
+    });
 }
@@ -158,10 +162,8 @@ test()
       for (unsigned int i = 0; i < n_q_points_per_cell; ++i)
         {
           unsigned int const pos =
-            CUDAWrappers::local_q_point_id_host<dim, double>(
-              cell_id, gpu_data_host, n_q_points_per_cell, i);
-          auto p = CUDAWrappers::get_quadrature_point_host<dim, double>(
-            cell_id, gpu_data_host, i);
+            gpu_data_host.local_q_point_id(cell_id, n_q_points_per_cell, i);
+          auto p = gpu_data_host.get_quadrature_point(cell_id, i);
           const double p_val = dim == 2 ? p(0) + p(1) : p(0) + p(1) + p(2);
           AssertThrow(std::abs(coef[pos] - p_val) < 1e-12,
                       ExcInternalError());
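The device kernel and the host-side verification loop now call the very same Data member functions, where the old code needed the separate local_q_point_id_host() and get_quadrature_point_host() free functions. Nothing deeper is at work than a member function compiled for host and device alike. A sketch with a hypothetical CellData type and a deliberately simplified index layout (the real Data class computes its indices differently):

    #include <Kokkos_Core.hpp>

    struct CellData
    {
      int n_q_points_per_cell;

      // Compiled for host and device, so the same code serves kernels and
      // host-side test loops. The layout here is invented for the example.
      KOKKOS_INLINE_FUNCTION int
      local_q_point_id(int cell, int q) const
      {
        return cell * n_q_points_per_cell + q;
      }
    };

    int main(int argc, char *argv[])
    {
      Kokkos::ScopeGuard guard(argc, argv);
      const CellData     data{4};

      // Host-side use, as in the updated test():
      const int pos_host = data.local_q_point_id(/*cell=*/2, /*q=*/3);

      // Device-side use, as in the updated DummyOperator::operator()():
      Kokkos::View<int *> pos_dev("pos_dev", 1);
      Kokkos::parallel_for(
        "device_use", 1, KOKKOS_LAMBDA(const int) {
          pos_dev(0) = data.local_q_point_id(2, 3);
        });
      Kokkos::fence();

      auto pos_mirror = Kokkos::create_mirror_view(pos_dev);
      Kokkos::deep_copy(pos_mirror, pos_dev);
      return pos_host == pos_mirror(0) ? 0 : 1;
    }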
diff --git a/tests/cuda/cuda_evaluate_1d_shape.cc b/tests/cuda/cuda_evaluate_1d_shape.cc
index 24409fe430..0da2826aa7 100644
--- a/tests/cuda/cuda_evaluate_1d_shape.cc
+++ b/tests/cuda/cuda_evaluate_1d_shape.cc
@@ -20,6 +20,8 @@
 // much easier to check their correctness directly rather than from the results
 // in dependent functions
 
+#include "deal.II/base/memory_space.h"
+
 #include
 
 #include
 
@@ -32,14 +34,18 @@
 
 namespace CUDA = LinearAlgebra::CUDAWrappers;
 
+using TeamHandle = Kokkos::TeamPolicy<
+  MemorySpace::Default::kokkos_space::execution_space>::member_type;
+
 template <int M, int N, int type, bool add, bool dof_to_quad>
-__global__ void
+DEAL_II_HOST_DEVICE void
 evaluate_tensor_product(
+  const TeamHandle &                                         team_member,
   Kokkos::View<double *, MemorySpace::Default::kokkos_space> shape_values,
   Kokkos::View<double *, MemorySpace::Default::kokkos_space> shape_gradients,
   Kokkos::View<double *, MemorySpace::Default::kokkos_space> co_shape_gradients,
-  double *dst,
-  double *src)
+  Kokkos::View<double *, MemorySpace::Default::kokkos_space> dst,
+  Kokkos::View<double *, MemorySpace::Default::kokkos_space> src)
 {
   CUDAWrappers::internal::EvaluatorTensorProduct<
     CUDAWrappers::internal::evaluate_general,
     1,
     M - 1,
     N,
     double>
-    evaluator(shape_values, shape_gradients, co_shape_gradients);
+    evaluator(team_member, shape_values, shape_gradients, co_shape_gradients);
 
   if (type == 0)
     evaluator.template values<0, dof_to_quad, add, false>(src, dst);
@@ -81,25 +87,29 @@ test()
   if (type == 1 && M % 2 == 1 && N % 2 == 1)
     shape_host[M / 2 * N + N / 2] = 0.;
 
-  LinearAlgebra::ReadWriteVector<double> x_host(N), x_ref(N), y_host(M),
-    y_ref(M);
+  LinearAlgebra::ReadWriteVector<double> x_ref(N), y_ref(M);
+  Kokkos::View<double[N], MemorySpace::Default::kokkos_space> x_dev(
+    Kokkos::view_alloc("x_dev", Kokkos::WithoutInitializing));
+  Kokkos::View<double[M], MemorySpace::Default::kokkos_space> y_dev(
+    Kokkos::view_alloc("y_dev", Kokkos::WithoutInitializing));
+  auto x_host = Kokkos::create_mirror_view(x_dev);
+  auto y_host = Kokkos::create_mirror_view(y_dev);
+
   for (unsigned int i = 0; i < N; ++i)
-    x_host[i] = static_cast<double>(Testing::rand()) / RAND_MAX;
+    x_host(i) = static_cast<double>(Testing::rand()) / RAND_MAX;
 
   // Compute reference
   for (unsigned int i = 0; i < M; ++i)
     {
-      y_host[i] = 1.;
-      y_ref[i]  = add ? y_host[i] : 0.;
+      y_host(i) = 1.;
+      y_ref[i]  = add ? y_host(i) : 0.;
 
       for (unsigned int j = 0; j < N; ++j)
-        y_ref[i] += shape_host[i * N + j] * x_host[j];
+        y_ref[i] += shape_host[i * N + j] * x_host(j);
     }
 
   // Copy data to the GPU.
-  CUDA::Vector<double> x_dev(N), y_dev(M);
-  x_dev.import(x_host, VectorOperation::insert);
-  y_dev.import(y_host, VectorOperation::insert);
-
+  Kokkos::deep_copy(x_dev, x_host);
+  Kokkos::deep_copy(y_dev, y_host);
 
   Kokkos::View<double *, MemorySpace::Default::kokkos_space> shape_values(
     Kokkos::view_alloc("shape_values", Kokkos::WithoutInitializing),
@@ -122,49 +132,60 @@ test()
 
   // Launch the kernel
-  evaluate_tensor_product<<<1, M>>>(shape_values,
-                                    shape_gradients,
-                                    co_shape_gradients,
-                                    y_dev.get_values(),
-                                    x_dev.get_values());
+  MemorySpace::Default::kokkos_space::execution_space exec;
+  Kokkos::TeamPolicy<MemorySpace::Default::kokkos_space::execution_space>
+    team_policy(exec, 1, Kokkos::AUTO);
+  Kokkos::parallel_for(
+    team_policy, KOKKOS_LAMBDA(const TeamHandle &team_member) {
+      evaluate_tensor_product(team_member,
+                              shape_values,
+                              shape_gradients,
+                              co_shape_gradients,
+                              y_dev,
+                              x_dev);
+    });
 
   // Check the results on the host
-  y_host.import(y_dev, VectorOperation::insert);
+  Kokkos::deep_copy(y_host, y_dev);
   deallog << "Errors no transpose: ";
   for (unsigned int i = 0; i < M; ++i)
-    deallog << y_host[i] - y_ref[i] << " ";
+    deallog << y_host(i) - y_ref[i] << " ";
   deallog << std::endl;
 
   for (unsigned int i = 0; i < M; ++i)
-    y_host[i] = static_cast<double>(Testing::rand()) / RAND_MAX;
+    y_host(i) = static_cast<double>(Testing::rand()) / RAND_MAX;
 
   // Copy y_host to the device
-  y_dev.import(y_host, VectorOperation::insert);
+  Kokkos::deep_copy(y_dev, y_host);
 
   // Compute reference
   for (unsigned int i = 0; i < N; ++i)
     {
-      x_host[i] = 2.;
-      x_ref[i]  = add ? x_host[i] : 0.;
+      x_host(i) = 2.;
+      x_ref[i]  = add ? x_host(i) : 0.;
 
       for (unsigned int j = 0; j < M; ++j)
-        x_ref[i] += shape_host[j * N + i] * y_host[j];
+        x_ref[i] += shape_host[j * N + i] * y_host(j);
     }
 
   // Copy x_host to the device
-  x_dev.import(x_host, VectorOperation::insert);
+  Kokkos::deep_copy(x_dev, x_host);
 
   // Launch the kernel
-  evaluate_tensor_product<<<1, M>>>(shape_values,
-                                    shape_gradients,
-                                    co_shape_gradients,
-                                    x_dev.get_values(),
-                                    y_dev.get_values());
+  Kokkos::parallel_for(
+    team_policy, KOKKOS_LAMBDA(const TeamHandle &team_member) {
+      evaluate_tensor_product(team_member,
+                              shape_values,
+                              shape_gradients,
+                              co_shape_gradients,
+                              x_dev,
+                              y_dev);
+    });
 
   // Check the results on the host
-  x_host.import(x_dev, VectorOperation::insert);
+  Kokkos::deep_copy(x_host, x_dev);
   deallog << "Errors transpose: ";
   for (unsigned int i = 0; i < N; ++i)
-    deallog << x_host[i] - x_ref[i] << " ";
+    deallog << x_host(i) - x_ref[i] << " ";
   deallog << std::endl;
 }
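The import() round trips through ReadWriteVector and CUDA::Vector are gone in favor of mirror views: Kokkos::create_mirror_view returns a host view with the same layout as the device view (on host-only builds it simply aliases it, making the copies no-ops), and Kokkos::deep_copy moves the data in either direction. The round trip in isolation, with an arbitrary size and arbitrary fill values:

    #include <Kokkos_Core.hpp>

    int main(int argc, char *argv[])
    {
      Kokkos::ScopeGuard guard(argc, argv);
      constexpr int      N = 4;

      // Device array with a compile-time extent, as for x_dev above.
      Kokkos::View<double[N]> x_dev(
        Kokkos::view_alloc("x_dev", Kokkos::WithoutInitializing));

      auto x_host = Kokkos::create_mirror_view(x_dev);

      for (int i = 0; i < N; ++i)
        x_host(i) = 0.25 * i; // arbitrary test data

      Kokkos::deep_copy(x_dev, x_host); // host -> device, was import()
      // ... launch kernels working on x_dev ...
      Kokkos::deep_copy(x_host, x_dev); // device -> host, was import()
    }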
diff --git a/tests/cuda/cuda_evaluate_2d_shape.cc b/tests/cuda/cuda_evaluate_2d_shape.cc
index caf15450d4..84a120f947 100644
--- a/tests/cuda/cuda_evaluate_2d_shape.cc
+++ b/tests/cuda/cuda_evaluate_2d_shape.cc
@@ -32,14 +32,18 @@
 
 namespace CUDA = LinearAlgebra::CUDAWrappers;
 
+using TeamHandle = Kokkos::TeamPolicy<
+  MemorySpace::Default::kokkos_space::execution_space>::member_type;
+
 template <int M, int N, int type, bool add, bool dof_to_quad>
-__global__ void
+DEAL_II_HOST_DEVICE void
 evaluate_tensor_product(
+  const TeamHandle &                                         team_member,
   Kokkos::View<double *, MemorySpace::Default::kokkos_space> shape_values,
   Kokkos::View<double *, MemorySpace::Default::kokkos_space> shape_gradients,
   Kokkos::View<double *, MemorySpace::Default::kokkos_space> co_shape_gradients,
-  double *dst,
-  double *src)
+  Kokkos::View<double *, MemorySpace::Default::kokkos_space> dst,
+  Kokkos::View<double *, MemorySpace::Default::kokkos_space> src)
@@ -47,18 +51,18 @@ evaluate_tensor_product(
   CUDAWrappers::internal::EvaluatorTensorProduct<
     CUDAWrappers::internal::evaluate_general,
     2,
     M - 1,
     N,
     double>
-    evaluator(shape_values, shape_gradients, co_shape_gradients);
+    evaluator(team_member, shape_values, shape_gradients, co_shape_gradients);
 
   if (type == 0)
     {
       evaluator.template values<0, dof_to_quad, false, false>(src, src);
-      __syncthreads();
+      team_member.team_barrier();
       evaluator.template values<1, dof_to_quad, add, false>(src, dst);
     }
   if (type == 1)
     {
       evaluator.template gradients<0, dof_to_quad, false, false>(src, src);
-      __syncthreads();
+      team_member.team_barrier();
       evaluator.template gradients<1, dof_to_quad, add, false>(src, dst);
     }
 }
@@ -91,10 +95,16 @@ test()
   constexpr int M_2d = M * M;
   constexpr int N_2d = N * N;
-  LinearAlgebra::ReadWriteVector<double> x_host(N_2d), x_ref(N_2d),
-    y_host(M_2d), y_ref(M_2d);
+  LinearAlgebra::ReadWriteVector<double> x_ref(N_2d), y_ref(M_2d);
+  Kokkos::View<double[N_2d], MemorySpace::Default::kokkos_space> x_dev(
+    Kokkos::view_alloc("x_dev", Kokkos::WithoutInitializing));
+  Kokkos::View<double[M_2d], MemorySpace::Default::kokkos_space> y_dev(
+    Kokkos::view_alloc("y_dev", Kokkos::WithoutInitializing));
+  auto x_host = Kokkos::create_mirror_view(x_dev);
+  auto y_host = Kokkos::create_mirror_view(y_dev);
+
   for (unsigned int i = 0; i < N_2d; ++i)
-    x_host[i] = static_cast<double>(Testing::rand()) / RAND_MAX;
+    x_host(i) = static_cast<double>(Testing::rand()) / RAND_MAX;
 
   FullMatrix<double> shape_2d(M_2d, N_2d);
   for (unsigned int i = 0; i < M; ++i)
@@ -112,16 +122,15 @@ test()
   // Compute reference
   for (unsigned int i = 0; i < M_2d; ++i)
     {
-      y_host[i] = 1.;
-      y_ref[i]  = add ? y_host[i] : 0.;
+      y_host(i) = 1.;
+      y_ref[i]  = add ? y_host(i) : 0.;
 
       for (unsigned int j = 0; j < N_2d; ++j)
-        y_ref[i] += shape_2d(i, j) * x_host[j];
+        y_ref[i] += shape_2d(i, j) * x_host(j);
     }
 
   // Copy data to the GPU.
-  CUDA::Vector<double> x_dev(N_2d), y_dev(M_2d);
-  x_dev.import(x_host, VectorOperation::insert);
-  y_dev.import(y_host, VectorOperation::insert);
+  Kokkos::deep_copy(x_dev, x_host);
+  Kokkos::deep_copy(y_dev, y_host);
 
   Kokkos::View<double *, MemorySpace::Default::kokkos_space> shape_values(
@@ -144,53 +153,61 @@ test()
   Kokkos::deep_copy(co_shape_gradients, shape_host_view);
 
   // Launch the kernel
-  dim3 block_dim(M, N);
-  evaluate_tensor_product
-    <<<1, block_dim>>>(shape_values,
-                       shape_gradients,
-                       co_shape_gradients,
-                       y_dev.get_values(),
-                       x_dev.get_values());
+  MemorySpace::Default::kokkos_space::execution_space exec;
+  Kokkos::TeamPolicy<MemorySpace::Default::kokkos_space::execution_space>
+    team_policy(exec, 1, Kokkos::AUTO);
+  Kokkos::parallel_for(
+    team_policy, KOKKOS_LAMBDA(const TeamHandle &team_member) {
+      evaluate_tensor_product(team_member,
+                              shape_values,
+                              shape_gradients,
+                              co_shape_gradients,
+                              y_dev,
+                              x_dev);
+    });
 
   // Check the results on the host
-  y_host.import(y_dev, VectorOperation::insert);
+  Kokkos::deep_copy(y_host, y_dev);
   deallog << "Errors no transpose: ";
   for (unsigned int i = 0; i < M_2d; ++i)
-    deallog << y_host[i] - y_ref[i] << " ";
+    deallog << y_host(i) - y_ref[i] << " ";
   deallog << std::endl;
 
   for (unsigned int i = 0; i < M_2d; ++i)
-    y_host[i] = static_cast<double>(Testing::rand()) / RAND_MAX;
+    y_host(i) = static_cast<double>(Testing::rand()) / RAND_MAX;
 
   // Copy y_host to the device
-  y_dev.import(y_host, VectorOperation::insert);
+  Kokkos::deep_copy(y_dev, y_host);
 
   // Compute reference
   for (unsigned int i = 0; i < N_2d; ++i)
     {
-      x_host[i] = 2.;
-      x_ref[i]  = add ? x_host[i] : 0.;
+      x_host(i) = 2.;
+      x_ref[i]  = add ? x_host(i) : 0.;
 
       for (unsigned int j = 0; j < M_2d; ++j)
-        x_ref[i] += shape_2d(j, i) * y_host[j];
+        x_ref[i] += shape_2d(j, i) * y_host(j);
     }
 
   // Copy x_host to the device
-  x_dev.import(x_host, VectorOperation::insert);
+  Kokkos::deep_copy(x_dev, x_host);
 
   // Launch the kernel
-  evaluate_tensor_product
-    <<<1, block_dim>>>(shape_values,
-                       shape_gradients,
-                       co_shape_gradients,
-                       x_dev.get_values(),
-                       y_dev.get_values());
+  Kokkos::parallel_for(
+    team_policy, KOKKOS_LAMBDA(const TeamHandle &team_member) {
+      evaluate_tensor_product(team_member,
+                              shape_values,
+                              shape_gradients,
+                              co_shape_gradients,
+                              x_dev,
+                              y_dev);
+    });
 
   // Check the results on the host
-  x_host.import(x_dev, VectorOperation::insert);
+  Kokkos::deep_copy(x_host, x_dev);
   deallog << "Errors transpose: ";
   for (unsigned int i = 0; i < N_2d; ++i)
-    deallog << x_host[i] - x_ref[i] << " ";
+    deallog << x_host(i) - x_ref[i] << " ";
   deallog << std::endl;
 }
diff --git a/tests/cuda/matrix_free_matrix_vector_03.cc b/tests/cuda/matrix_free_matrix_vector_03.cc
index eba7ced789..cd581a99c5 100644
--- a/tests/cuda/matrix_free_matrix_vector_03.cc
+++ b/tests/cuda/matrix_free_matrix_vector_03.cc
@@ -41,7 +41,6 @@ test()
   tria.begin(tria.n_levels() - 1)->set_refine_flag();
   tria.last()->set_refine_flag();
   tria.execute_coarsening_and_refinement();
-  cell = tria.begin_active();
   for (unsigned int i = 0; i < 10 - 3 * dim; ++i)
     {
       cell = tria.begin_active();
diff --git a/tests/cuda/matrix_free_no_index_initialize.cc b/tests/cuda/matrix_free_no_index_initialize.cc
index df3e7d700f..bda055ddc6 100644
--- a/tests/cuda/matrix_free_no_index_initialize.cc
+++ b/tests/cuda/matrix_free_no_index_initialize.cc
@@ -49,22 +49,32 @@ public:
     Number *dst) const
   {
     CUDAWrappers::FEEvaluation<dim, fe_degree, n_q_points_1d, 1, Number>
-      fe_eval(cell, gpu_data, shared_data);
+      fe_eval(gpu_data, shared_data);
 
     // set to unit vector
-    fe_eval.submit_dof_value(1.);
-    KOKKOS_IF_ON_DEVICE(__syncthreads();)
+    auto fe_eval_ptr = &fe_eval;
+    Kokkos::parallel_for(Kokkos::TeamThreadRange(shared_data->team_member,
+                                                 n_local_dofs),
+                         [&](int i) { fe_eval_ptr->submit_dof_value(1., i); });
+    shared_data->team_member.team_barrier();
     fe_eval.evaluate(/*evaluate_values =*/true, /*evaluate_gradients=*/true);
 
 #ifndef __APPLE__
-    // values should evaluate to one, derivatives to zero
-    assert(fe_eval.get_value() == 1.);
-    for (unsigned int e = 0; e < dim; ++e)
-      assert(fe_eval.get_gradient()[e] == 0.);
+    Kokkos::parallel_for(Kokkos::TeamThreadRange(shared_data->team_member,
+                                                 n_local_dofs),
+                         [&](int i) {
+                           // values should evaluate to one, derivatives to
+                           // zero
+                           assert(fe_eval_ptr->get_value(i) == 1.);
+                           for (unsigned int e = 0; e < dim; ++e)
+                             assert(fe_eval_ptr->get_gradient(i)[e] == 0.);
+                         });
 
     fe_eval.integrate(/*integrate_values = */ true,
                       /*integrate_gradients=*/true);
-    assert(fe_eval.get_dof_value() == 1.);
+
+    Kokkos::parallel_for(
+      Kokkos::TeamThreadRange(shared_data->team_member, n_local_dofs),
+      KOKKOS_LAMBDA(int i) { assert(fe_eval_ptr->get_dof_value(i) == 1.); });
 #endif
   }
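One idiom in the hunk above is easy to miss: the test takes the address of the stack-allocated evaluator once (auto fe_eval_ptr = &fe_eval;) and has the nested lambdas capture that pointer. A plausible reading is that KOKKOS_LAMBDA captures by value, so capturing fe_eval itself would copy the whole evaluator into the lambda, whereas a pointer is trivially cheap to copy; and because TeamThreadRange loops of equal extent hand the same indices to the same threads, entries written through the pointer in one loop can be checked in the next. A sketch of the idiom with an invented Accumulator type:

    #include <Kokkos_Core.hpp>

    #include <cassert>

    struct Accumulator
    {
      double values[8];
      KOKKOS_FUNCTION void   set(int i, double v) { values[i] = v; }
      KOKKOS_FUNCTION double get(int i) const { return values[i]; }
    };

    int main(int argc, char *argv[])
    {
      Kokkos::ScopeGuard guard(argc, argv);

      Kokkos::parallel_for(
        "pointer_capture",
        Kokkos::TeamPolicy<>(1, Kokkos::AUTO),
        KOKKOS_LAMBDA(const Kokkos::TeamPolicy<>::member_type &team) {
          Accumulator acc; // lives on the calling thread's stack

          // Capture a pointer by value instead of copying acc itself,
          // mirroring the fe_eval_ptr idiom in the test above.
          auto acc_ptr = &acc;
          Kokkos::parallel_for(Kokkos::TeamThreadRange(team, 8),
                               [=](const int i) { acc_ptr->set(i, 1.); });
          team.team_barrier();
          Kokkos::parallel_for(Kokkos::TeamThreadRange(team, 8),
                               [=](const int i) {
                                 assert(acc_ptr->get(i) == 1.);
                                 (void)acc_ptr; // quiet NDEBUG builds
                               });
        });
      Kokkos::fence();
    }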
diff --git a/tests/cuda/matrix_vector_mf.h b/tests/cuda/matrix_vector_mf.h
index 20af64558e..c7ec9de966 100644
--- a/tests/cuda/matrix_vector_mf.h
+++ b/tests/cuda/matrix_vector_mf.h
@@ -31,17 +31,28 @@ class HelmholtzOperatorQuad
 {
 public:
   DEAL_II_HOST_DEVICE
-  HelmholtzOperatorQuad(Number coef)
-    : coef(coef)
+  HelmholtzOperatorQuad(
+    const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data,
+    Number *                                                    coef,
+    int                                                         cell)
+    : gpu_data(gpu_data)
+    , coef(coef)
+    , cell(cell)
   {}
 
   DEAL_II_HOST_DEVICE void operator()(
     CUDAWrappers::FEEvaluation<dim, fe_degree, n_q_points_1d, 1, Number>
-      *fe_eval) const;
+      *fe_eval,
+    const int q_point) const;
+
+  static const unsigned int n_q_points =
+    dealii::Utilities::pow(n_q_points_1d, dim);
 
 private:
-  Number coef;
+  const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data;
+  Number *                                                    coef;
+  int                                                         cell;
 };
@@ -49,11 +60,12 @@ private:
 template <int dim, int fe_degree, int n_q_points_1d, typename Number>
 DEAL_II_HOST_DEVICE void
 HelmholtzOperatorQuad<dim, fe_degree, n_q_points_1d, Number>::operator()(
-  CUDAWrappers::FEEvaluation<dim, fe_degree, n_q_points_1d, 1, Number> *fe_eval)
-  const
+  CUDAWrappers::FEEvaluation<dim, fe_degree, n_q_points_1d, 1, Number> *fe_eval,
+  int q_point) const
 {
-  fe_eval->submit_value(coef * fe_eval->get_value());
-  fe_eval->submit_gradient(fe_eval->get_gradient());
+  unsigned int pos = gpu_data->local_q_point_id(cell, n_q_points, q_point);
+  fe_eval->submit_value(coef[pos] * fe_eval->get_value(q_point), q_point);
+  fe_eval->submit_gradient(fe_eval->get_gradient(q_point), q_point);
 }
@@ -62,6 +74,12 @@ template <int dim, int fe_degree, int n_q_points_1d, typename Number>
 class HelmholtzOperator
 {
 public:
+  static const unsigned int n_dofs_1d = fe_degree + 1;
+  static const unsigned int n_local_dofs =
+    dealii::Utilities::pow(fe_degree + 1, dim);
+  static const unsigned int n_q_points =
+    dealii::Utilities::pow(n_q_points_1d, dim);
+
   HelmholtzOperator(Number *coefficient)
     : coef(coefficient)
   {}
@@ -74,12 +92,6 @@ public:
     const Number *src,
     Number *      dst) const;
 
-  static const unsigned int n_dofs_1d = fe_degree + 1;
-  static const unsigned int n_local_dofs =
-    dealii::Utilities::pow(fe_degree + 1, dim);
-  static const unsigned int n_q_points =
-    dealii::Utilities::pow(n_q_points_1d, dim);
-
   Number *coef;
 };
@@ -94,15 +106,14 @@ HelmholtzOperator<dim, fe_degree, n_q_points_1d, Number>::operator()(
   const Number *src,
   Number *      dst) const
 {
-  const unsigned int pos = CUDAWrappers::local_q_point_id<dim, Number>(
-    cell, gpu_data, n_dofs_1d, n_q_points);
-
   CUDAWrappers::FEEvaluation<dim, fe_degree, n_q_points_1d, 1, Number> fe_eval(
-    cell, gpu_data, shared_data);
+    gpu_data, shared_data);
   fe_eval.read_dof_values(src);
   fe_eval.evaluate(true, true);
   fe_eval.apply_for_each_quad_point(
-    HelmholtzOperatorQuad<dim, fe_degree, n_q_points_1d, Number>(coef[pos]));
+    HelmholtzOperatorQuad<dim, fe_degree, n_q_points_1d, Number>(gpu_data,
+                                                                 coef,
+                                                                 cell));
   fe_eval.integrate(true, true);
   fe_eval.distribute_local_to_global(dst);
 }
@@ -119,8 +130,9 @@ public:
 
   DEAL_II_HOST_DEVICE void operator()(
+    const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data,
     const unsigned int cell,
-    const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data);
+    const unsigned int q) const;
 
   static const unsigned int n_dofs_1d = fe_degree + 1;
   static const unsigned int n_local_dofs =
@@ -137,13 +149,14 @@ private:
 template <int dim, int fe_degree, typename Number>
 DEAL_II_HOST_DEVICE void
 VaryingCoefficientFunctor<dim, fe_degree, Number>::operator()(
+  const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data,
   const unsigned int cell,
-  const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data)
+  const unsigned int q) const
 {
-  const unsigned int pos = CUDAWrappers::local_q_point_id<dim, Number>(
-    cell, gpu_data, n_dofs_1d, n_q_points);
-  const auto q_point =
-    CUDAWrappers::get_quadrature_point<dim, Number>(cell, gpu_data, n_dofs_1d);
+  const unsigned int pos     = gpu_data->local_q_point_id(cell, n_q_points, q);
+  const auto         q_point = gpu_data->get_quadrature_point(cell, q);
+
 
   Number p_square = 0.;
   for (unsigned int i = 0; i < dim; ++i)