From a5303b108b8dd5e029a0f4c60e8133cb0ef03060 Mon Sep 17 00:00:00 2001 From: Bruno Turcksin Date: Thu, 23 Mar 2023 12:53:41 -0400 Subject: [PATCH] Move global __constant__ variables to Kokkos::View --- include/deal.II/base/cuda_size.h | 13 - .../deal.II/matrix_free/cuda_fe_evaluation.h | 34 +- .../matrix_free/cuda_hanging_nodes_internal.h | 25 +- .../deal.II/matrix_free/cuda_matrix_free.h | 75 ++-- .../matrix_free/cuda_matrix_free.templates.h | 216 +++------- .../matrix_free/cuda_tensor_product_kernels.h | 405 +++++++----------- source/matrix_free/cuda_matrix_free.cc | 5 - tests/cuda/cuda_evaluate_1d_shape.cc | 73 ++-- tests/cuda/cuda_evaluate_2d_shape.cc | 76 ++-- 9 files changed, 396 insertions(+), 526 deletions(-) diff --git a/include/deal.II/base/cuda_size.h b/include/deal.II/base/cuda_size.h index 3cb8094e2a..d20fcad930 100644 --- a/include/deal.II/base/cuda_size.h +++ b/include/deal.II/base/cuda_size.h @@ -38,19 +38,6 @@ namespace CUDAWrappers * Define the number of threads in a warp. */ constexpr int warp_size = 32; - - /** - * Define the largest finite element degree that can be solved using - * CUDAWrappers::MatrixFree. Changing this number will affect the amount of - * constant memory being used. - */ - constexpr unsigned int mf_max_elem_degree = 10; - - /** - * Define the maximum number of valid CUDAWrappers::MatrixFree object. - * Changing this number will affect the amount of constant memory being used. - */ - constexpr unsigned int mf_n_concurrent_objects = 5; } // namespace CUDAWrappers DEAL_II_NAMESPACE_CLOSE diff --git a/include/deal.II/matrix_free/cuda_fe_evaluation.h b/include/deal.II/matrix_free/cuda_fe_evaluation.h index aa1bd4e1fc..420db4e13d 100644 --- a/include/deal.II/matrix_free/cuda_fe_evaluation.h +++ b/include/deal.II/matrix_free/cuda_fe_evaluation.h @@ -249,10 +249,9 @@ namespace CUDAWrappers MemorySpace::Default::kokkos_space>, int, Kokkos::pair> - local_to_global; - unsigned int n_cells; - unsigned int padding_length; - const unsigned int mf_object_id; + local_to_global; + unsigned int n_cells; + unsigned int padding_length; const dealii::internal::MatrixFreeFunctions::ConstraintKinds constraint_mask; @@ -282,6 +281,14 @@ namespace CUDAWrappers Kokkos::pair> JxW; + // Data shared by multiple cells + Kokkos::View shape_values; + Kokkos::View shape_gradients; + Kokkos::View + co_shape_gradients; + Kokkos::View + constraint_weights; + // Internal buffer Number *values; Number *gradients[dim]; @@ -305,7 +312,6 @@ namespace CUDAWrappers Kokkos::pair(0, Utilities::pow(n_q_points_1d, dim)))) , n_cells(data->n_cells) , padding_length(data->padding_length) - , mf_object_id(data->id) , constraint_mask(data->constraint_mask[cell_id]) , use_coloring(data->use_coloring) , inv_jac(Kokkos::subview( @@ -318,6 +324,10 @@ namespace CUDAWrappers data->JxW, cell_id, Kokkos::pair(0, Utilities::pow(n_q_points_1d, dim)))) + , shape_values(data->shape_values) + , shape_gradients(data->shape_gradients) + , co_shape_gradients(data->co_shape_gradients) + , constraint_weights(data->constraint_weights) , values(shdata->values) { for (unsigned int i = 0; i < dim; ++i) @@ -344,7 +354,8 @@ namespace CUDAWrappers KOKKOS_IF_ON_DEVICE(values[idx] = __ldg(&src[src_idx]); __syncthreads();) KOKKOS_IF_ON_HOST(values[idx] = src[src_idx];) - internal::resolve_hanging_nodes(constraint_mask, + internal::resolve_hanging_nodes(constraint_weights, + constraint_mask, values); } @@ -361,7 +372,8 @@ namespace CUDAWrappers { static_assert(n_components_ == 1, "This function only supports FE with one \ 
components"); - internal::resolve_hanging_nodes(constraint_mask, + internal::resolve_hanging_nodes(constraint_weights, + constraint_mask, values); const unsigned int idx = internal::compute_index(); @@ -394,7 +406,9 @@ namespace CUDAWrappers fe_degree, n_q_points_1d, Number> - evaluator_tensor_product(mf_object_id); + evaluator_tensor_product(shape_values, + shape_gradients, + co_shape_gradients); if (evaluate_val == true && evaluate_grad == true) { evaluator_tensor_product.value_and_gradient_at_quad_pts(values, @@ -431,7 +445,9 @@ namespace CUDAWrappers fe_degree, n_q_points_1d, Number> - evaluator_tensor_product(mf_object_id); + evaluator_tensor_product(shape_values, + shape_gradients, + co_shape_gradients); if (integrate_val == true && integrate_grad == true) { evaluator_tensor_product.integrate_value_and_gradient(values, diff --git a/include/deal.II/matrix_free/cuda_hanging_nodes_internal.h b/include/deal.II/matrix_free/cuda_hanging_nodes_internal.h index c354e0b2b7..8ed76db905 100644 --- a/include/deal.II/matrix_free/cuda_hanging_nodes_internal.h +++ b/include/deal.II/matrix_free/cuda_hanging_nodes_internal.h @@ -31,10 +31,6 @@ namespace CUDAWrappers { namespace internal { - __constant__ double - constraint_weights[(CUDAWrappers::mf_max_elem_degree + 1) * - (CUDAWrappers::mf_max_elem_degree + 1)]; - //------------------------------------------------------------------------// // Functions for resolving the hanging node constraints on the GPU // //------------------------------------------------------------------------// @@ -62,6 +58,8 @@ namespace CUDAWrappers typename Number> DEAL_II_HOST_DEVICE inline void interpolate_boundary_2d( + Kokkos::View + constraint_weights, const dealii::internal::MatrixFreeFunctions::ConstraintKinds constraint_mask, Number *values) @@ -168,6 +166,8 @@ namespace CUDAWrappers typename Number> DEAL_II_HOST_DEVICE inline void interpolate_boundary_3d( + Kokkos::View + constraint_weights, const dealii::internal::MatrixFreeFunctions::ConstraintKinds constraint_mask, Number *values) @@ -317,28 +317,35 @@ namespace CUDAWrappers template DEAL_II_HOST_DEVICE void resolve_hanging_nodes( + Kokkos::View + constraint_weights, const dealii::internal::MatrixFreeFunctions::ConstraintKinds constraint_mask, Number *values) { if (dim == 2) { - interpolate_boundary_2d(constraint_mask, + interpolate_boundary_2d(constraint_weights, + constraint_mask, values); - interpolate_boundary_2d(constraint_mask, + interpolate_boundary_2d(constraint_weights, + constraint_mask, values); } else if (dim == 3) { // Interpolate y and z faces (x-direction) - interpolate_boundary_3d(constraint_mask, + interpolate_boundary_3d(constraint_weights, + constraint_mask, values); // Interpolate x and z faces (y-direction) - interpolate_boundary_3d(constraint_mask, + interpolate_boundary_3d(constraint_weights, + constraint_mask, values); // Interpolate x and y faces (z-direction) - interpolate_boundary_3d(constraint_mask, + interpolate_boundary_3d(constraint_weights, + constraint_mask, values); } } diff --git a/include/deal.II/matrix_free/cuda_matrix_free.h b/include/deal.II/matrix_free/cuda_matrix_free.h index ee3bf02941..e7be918ef4 100644 --- a/include/deal.II/matrix_free/cuda_matrix_free.h +++ b/include/deal.II/matrix_free/cuda_matrix_free.h @@ -188,9 +188,34 @@ namespace CUDAWrappers Kokkos::View JxW; /** - * ID of the associated MatrixFree object. + * Mask deciding where constraints are set on a given cell. + */ + Kokkos::View + constraint_mask; + + /** + * Values of the shape functions. 
+     */
+    Kokkos::View<Number *, MemorySpace::Default::kokkos_space> shape_values;
+
+    /**
+     * Gradients of the shape functions.
      */
-    unsigned int id;
+    Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+      shape_gradients;
+
+    /**
+     * Gradients of the shape functions for collocation methods.
+     */
+    Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+      co_shape_gradients;
+
+    /**
+     * Weights used when resolving hanging nodes.
+     */
+    Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+      constraint_weights;

     /**
      * Number of cells.
@@ -207,13 +232,6 @@ namespace CUDAWrappers
      */
     unsigned int row_start;

-    /**
-     * Mask deciding where constraints are set on a given cell.
-     */
-    Kokkos::View<dealii::internal::MatrixFreeFunctions::ConstraintKinds *, MemorySpace::Default::kokkos_space>
-      constraint_mask;
-
     /**
      * If true, graph coloring has been used and we can simply add into
      * the destination vector. Otherwise, use atomic operations.
@@ -226,11 +244,6 @@ namespace CUDAWrappers
      */
     MatrixFree();

-    /**
-     * Destructor.
-     */
-    ~MatrixFree();
-
     /**
      * Return the length of the padding.
      */
@@ -378,12 +391,6 @@ namespace CUDAWrappers
     const std::shared_ptr<const Utilities::MPI::Partitioner> &
     get_vector_partitioner() const;

-    /**
-     * Free all the memory allocated.
-     */
-    void
-    free();
-
     /**
      * Return the DoFHandler.
      */
@@ -540,6 +547,28 @@ namespace CUDAWrappers
       MemorySpace::Default::kokkos_space>>
      constraint_mask;

+    /**
+     * Values of the shape functions.
+     */
+    Kokkos::View<Number *, MemorySpace::Default::kokkos_space> shape_values;
+
+    /**
+     * Gradients of the shape functions.
+     */
+    Kokkos::View<Number *, MemorySpace::Default::kokkos_space> shape_gradients;
+
+    /**
+     * Gradients of the shape functions for collocation methods.
+     */
+    Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+      co_shape_gradients;
+
+    /**
+     * Weights used when resolving hanging nodes.
+     */
+    Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+      constraint_weights;
+
     /**
      * Grid dimensions associated to the different colors. The grid dimensions
      * are used to launch the CUDA kernels.
@@ -752,11 +781,6 @@ namespace CUDAWrappers
       typename Kokkos::View<Number *,
                             MemorySpace::Default::kokkos_space>::HostMirror JxW;

-      /**
-       * ID of the associated MatrixFree object.
-       */
-      unsigned int id;
-
       /**
        * Number of cells.
        */
@@ -802,7 +826,6 @@ namespace CUDAWrappers
     {
       DataHost<Number> data_host;

-      data_host.id             = data.id;
       data_host.n_cells        = data.n_cells;
       data_host.padding_length = data.padding_length;
       data_host.row_start      = data.row_start;
diff --git a/include/deal.II/matrix_free/cuda_matrix_free.templates.h b/include/deal.II/matrix_free/cuda_matrix_free.templates.h
index b094559789..18f330ee54 100644
--- a/include/deal.II/matrix_free/cuda_matrix_free.templates.h
+++ b/include/deal.II/matrix_free/cuda_matrix_free.templates.h
@@ -53,89 +53,6 @@ namespace CUDAWrappers
 {
   namespace internal
   {
-    constexpr unsigned int data_array_size =
-      (mf_max_elem_degree + 1) * (mf_max_elem_degree + 1);
-
-    // Default initialized to false
-    extern std::array<std::atomic<bool>, mf_n_concurrent_objects> used_objects;
-
-    template <typename NumberType>
-    using DataArray = NumberType[data_array_size];
-
-    // These variables are stored in the device constant memory.
- // Shape values - __constant__ double global_shape_values_d[mf_n_concurrent_objects] - [data_array_size]; - __constant__ float global_shape_values_f[mf_n_concurrent_objects] - [data_array_size]; - // Shape gradients - __constant__ double global_shape_gradients_d[mf_n_concurrent_objects] - [data_array_size]; - __constant__ float global_shape_gradients_f[mf_n_concurrent_objects] - [data_array_size]; - // for collocation methods - __constant__ double global_co_shape_gradients_d[mf_n_concurrent_objects] - [data_array_size]; - __constant__ float global_co_shape_gradients_f[mf_n_concurrent_objects] - [data_array_size]; - - template - DEAL_II_HOST_DEVICE inline DataArray & - get_global_shape_values(unsigned int i); - - template <> - DEAL_II_HOST_DEVICE inline DataArray & - get_global_shape_values(unsigned int i) - { - return global_shape_values_d[i]; - } - - template <> - DEAL_II_HOST_DEVICE inline DataArray & - get_global_shape_values(unsigned int i) - { - return global_shape_values_f[i]; - } - - template - DEAL_II_HOST_DEVICE inline DataArray & - get_global_shape_gradients(unsigned int i); - - template <> - DEAL_II_HOST_DEVICE inline DataArray & - get_global_shape_gradients(unsigned int i) - { - return global_shape_gradients_d[i]; - } - - template <> - DEAL_II_HOST_DEVICE inline DataArray & - get_global_shape_gradients(unsigned int i) - { - return global_shape_gradients_f[i]; - } - - // for collocation methods - template - DEAL_II_HOST_DEVICE inline DataArray & - get_global_co_shape_gradients(unsigned int i); - - template <> - DEAL_II_HOST_DEVICE inline DataArray & - get_global_co_shape_gradients(unsigned int i) - { - return global_co_shape_gradients_d[i]; - } - - template <> - DEAL_II_HOST_DEVICE inline DataArray & - get_global_co_shape_gradients(unsigned int i) - { - return global_co_shape_gradients_f[i]; - } - - - /** * Helper class to (re)initialize MatrixFree object. 
*/ @@ -222,13 +139,6 @@ namespace CUDAWrappers , padding_length(data->get_padding_length()) , hanging_nodes(dof_handler.get_triangulation()) { - cudaError_t error_code = cudaMemcpyToSymbol( - constraint_weights, - shape_info.data.front().subface_interpolation_matrices[0].data(), - sizeof(double) * - shape_info.data.front().subface_interpolation_matrices[0].size()); - AssertCuda(error_code); - local_dof_indices.resize(data->dofs_per_cell); lexicographic_dof_indices.resize(dofs_per_cell); } @@ -558,14 +468,6 @@ namespace CUDAWrappers - template - MatrixFree::~MatrixFree() - { - free(); - } - - - template template void @@ -646,29 +548,22 @@ namespace CUDAWrappers data_copy.inv_jacobian = inv_jacobian[color]; if (JxW.size() > 0) data_copy.JxW = JxW[color]; - data_copy.local_to_global = local_to_global[color]; - data_copy.id = my_id; - data_copy.n_cells = n_cells[color]; - data_copy.padding_length = padding_length; - data_copy.row_start = row_start[color]; - data_copy.use_coloring = use_coloring; - data_copy.constraint_mask = constraint_mask[color]; + data_copy.local_to_global = local_to_global[color]; + data_copy.constraint_mask = constraint_mask[color]; + data_copy.shape_values = shape_values; + data_copy.shape_gradients = shape_gradients; + data_copy.co_shape_gradients = co_shape_gradients; + data_copy.constraint_weights = constraint_weights; + data_copy.n_cells = n_cells[color]; + data_copy.padding_length = padding_length; + data_copy.row_start = row_start[color]; + data_copy.use_coloring = use_coloring; return data_copy; } - template - void - MatrixFree::free() - { - internal::used_objects[my_id].store(false); - my_id = -1; - } - - - template template void @@ -845,9 +740,6 @@ namespace CUDAWrappers this->overlap_communication_computation = additional_data.overlap_communication_computation; - // TODO: only free if we actually need arrays of different length - free(); - n_dofs = dof_handler->n_dofs(); const FiniteElement &fe = dof_handler->get_fe(); @@ -871,55 +763,50 @@ namespace CUDAWrappers const ::dealii::internal::MatrixFreeFunctions::ShapeInfo shape_info( quad, fe); - unsigned int size_shape_values = n_dofs_1d * n_q_points_1d * sizeof(Number); + unsigned int size_shape_values = n_dofs_1d * n_q_points_1d; FE_DGQArbitraryNodes<1> fe_quad_co(quad); const ::dealii::internal::MatrixFreeFunctions::ShapeInfo shape_info_co(quad, fe_quad_co); - unsigned int size_co_shape_values = - n_q_points_1d * n_q_points_1d * sizeof(Number); - - // Check if we already a part of the constant memory allocated to us. If - // not, we try to get a block of memory. - bool found_id = false; - while (!found_id) + shape_values = Kokkos::View( + Kokkos::view_alloc("shape_values", Kokkos::WithoutInitializing), + size_shape_values); + auto shape_values_host = Kokkos::create_mirror_view(shape_values); + for (unsigned int i = 0; i < size_shape_values; ++i) { - ++my_id; - Assert( - my_id < static_cast(mf_n_concurrent_objects), - ExcMessage( - "Maximum number of concurrent MatrixFree objects reached. 
Increase mf_n_concurrent_objects")); - bool f = false; - found_id = - internal::used_objects[my_id].compare_exchange_strong(f, true); + shape_values_host[i] = shape_info.data.front().shape_values[i]; } - - cudaError_t cuda_error = - cudaMemcpyToSymbol(internal::get_global_shape_values(0), - shape_info.data.front().shape_values.data(), - size_shape_values, - my_id * internal::data_array_size * sizeof(Number), - cudaMemcpyHostToDevice); - AssertCuda(cuda_error); + Kokkos::deep_copy(shape_values, shape_values_host); if (update_flags & update_gradients) { - cuda_error = - cudaMemcpyToSymbol(internal::get_global_shape_gradients(0), - shape_info.data.front().shape_gradients.data(), - size_shape_values, - my_id * internal::data_array_size * sizeof(Number), - cudaMemcpyHostToDevice); - AssertCuda(cuda_error); - - cuda_error = - cudaMemcpyToSymbol(internal::get_global_co_shape_gradients(0), - shape_info_co.data.front().shape_gradients.data(), - size_co_shape_values, - my_id * internal::data_array_size * sizeof(Number), - cudaMemcpyHostToDevice); - AssertCuda(cuda_error); + shape_gradients = + Kokkos::View( + Kokkos::view_alloc("shape_gradients", Kokkos::WithoutInitializing), + size_shape_values); + auto shape_gradients_host = Kokkos::create_mirror_view(shape_gradients); + for (unsigned int i = 0; i < size_shape_values; ++i) + { + shape_gradients_host[i] = + shape_info.data.front().shape_gradients[i]; + } + Kokkos::deep_copy(shape_gradients, shape_gradients_host); + + + co_shape_gradients = + Kokkos::View( + Kokkos::view_alloc("co_shape_gradients", + Kokkos::WithoutInitializing), + size_shape_values); + auto co_shape_gradients_host = + Kokkos::create_mirror_view(co_shape_gradients); + for (unsigned int i = 0; i < size_shape_values; ++i) + { + co_shape_gradients_host[i] = + shape_info_co.data.front().shape_gradients[i]; + } + Kokkos::deep_copy(co_shape_gradients, co_shape_gradients_host); } // Setup the number of cells per CUDA thread block @@ -928,6 +815,21 @@ namespace CUDAWrappers internal::ReinitHelper helper( this, mapping, fe, quad, shape_info, *dof_handler, update_flags); + const unsigned int constraint_weights_size = + shape_info.data.front().subface_interpolation_matrices[0].size(); + constraint_weights = + Kokkos::View( + Kokkos::view_alloc("constraint_weights", Kokkos::WithoutInitializing), + constraint_weights_size); + auto constraint_weights_host = + Kokkos::create_mirror_view(constraint_weights); + for (unsigned int i = 0; i < constraint_weights_size; ++i) + { + constraint_weights_host[i] = + shape_info.data.front().subface_interpolation_matrices[0][i]; + } + Kokkos::deep_copy(constraint_weights, constraint_weights_host); + // Create a graph coloring CellFilter begin(iterator_filter, dof_handler->begin_active()); CellFilter end(iterator_filter, dof_handler->end()); diff --git a/include/deal.II/matrix_free/cuda_tensor_product_kernels.h b/include/deal.II/matrix_free/cuda_tensor_product_kernels.h index 9a93c939d4..f2547950ea 100644 --- a/include/deal.II/matrix_free/cuda_tensor_product_kernels.h +++ b/include/deal.II/matrix_free/cuda_tensor_product_kernels.h @@ -46,6 +46,52 @@ namespace CUDAWrappers evaluate_evenodd }; + /** + * Helper function for values() and gradients(). + */ + template + DEAL_II_HOST_DEVICE void + apply(Kokkos::View shape_data, + const Number * in, + Number * out) + { + KOKKOS_IF_ON_DEVICE( + const unsigned int i = (dim == 1) ? 0 : threadIdx.x % n_q_points_1d; + const unsigned int j = (dim == 3) ? threadIdx.y : 0; + const unsigned int q = (dim == 1) ? 
(threadIdx.x % n_q_points_1d) :
                         (dim == 2) ? threadIdx.y :
                                      threadIdx.z;

      // This loop simply multiplies the shape function value at the
      // quadrature point by the finite element coefficient.
      Number t = 0;
      for (int k = 0; k < n_q_points_1d; ++k) {
        const unsigned int shape_idx =
          dof_to_quad ? (q + k * n_q_points_1d) : (k + q * n_q_points_1d);
        const unsigned int source_idx =
          (direction == 0) ? (k + n_q_points_1d * (i + n_q_points_1d * j)) :
          (direction == 1) ? (i + n_q_points_1d * (k + n_q_points_1d * j)) :
                             (i + n_q_points_1d * (j + n_q_points_1d * k));
        t += shape_data[shape_idx] *
             (in_place ? out[source_idx] : in[source_idx]);
      }

      if (in_place) __syncthreads();

      const unsigned int destination_idx =
        (direction == 0) ? (q + n_q_points_1d * (i + n_q_points_1d * j)) :
        (direction == 1) ? (i + n_q_points_1d * (q + n_q_points_1d * j)) :
                           (i + n_q_points_1d * (j + n_q_points_1d * q));

      if (add) out[destination_idx] += t;
      else out[destination_idx] = t;)
  }

  /**
@@ -59,9 +105,7 @@ namespace CUDAWrappers
             int n_q_points_1d,
             typename Number>
   struct EvaluatorTensorProduct
-  {
-    const int mf_object_id;
-  };
+  {};
@@ -78,13 +122,13 @@ namespace CUDAWrappers
                                 n_q_points_1d,
                                 Number>
   {
-    static constexpr unsigned int dofs_per_cell =
-      Utilities::pow(fe_degree + 1, dim);
-    static constexpr unsigned int n_q_points =
-      Utilities::pow(n_q_points_1d, dim);
-
     DEAL_II_HOST_DEVICE
-    EvaluatorTensorProduct(int mf_object_id);
+    EvaluatorTensorProduct(
+      Kokkos::View<Number *, MemorySpace::Default::kokkos_space> shape_values,
+      Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+        shape_gradients,
+      Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+        co_shape_gradients);

     /**
      * Evaluate the values of a finite element function at the quadrature
      * points.
      */
     template <int direction, bool dof_to_quad, bool add, bool in_place>
     DEAL_II_HOST_DEVICE void
-    values(Number shape_values[], const Number *in, Number *out) const;
+    values(const Number *in, Number *out) const;

     /**
      * Evaluate the gradient of a finite element function at the quadrature
      * points.
      */
     template <int direction, bool dof_to_quad, bool add, bool in_place>
     DEAL_II_HOST_DEVICE void
-    gradients(Number shape_gradients[], const Number *in, Number *out) const;
+    gradients(const Number *in, Number *out) const;

     /**
-     * Helper function for values() and gradients().
+     * Evaluate the gradient of a finite element function at the quadrature
+     * points, using the shape gradients for collocation methods.
      */
     template <int direction, bool dof_to_quad, bool add, bool in_place>
     DEAL_II_HOST_DEVICE void
-    apply(Number shape_data[], const Number *in, Number *out) const;
+    co_gradients(const Number *in, Number *out) const;

     /**
      * Evaluate the finite element function at the quadrature points.
@@ -150,7 +194,16 @@ namespace CUDAWrappers DEAL_II_HOST_DEVICE void integrate_value_and_gradient(Number *u, Number *grad_u[dim]); - const int mf_object_id; + // TODO shape values + Kokkos::View shape_values; + + // TODO shape gradients + Kokkos::View + shape_gradients; + + // TODO shape gradients for collocation methods + Kokkos::View + co_shape_gradients; }; @@ -161,8 +214,16 @@ namespace CUDAWrappers dim, fe_degree, n_q_points_1d, - Number>::EvaluatorTensorProduct(int object_id) - : mf_object_id(object_id) + Number>:: + EvaluatorTensorProduct( + Kokkos::View shape_values, + Kokkos::View + shape_gradients, + Kokkos::View + co_shape_gradients) + : shape_values(shape_values) + , shape_gradients(shape_gradients) + , co_shape_gradients(co_shape_gradients) {} @@ -174,11 +235,10 @@ namespace CUDAWrappers dim, fe_degree, n_q_points_1d, - Number>::values(Number shape_values[], - const Number *in, - Number * out) const + Number>::values(const Number *in, Number *out) const { - apply(shape_values, in, out); + apply( + shape_values, in, out); } @@ -190,11 +250,11 @@ namespace CUDAWrappers dim, fe_degree, n_q_points_1d, - Number>::gradients(Number shape_gradients[], - const Number *in, + Number>::gradients(const Number *in, Number * out) const { - apply(shape_gradients, in, out); + apply( + shape_gradients, in, out); } @@ -206,40 +266,11 @@ namespace CUDAWrappers dim, fe_degree, n_q_points_1d, - Number>::apply(Number shape_data[], - const Number *in, - Number * out) const + Number>::co_gradients(const Number *in, + Number * out) const { - KOKKOS_IF_ON_DEVICE( - const unsigned int i = (dim == 1) ? 0 : threadIdx.x % n_q_points_1d; - const unsigned int j = (dim == 3) ? threadIdx.y : 0; - const unsigned int q = (dim == 1) ? (threadIdx.x % n_q_points_1d) : - (dim == 2) ? threadIdx.y : - threadIdx.z; - - // This loop simply multiply the shape function at the quadrature point - // by the value finite element coefficient. - Number t = 0; - for (int k = 0; k < n_q_points_1d; ++k) { - const unsigned int shape_idx = - dof_to_quad ? (q + k * n_q_points_1d) : (k + q * n_q_points_1d); - const unsigned int source_idx = - (direction == 0) ? (k + n_q_points_1d * (i + n_q_points_1d * j)) : - (direction == 1) ? (i + n_q_points_1d * (k + n_q_points_1d * j)) : - (i + n_q_points_1d * (j + n_q_points_1d * k)); - t += shape_data[shape_idx] * - (in_place ? out[source_idx] : in[source_idx]); - } - - if (in_place) __syncthreads(); - - const unsigned int destination_idx = - (direction == 0) ? (q + n_q_points_1d * (i + n_q_points_1d * j)) : - (direction == 1) ? 
(i + n_q_points_1d * (q + n_q_points_1d * j)) : - (i + n_q_points_1d * (j + n_q_points_1d * q)); - - if (add) out[destination_idx] += t; - else out[destination_idx] = t;) + apply( + co_shape_gradients, in, out); } @@ -256,31 +287,25 @@ namespace CUDAWrappers { case 1: { - values<0, true, false, true>( - get_global_shape_values(mf_object_id), u, u); + values<0, true, false, true>(u, u); break; } case 2: { - values<0, true, false, true>( - get_global_shape_values(mf_object_id), u, u); + values<0, true, false, true>(u, u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<1, true, false, true>( - get_global_shape_values(mf_object_id), u, u); + values<1, true, false, true>(u, u); break; } case 3: { - values<0, true, false, true>( - get_global_shape_values(mf_object_id), u, u); + values<0, true, false, true>(u, u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<1, true, false, true>( - get_global_shape_values(mf_object_id), u, u); + values<1, true, false, true>(u, u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<2, true, false, true>( - get_global_shape_values(mf_object_id), u, u); + values<2, true, false, true>(u, u); break; } @@ -306,31 +331,25 @@ namespace CUDAWrappers { case 1: { - values<0, false, false, true>( - get_global_shape_values(mf_object_id), u, u); + values<0, false, false, true>(u, u); break; } case 2: { - values<0, false, false, true>( - get_global_shape_values(mf_object_id), u, u); + values<0, false, false, true>(u, u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<1, false, false, true>( - get_global_shape_values(mf_object_id), u, u); + values<1, false, false, true>(u, u); break; } case 3: { - values<0, false, false, true>( - get_global_shape_values(mf_object_id), u, u); + values<0, false, false, true>(u, u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<1, false, false, true>( - get_global_shape_values(mf_object_id), u, u); + values<1, false, false, true>(u, u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<2, false, false, true>( - get_global_shape_values(mf_object_id), u, u); + values<2, false, false, true>(u, u); break; } @@ -357,69 +376,39 @@ namespace CUDAWrappers { case 1: { - gradients<0, true, false, false>( - get_global_shape_gradients(mf_object_id), u, grad_u[0]); + gradients<0, true, false, false>(u, grad_u[0]); break; } case 2: { - gradients<0, true, false, false>( - get_global_shape_gradients(mf_object_id), u, grad_u[0]); - values<0, true, false, false>( - get_global_shape_values(mf_object_id), u, grad_u[1]); + gradients<0, true, false, false>(u, grad_u[0]); + values<0, true, false, false>(u, grad_u[1]); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<1, true, false, true>(get_global_shape_values( - mf_object_id), - grad_u[0], - grad_u[0]); - gradients<1, true, false, true>( - get_global_shape_gradients(mf_object_id), - grad_u[1], - grad_u[1]); + values<1, true, false, true>(grad_u[0], grad_u[0]); + gradients<1, true, false, true>(grad_u[1], grad_u[1]); break; } case 3: { - gradients<0, true, false, false>( - get_global_shape_gradients(mf_object_id), u, grad_u[0]); - values<0, true, false, false>( - get_global_shape_values(mf_object_id), u, grad_u[1]); - values<0, true, false, false>( - get_global_shape_values(mf_object_id), u, grad_u[2]); - - KOKKOS_IF_ON_DEVICE(__syncthreads();) - - values<1, true, false, true>(get_global_shape_values( - mf_object_id), - grad_u[0], - grad_u[0]); - gradients<1, true, false, true>( - get_global_shape_gradients(mf_object_id), - grad_u[1], - grad_u[1]); - values<1, true, false, true>(get_global_shape_values( - mf_object_id), - 
grad_u[2], - grad_u[2]); - - KOKKOS_IF_ON_DEVICE(__syncthreads();) - - values<2, true, false, true>(get_global_shape_values( - mf_object_id), - grad_u[0], - grad_u[0]); - values<2, true, false, true>(get_global_shape_values( - mf_object_id), - grad_u[1], - grad_u[1]); - gradients<2, true, false, true>( - get_global_shape_gradients(mf_object_id), - grad_u[2], - grad_u[2]); + gradients<0, true, false, false>(u, grad_u[0]); + values<0, true, false, false>(u, grad_u[1]); + values<0, true, false, false>(u, grad_u[2]); + + KOKKOS_IF_ON_DEVICE(__syncthreads();) + + values<1, true, false, true>(grad_u[0], grad_u[0]); + gradients<1, true, false, true>(grad_u[1], grad_u[1]); + values<1, true, false, true>(grad_u[2], grad_u[2]); + + KOKKOS_IF_ON_DEVICE(__syncthreads();) + + values<2, true, false, true>(grad_u[0], grad_u[0]); + values<2, true, false, true>(grad_u[1], grad_u[1]); + gradients<2, true, false, true>(grad_u[2], grad_u[2]); break; } @@ -447,61 +436,37 @@ namespace CUDAWrappers { case 1: { - values<0, true, false, true>( - get_global_shape_values(mf_object_id), u, u); + values<0, true, false, true>(u, u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - gradients<0, true, false, false>( - get_global_co_shape_gradients(mf_object_id), - u, - grad_u[0]); + co_gradients<0, true, false, false>(u, grad_u[0]); break; } case 2: { - values<0, true, false, true>( - get_global_shape_values(mf_object_id), u, u); + values<0, true, false, true>(u, u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<1, true, false, true>( - get_global_shape_values(mf_object_id), u, u); + values<1, true, false, true>(u, u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - gradients<0, true, false, false>( - get_global_co_shape_gradients(mf_object_id), - u, - grad_u[0]); - gradients<1, true, false, false>( - get_global_co_shape_gradients(mf_object_id), - u, - grad_u[1]); + co_gradients<0, true, false, false>(u, grad_u[0]); + co_gradients<1, true, false, false>(u, grad_u[1]); break; } case 3: { - values<0, true, false, true>( - get_global_shape_values(mf_object_id), u, u); + values<0, true, false, true>(u, u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<1, true, false, true>( - get_global_shape_values(mf_object_id), u, u); + values<1, true, false, true>(u, u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<2, true, false, true>( - get_global_shape_values(mf_object_id), u, u); + values<2, true, false, true>(u, u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - gradients<0, true, false, false>( - get_global_co_shape_gradients(mf_object_id), - u, - grad_u[0]); - gradients<1, true, false, false>( - get_global_co_shape_gradients(mf_object_id), - u, - grad_u[1]); - gradients<2, true, false, false>( - get_global_co_shape_gradients(mf_object_id), - u, - grad_u[2]); + co_gradients<0, true, false, false>(u, grad_u[0]); + co_gradients<1, true, false, false>(u, grad_u[1]); + co_gradients<2, true, false, false>(u, grad_u[2]); break; } @@ -530,73 +495,43 @@ namespace CUDAWrappers case 1: { gradients<0, false, add, false>( - get_global_shape_gradients(mf_object_id), - grad_u[dim], - u); + + grad_u[dim], u); break; } case 2: { - gradients<0, false, false, true>( - get_global_shape_gradients(mf_object_id), - grad_u[0], - grad_u[0]); - values<0, false, false, true>(get_global_shape_values( - mf_object_id), - grad_u[1], - grad_u[1]); + gradients<0, false, false, true>(grad_u[0], grad_u[0]); + values<0, false, false, true>(grad_u[1], grad_u[1]); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<1, false, add, false>( - get_global_shape_values(mf_object_id), grad_u[0], 
u); + values<1, false, add, false>(grad_u[0], u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - gradients<1, false, true, false>( - get_global_shape_gradients(mf_object_id), grad_u[1], u); + gradients<1, false, true, false>(grad_u[1], u); break; } case 3: { - gradients<0, false, false, true>( - get_global_shape_gradients(mf_object_id), - grad_u[0], - grad_u[0]); - values<0, false, false, true>(get_global_shape_values( - mf_object_id), - grad_u[1], - grad_u[1]); - values<0, false, false, true>(get_global_shape_values( - mf_object_id), - grad_u[2], - grad_u[2]); + gradients<0, false, false, true>(grad_u[0], grad_u[0]); + values<0, false, false, true>(grad_u[1], grad_u[1]); + values<0, false, false, true>(grad_u[2], grad_u[2]); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<1, false, false, true>(get_global_shape_values( - mf_object_id), - grad_u[0], - grad_u[0]); - gradients<1, false, false, true>( - get_global_shape_gradients(mf_object_id), - grad_u[1], - grad_u[1]); - values<1, false, false, true>(get_global_shape_values( - mf_object_id), - grad_u[2], - grad_u[2]); + values<1, false, false, true>(grad_u[0], grad_u[0]); + gradients<1, false, false, true>(grad_u[1], grad_u[1]); + values<1, false, false, true>(grad_u[2], grad_u[2]); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<2, false, add, false>( - get_global_shape_values(mf_object_id), grad_u[0], u); + values<2, false, add, false>(grad_u[0], u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<2, false, true, false>( - get_global_shape_values(mf_object_id), grad_u[1], u); + values<2, false, true, false>(grad_u[1], u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - gradients<2, false, true, false>( - get_global_shape_gradients(mf_object_id), grad_u[2], u); + gradients<2, false, true, false>(grad_u[2], u); break; } @@ -624,65 +559,41 @@ namespace CUDAWrappers { case 1: { - gradients<0, false, true, false>( - get_global_co_shape_gradients(mf_object_id), - grad_u[0], - u); + co_gradients<0, false, true, false>(grad_u[0], u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<0, false, false, true>( - get_global_shape_values(mf_object_id), u, u); + values<0, false, false, true>(u, u); break; } case 2: { - gradients<1, false, true, false>( - get_global_co_shape_gradients(mf_object_id), - grad_u[1], - u); + co_gradients<1, false, true, false>(grad_u[1], u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - gradients<0, false, true, false>( - get_global_co_shape_gradients(mf_object_id), - grad_u[0], - u); + co_gradients<0, false, true, false>(grad_u[0], u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<1, false, false, true>( - get_global_shape_values(mf_object_id), u, u); + values<1, false, false, true>(u, u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<0, false, false, true>( - get_global_shape_values(mf_object_id), u, u); + values<0, false, false, true>(u, u); KOKKOS_IF_ON_DEVICE(__syncthreads();) break; } case 3: { - gradients<2, false, true, false>( - get_global_co_shape_gradients(mf_object_id), - grad_u[2], - u); + co_gradients<2, false, true, false>(grad_u[2], u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - gradients<1, false, true, false>( - get_global_co_shape_gradients(mf_object_id), - grad_u[1], - u); + co_gradients<1, false, true, false>(grad_u[1], u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - gradients<0, false, true, false>( - get_global_co_shape_gradients(mf_object_id), - grad_u[0], - u); + co_gradients<0, false, true, false>(grad_u[0], u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<2, false, false, true>( - get_global_shape_values(mf_object_id), u, u); + 
values<2, false, false, true>(u, u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<1, false, false, true>( - get_global_shape_values(mf_object_id), u, u); + values<1, false, false, true>(u, u); KOKKOS_IF_ON_DEVICE(__syncthreads();) - values<0, false, false, true>( - get_global_shape_values(mf_object_id), u, u); + values<0, false, false, true>(u, u); KOKKOS_IF_ON_DEVICE(__syncthreads();) break; diff --git a/source/matrix_free/cuda_matrix_free.cc b/source/matrix_free/cuda_matrix_free.cc index 37598743fc..2ff0a5968f 100644 --- a/source/matrix_free/cuda_matrix_free.cc +++ b/source/matrix_free/cuda_matrix_free.cc @@ -23,11 +23,6 @@ DEAL_II_NAMESPACE_OPEN namespace CUDAWrappers { - namespace internal - { - std::array used_objects; - } - // Do not instantiate for dim = 1 template class MatrixFree<2, float>; template class MatrixFree<2, double>; diff --git a/tests/cuda/cuda_evaluate_1d_shape.cc b/tests/cuda/cuda_evaluate_1d_shape.cc index 21f0f63268..24409fe430 100644 --- a/tests/cuda/cuda_evaluate_1d_shape.cc +++ b/tests/cuda/cuda_evaluate_1d_shape.cc @@ -34,7 +34,12 @@ namespace CUDA = LinearAlgebra::CUDAWrappers; template __global__ void -evaluate_tensor_product(double *dst, double *src) +evaluate_tensor_product( + Kokkos::View shape_values, + Kokkos::View shape_gradients, + Kokkos::View co_shape_gradients, + double * dst, + double * src) { CUDAWrappers::internal::EvaluatorTensorProduct< CUDAWrappers::internal::evaluate_general, @@ -42,14 +47,12 @@ evaluate_tensor_product(double *dst, double *src) M - 1, N, double> - evaluator(0); + evaluator(shape_values, shape_gradients, co_shape_gradients); if (type == 0) - evaluator.template values<0, dof_to_quad, add, false>( - CUDAWrappers::internal::get_global_shape_values(0), src, dst); + evaluator.template values<0, dof_to_quad, add, false>(src, dst); if (type == 1) - evaluator.template gradients<0, dof_to_quad, add, false>( - CUDAWrappers::internal::get_global_shape_values(0), src, dst); + evaluator.template gradients<0, dof_to_quad, add, false>(src, dst); } template @@ -57,7 +60,8 @@ void test() { deallog << "Test " << M << " x " << N << std::endl; - LinearAlgebra::ReadWriteVector shape_host(M * N); + unsigned int size_shape_values = M * N; + LinearAlgebra::ReadWriteVector shape_host(size_shape_values); for (unsigned int i = 0; i < (M + 1) / 2; ++i) for (unsigned int j = 0; j < N; ++j) { @@ -96,28 +100,33 @@ test() x_dev.import(x_host, VectorOperation::insert); y_dev.import(y_host, VectorOperation::insert); - unsigned int size_shape_values = M * N * sizeof(double); - - cudaError_t cuda_error = - cudaMemcpyToSymbol(CUDAWrappers::internal::get_global_shape_values( - 0), - shape_host.begin(), - size_shape_values, - 0, - cudaMemcpyHostToDevice); - AssertCuda(cuda_error); - - cuda_error = cudaMemcpyToSymbol( - CUDAWrappers::internal::get_global_shape_gradients(0), - shape_host.begin(), - size_shape_values, - 0, - cudaMemcpyHostToDevice); - AssertCuda(cuda_error); + + Kokkos::View shape_values( + Kokkos::view_alloc("shape_values", Kokkos::WithoutInitializing), + size_shape_values); + Kokkos::View> + shape_host_view(shape_host.begin(), size_shape_values); + Kokkos::deep_copy(shape_values, shape_host_view); + + Kokkos::View shape_gradients( + Kokkos::view_alloc("shape_gradients", Kokkos::WithoutInitializing), + size_shape_values); + Kokkos::deep_copy(shape_gradients, shape_host_view); + + Kokkos::View co_shape_gradients( + Kokkos::view_alloc("co_shape_gradients", Kokkos::WithoutInitializing), + size_shape_values); + Kokkos::deep_copy(co_shape_gradients, 
shape_host_view); + // Launch the kernel - evaluate_tensor_product - <<<1, M>>>(y_dev.get_values(), x_dev.get_values()); + evaluate_tensor_product<<<1, M>>>(shape_values, + shape_gradients, + co_shape_gradients, + y_dev.get_values(), + x_dev.get_values()); // Check the results on the host y_host.import(y_dev, VectorOperation::insert); @@ -145,8 +154,11 @@ test() x_dev.import(x_host, VectorOperation::insert); // Launch the kernel - evaluate_tensor_product - <<<1, M>>>(x_dev.get_values(), y_dev.get_values()); + evaluate_tensor_product<<<1, M>>>(shape_values, + shape_gradients, + co_shape_gradients, + x_dev.get_values(), + y_dev.get_values()); // Check the results on the host x_host.import(x_dev, VectorOperation::insert); @@ -162,6 +174,7 @@ main() std::ofstream logfile("output"); deallog.attach(logfile); + Kokkos::initialize(); init_cuda(); deallog.push("values"); @@ -204,5 +217,7 @@ main() deallog.pop(); + Kokkos::finalize(); + return 0; } diff --git a/tests/cuda/cuda_evaluate_2d_shape.cc b/tests/cuda/cuda_evaluate_2d_shape.cc index 48d9172dc8..caf15450d4 100644 --- a/tests/cuda/cuda_evaluate_2d_shape.cc +++ b/tests/cuda/cuda_evaluate_2d_shape.cc @@ -34,7 +34,12 @@ namespace CUDA = LinearAlgebra::CUDAWrappers; template __global__ void -evaluate_tensor_product(double *dst, double *src) +evaluate_tensor_product( + Kokkos::View shape_values, + Kokkos::View shape_gradients, + Kokkos::View co_shape_gradients, + double * dst, + double * src) { CUDAWrappers::internal::EvaluatorTensorProduct< CUDAWrappers::internal::evaluate_general, @@ -42,23 +47,19 @@ evaluate_tensor_product(double *dst, double *src) M - 1, N, double> - evaluator(0); + evaluator(shape_values, shape_gradients, co_shape_gradients); if (type == 0) { - evaluator.template values<0, dof_to_quad, false, false>( - CUDAWrappers::internal::get_global_shape_values(0), src, src); + evaluator.template values<0, dof_to_quad, false, false>(src, src); __syncthreads(); - evaluator.template values<1, dof_to_quad, add, false>( - CUDAWrappers::internal::get_global_shape_values(0), src, dst); + evaluator.template values<1, dof_to_quad, add, false>(src, dst); } if (type == 1) { - evaluator.template gradients<0, dof_to_quad, false, false>( - CUDAWrappers::internal::get_global_shape_values(0), src, src); + evaluator.template gradients<0, dof_to_quad, false, false>(src, src); __syncthreads(); - evaluator.template gradients<1, dof_to_quad, add, false>( - CUDAWrappers::internal::get_global_shape_values(0), src, dst); + evaluator.template gradients<1, dof_to_quad, add, false>(src, dst); } } @@ -67,7 +68,8 @@ void test() { deallog << "Test " << M << " x " << N << std::endl; - LinearAlgebra::ReadWriteVector shape_host(M * N); + unsigned int size_shape_values = M * N; + LinearAlgebra::ReadWriteVector shape_host(size_shape_values); for (unsigned int i = 0; i < (M + 1) / 2; ++i) for (unsigned int j = 0; j < N; ++j) { @@ -121,29 +123,34 @@ test() x_dev.import(x_host, VectorOperation::insert); y_dev.import(y_host, VectorOperation::insert); - unsigned int size_shape_values = M * N * sizeof(double); - - cudaError_t cuda_error = - cudaMemcpyToSymbol(CUDAWrappers::internal::get_global_shape_values( - 0), - shape_host.begin(), - size_shape_values, - 0, - cudaMemcpyHostToDevice); - AssertCuda(cuda_error); - - cuda_error = cudaMemcpyToSymbol( - CUDAWrappers::internal::get_global_shape_gradients(0), - shape_host.begin(), - size_shape_values, - 0, - cudaMemcpyHostToDevice); - AssertCuda(cuda_error); + + Kokkos::View shape_values( + Kokkos::view_alloc("shape_values", 
Kokkos::WithoutInitializing), + size_shape_values); + Kokkos::View> + shape_host_view(shape_host.begin(), size_shape_values); + Kokkos::deep_copy(shape_values, shape_host_view); + + Kokkos::View shape_gradients( + Kokkos::view_alloc("shape_gradients", Kokkos::WithoutInitializing), + size_shape_values); + Kokkos::deep_copy(shape_gradients, shape_host_view); + + Kokkos::View co_shape_gradients( + Kokkos::view_alloc("co_shape_gradients", Kokkos::WithoutInitializing), + size_shape_values); + Kokkos::deep_copy(co_shape_gradients, shape_host_view); // Launch the kernel dim3 block_dim(M, N); evaluate_tensor_product - <<<1, block_dim>>>(y_dev.get_values(), x_dev.get_values()); + <<<1, block_dim>>>(shape_values, + shape_gradients, + co_shape_gradients, + y_dev.get_values(), + x_dev.get_values()); // Check the results on the host y_host.import(y_dev, VectorOperation::insert); @@ -173,7 +180,11 @@ test() // Launch the kernel evaluate_tensor_product - <<<1, block_dim>>>(x_dev.get_values(), y_dev.get_values()); + <<<1, block_dim>>>(shape_values, + shape_gradients, + co_shape_gradients, + x_dev.get_values(), + y_dev.get_values()); // Check the results on the host x_host.import(x_dev, VectorOperation::insert); @@ -189,6 +200,7 @@ main() std::ofstream logfile("output"); deallog.attach(logfile); + Kokkos::initialize(); init_cuda(); deallog.push("values"); @@ -215,5 +227,7 @@ main() deallog.pop(); + Kokkos::finalize(); + return 0; } -- 2.39.5
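For reference, the idiom this patch applies everywhere to replace __constant__
arrays and cudaMemcpyToSymbol() is: allocate a device Kokkos::View without
initialization, fill a host mirror, deep_copy it to the device, and capture the
view by value in the kernels. The following is a minimal, self-contained sketch
of that pattern under stated assumptions; the names (host_data, shape_values)
are illustrative only and are not code from the patch.

    // Sketch of the host-mirror / deep_copy pattern used above.
    #include <Kokkos_Core.hpp>

    #include <vector>

    int main(int argc, char *argv[])
    {
      Kokkos::initialize(argc, argv);
      {
        // Coefficients computed on the host, e.g. 1d shape values.
        const std::vector<double> host_data{1., 2., 3., 4.};

        // Device view allocated without initialization, as in the patch.
        Kokkos::View<double *> shape_values(
          Kokkos::view_alloc("shape_values", Kokkos::WithoutInitializing),
          host_data.size());

        // Fill a host mirror and copy it to the device in one deep_copy.
        auto shape_values_host = Kokkos::create_mirror_view(shape_values);
        for (std::size_t i = 0; i < host_data.size(); ++i)
          shape_values_host[i] = host_data[i];
        Kokkos::deep_copy(shape_values, shape_values_host);

        // shape_values can now be copied by value into device kernels
        // (e.g. stored in MatrixFree::Data), instead of being looked up
        // in constant memory through a per-object id.
      }
      Kokkos::finalize();
      return 0;
    }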