From: Bruno Turcksin Date: Sun, 14 May 2017 03:04:03 +0000 (-0400) Subject: Add CUDA support of matrix-free. X-Git-Tag: v9.0.0-rc1~1588^2~2 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=5fc1311ffa11dc1781dd091b03c79a3ba23ee5f0;p=dealii.git Add CUDA support of matrix-free. --- diff --git a/include/deal.II/matrix_free/cuda_fe_evaluation.cuh b/include/deal.II/matrix_free/cuda_fe_evaluation.cuh new file mode 100644 index 0000000000..1944e7ef75 --- /dev/null +++ b/include/deal.II/matrix_free/cuda_fe_evaluation.cuh @@ -0,0 +1,391 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2016 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + +#ifndef dealii__cuda_fe_evaluation_cuh +#define dealii__cuda_fe_evaluation_cuh + +#include +#include +#include +#include +#include +#include + +DEAL_II_NAMESPACE_OPEN + +namespace CUDAWrappers +{ + namespace internal + { + template + __device__ void resolve_hanging_nodes_shmem(Number *values, const unsigned + int constr) + { + //TODO + } + } + + + + /** + * This class provides all the functions necessary to evaluate functions at + * quadrature points and to perform cell integrations. In functionality, this class is + * similar to FEValues. + * + * This class has five template arguments: + * + * @tparam dim Dimension in which this class is to be used + * + * @tparam fe_degree Degree of the tensor product finite element with fe_degree+1 + * degrees of freedom per coordinate direction + * + * @tparam n_q_points_1d Number of points in the quadrature formula in 1D, + * defaults to fe_degree+1 + * + * @tparam n_components Number of vector components when solving a system of + * PDEs. If the same operation is applied to several components of a PDE (e.g. + * a vector Laplace equation), they can be applied simultaneously with one call + * (and often more efficiently). Defaults to 1 + * + * @tparam Number Number format, usually @p double or @p float. Defaults to @p + * double + * + * @ingroup CUDAWrappers + * + * @author Karl Ljungkvist, Bruno Turcksin, 2016 + */ + template + class FEEvaluation + { + public: + typedef Number value_type; + typedef Tensor<1,dim,Number> gradient_type; + typedef typename MatrixFree::Data data_type; + static const unsigned int dimension = dim; + static const unsigned int n_components = n_components_; + static const unsigned int n_q_points = + Utilities::fixed_int_power::value; + static const unsigned int tensor_dofs_per_cell = + Utilities::fixed_int_power::value; + + /** + * Constructor. + */ + __device__ FEEvaluation(int cell_id, + const data_type *data, + SharedData *shdata); + + /** + * For the vector @p src, read out the values on the degrees of freedom of + * the current cell, and store them internally. Similar functionality as + * the function DoFAccessor::get_interpolated_dof_values when no + * constraints are present, but it also includes constraints from hanging + * nodes, so one can see it as a similar function to + * ConstraintMatrix::read_dof_values as well.
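 + *
 + * A typical device-side call sequence built around this function is
 + * sketched below (illustrative only; @p fe_eval, @p src and @p dst are
 + * placeholder names):
 + * @code
 + *   fe_eval.read_dof_values(src);
 + *   fe_eval.evaluate(true, true);
 + *   // ... work on the quadrature points ...
 + *   fe_eval.integrate(true, true);
 + *   fe_eval.distribute_local_to_global(dst);
 + * @endcode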
+ */ + __device__ void read_dof_values(const Number *src); + + /** + * Take the value stored internally on dof values of the current cell and + * sum them into the vector @p dst. The function also applies constraints + * during the write operation. The functionality is hence similar to the + * function ConstraintMatrix::distribute_local_to_global. + */ + __device__ void distribute_local_to_global(Number *dst) const; + + /** + * Evaluate the function values and the gradients of the FE function given + * at the DoF values in the input vector at the quadrature points on the + * unit cell. The function arguments specify which parts shall actually be + * computed. This function needs to be called before the functions + * @p get_value() or @p get_gradient() give useful information. + */ + __device__ void evaluate(const bool evaluate_val, + const bool evaluate_grad); + + /** + * This function takes the values and/or gradients that are stored on + * quadrature points, tests them by all the basis functions/gradients on + * the cell and performs the cell integration. The two function arguments + * @p integrate_val and @p integrate_grad are used to enable/disable some + * of the values or the gradients. + */ + __device__ void integrate(const bool integrate_val, + const bool integrate_grad); + + /** + * Return the value of a finite element function at quadrature point + * number @p q_point after a call to @p evalue(true,...). + */ + __device__ value_type get_value(const unsigned int q_point) const; + + /** + * Write a value to the field containing the values on quadrature points + * with component @p q_point. Access to the same fiels as through @p + * get_value(), This specifies the value which is tested by all basis + * function on the current cell and integrated over. + */ + __device__ void submit_value(const value_type &val_in, + const unsigned int q_point); + + /** + * Return the gradient of a finite element function at quadrature point + * number @p q_point after a call to @p evaluate(...,true). + */ + __device__ gradient_type get_gradient(const unsigned int q_point) const; + + /** + * Write a contribution that is tested by the gradient to the field + * containing the values on quadrature points with component @p q_point + */ + __device__ void submit_gradient(const gradient_type &grad_in, + const unsigned int q_point); + + /** + * Apply the function @p func on every quadrature point. + */ + template + __device__ void apply_quad_point_operations(const functor &func); + + private: + unsigned int *local_to_global; + unsigned int n_cells; + unsigned int padding_length; + + const unsigned int constraint_mask; + + Number *inv_jac; + Number *JxW; + + // Internal buffer + Number *values; + Number *gradients[dim]; + }; + + + + template + __device__ + FEEvaluation:: + FEEvaluation(int cell_id, + const data_type *data, + SharedData *shdata) + : + n_cells(data->n_cells), + padding_length(data->padding_length), + constraint_mask(data->constraint_mask[cell_id]), + values(shdata->values) + { + local_to_global = data->local_to_global + padding_length*cell_id; + inv_jac = data->inv_jacobian + padding_length*cell_id; + JxW = data->JxW + padding_length*cell_id; + + for (unsigned int i=0; i < dim; ++i) + gradients[i] = shdata->gradients[i]; + } + + + + template + __device__ void + FEEvaluation:: + read_dof_values(const Number *src) + { + static_assert(n_components_ == 1, "This function only supports FE with one \ + components"); + const unsigned int idx = (threadIdx.x%n_q_points_1d) + +(dim>1 ? 
threadIdx.y : 0)*n_q_points_1d + +(dim>2 ? threadIdx.z : 0)*n_q_points_1d*n_q_points_1d; + + const unsigned int src_idx = local_to_global[idx]; + // Use the read-only data cache. + values[idx] = __ldg(&src[src_idx]); + + if (constraint_mask) + internal::resolve_hanging_nodes_shmem(values, + constraint_mask); + + __syncthreads(); + } + + + + template + __device__ void + FEEvaluation:: + distribute_local_to_global(Number *dst) const + { + static_assert(n_components_ == 1, "This function only supports FE with one \ + components"); + if (constraint_mask) + internal::resolve_hanging_nodes_shmem(values, + constraint_mask); + + + const unsigned int idx = (threadIdx.x%n_q_points_1d) + + (dim>1 ? threadIdx.y : 0) * n_q_points_1d + + (dim>2 ? threadIdx.z : 0) * n_q_points_1d * n_q_points_1d; + const unsigned int destination_idx = local_to_global[idx]; + + dst[destination_idx] += values[idx]; + } + + + + template + __device__ void + FEEvaluation:: + evaluate(const bool evaluate_val, const bool evaluate_grad) + { + // First evaluate the gradients because it requires values that will be + // changed if evaluate_val is true + internal::EvaluatorTensorProduct evaluator_tensor_product; + if (evaluate_grad == true) + { + evaluator_tensor_product.gradient_at_quad_pts(values, gradients); + __syncthreads(); + } + + if (evaluate_val == true) + { + evaluator_tensor_product.value_at_quad_pts(values); + __syncthreads(); + } + } + + + + template + __device__ void + FEEvaluation:: + integrate(const bool integrate_val, const bool integrate_grad) + { + internal::EvaluatorTensorProduct evaluator_tensor_product; + if (integrate_val == true) + { + evaluator_tensor_product.integrate_value(values); + __syncthreads(); + if (integrate_grad == true) + { + evaluator_tensor_product.integrate_gradient(values, gradients); + __syncthreads(); + } + } + else if (integrate_grad == true) + { + evaluator_tensor_product.integrate_gradient(values, gradients); + __syncthreads(); + } + } + + + + template + __device__ + typename FEEvaluation::value_type + FEEvaluation:: + get_value(const unsigned int q_point) const + { + return values[q_point]; + } + + + + template + __device__ void + FEEvaluation:: + submit_value(const value_type &val_in, const unsigned int q_point) + { + values[q_point] = val_in * JxW[q_point]; + } + + + + template + __device__ + typename FEEvaluation::gradient_type + FEEvaluation:: + get_gradient(const unsigned int q_point) const + { + static_assert(n_components_ == 1, "This function only supports FE with one \ + components"); + // TODO optimize if the mesh is uniform + const Number *inv_jacobian = &inv_jac[q_point]; + gradient_type grad; + for (int d_1=0; d_1 + __device__ void + FEEvaluation:: + submit_gradient(const gradient_type &grad_in, const unsigned int q_point) + { + // TODO optimize if the mesh is uniform + const Number *inv_jacobian = &inv_jac[q_point]; + for (int d_1=0; d_1 + template + __device__ void + FEEvaluation:: + apply_quad_point_operations(const functor &func) + { + const unsigned int q_point = (dim == 1 ? threadIdx.x%n_q_points_1d : + dim == 2 ? 
threadIdx.x%n_q_points_1d + n_q_points_1d *threadIdx.y : + threadIdx.x%n_q_points_1d + n_q_points_1d * (threadIdx.y + + n_q_points_1d*threadIdx.z)); + func(this, q_point); + + __syncthreads(); + } +} + +DEAL_II_NAMESPACE_CLOSE + +#endif diff --git a/include/deal.II/matrix_free/cuda_matrix_free.h b/include/deal.II/matrix_free/cuda_matrix_free.h new file mode 100644 index 0000000000..fa7ad082c9 --- /dev/null +++ b/include/deal.II/matrix_free/cuda_matrix_free.h @@ -0,0 +1,312 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2016 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + + +#ifndef dealii__cuda_matrix_free_h +#define dealii__cuda_matrix_free_h + +#include + +#ifdef DEAL_II_WITH_CUDA + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +DEAL_II_NAMESPACE_OPEN + +namespace CUDAWrappers +{ + // forward declaration + namespace internal + { + template + class ReinitHelper; + } + + /** + * This class collects all the data that is stored for the matrix free + * implementation. The storage scheme is tailored towards several loops + * performed with the same data, i.e., typically doing many matrix-vector + * products or residual computations on the same mesh. + * + * This class does not implement any operations involving finite element basis + * functions, i.e., regarding the operation performed on the cells. For these + * operations, the class FEEvaluation is designed to use the data collected in + * this class. + * + * This class implements a loop over all cells (cell_loop()). This loop is + * scheduled in such a way that cells that share degrees of freedom + * are not worked on simultaneously, which implies that it is possible to + * write to vectors in parallel without having to explicitly synchronize + * access to these vectors and matrices. This class does not implement any + * shape values; all it does is cache the respective data. To implement + * finite element operations, use the class CUDAWrappers::FEEvaluation. + * + * This class traverses the cells in a different order than the usual + * Triangulation class in deal.II.
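 + *
 + * A typical use of this class is sketched below (the names @p mf_data,
 + * @p LocalOperator, @p additional_data, @p src and @p dst are illustrative
 + * placeholders):
 + * @code
 + *   CUDAWrappers::MatrixFree<dim, Number> mf_data;
 + *   mf_data.reinit(mapping, dof_handler, constraints, quad, additional_data);
 + *   mf_data.cell_loop(LocalOperator(), src, dst);
 + *   mf_data.copy_constrained_values(src, dst);
 + * @endcode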
+ * + * @ingroup CUDAWrappers + */ + template + class MatrixFree : public Subscriptor + { + public: + typedef Tensor<2, dim, Tensor<1,dim,Number>> jacobian_type; + // TODO this should really be a CUDAWrappers::Point + typedef Tensor<1, dim, Number> point_type; + + // Use Number2 so we don't hide the template parameter Number + template + using CUDAVector = ::dealii::LinearAlgebra::CUDAWrappers::Vector; + + /** + * Parallelization scheme used: parallel_in_elem (parallelism at the level + * of degrees of freedom) or parallel_over_elem (parallelism at the level of + * cells) + */ + enum ParallelizationScheme {parallel_in_elem, parallel_over_elem}; + + struct AdditionalData + { + AdditionalData ( + const ParallelizationScheme parallelization_scheme = parallel_in_elem, + const UpdateFlags mapping_update_flags = update_gradients | update_JxW_values) + : + parallelization_scheme(parallelization_scheme), + mapping_update_flags(mapping_update_flags) + {} + + /** + * Number of colors created by the graph coloring algorithm. + */ + unsigned int n_colors; + /** + * Parallelization scheme used, parallization over degrees of freedom or + * over cells. + */ + ParallelizationScheme parallelization_scheme; + /** + * This flag is used to determine which quantities should be cached. This + * class can cache data needed for gradient computations (inverse + * Jacobians), Jacobian determinants (JxW), quadrature points as well as + * data for Hessians (derivative of Jacobians). By default, only data for + * gradients and Jacobian determinants times quadrature weights, JxW, are + * cached. If quadrature points of second derivatives are needed, they + * must be specified by this field. + */ + UpdateFlags mapping_update_flags; + }; + + /** + * Structure which is passed to the kernel. It is used to pass all the + * necessary information from the CPU to the GPU. + */ + struct Data + { + point_type *q_points; + unsigned int *local_to_global; + Number *inv_jacobian; + Number *JxW; + unsigned int n_cells; + unsigned int padding_length; + unsigned int row_start; + unsigned int *constraint_mask; + }; + + /** + * Default constructor. + */ + MatrixFree(); + + unsigned int get_padding_length() const; + + /** + * Extracts the information needed to perform loops over cells. The + * DoFHandler and ConstraintMatrix describe the layout of degrees of + * freedom, the DoFHandler and the mapping describe the transformation from + * unit to real cell, and the finite element underlying the DoFHandler + * together with the quadrature formula describe the local operations. + */ + void reinit(const Mapping &mapping, + const DoFHandler &dof_handler, + const ConstraintMatrix &constraints, + const Quadrature<1> &quad, + const AdditionalData additional_data = AdditionalData()); + + /** + * Initializes the data structures. Same as above but using a Q1 mapping. + */ + void reinit(const DoFHandler &dof_handler, + const ConstraintMatrix &constraints, + const Quadrature<1> &quad, + const AdditionalData AdditionalData = AdditionalData()); + + /** + * Return the Data structure associated with @p color. + */ + Data get_data(unsigned int color) const; + + /** + * This method runs the loop over all cells and apply the local operation on + * each element in parallel. @p func is a functor which is appplied on each color. 
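 + *
 + * Judging from the launch code in apply_kernel_shmem(), @p func is expected
 + * to expose the compile-time constants @p n_dofs_1d, @p n_local_dofs and
 + * @p n_q_points, and a device call operator roughly of the following form
 + * (the name @p LocalOperator and the constant @p fe_degree are placeholders):
 + * @code
 + *   struct LocalOperator
 + *   {
 + *     static const unsigned int n_dofs_1d    = fe_degree + 1;
 + *     static const unsigned int n_local_dofs = Utilities::fixed_int_power<fe_degree + 1, dim>::value;
 + *     static const unsigned int n_q_points   = Utilities::fixed_int_power<fe_degree + 1, dim>::value;
 + *
 + *     __device__ void operator()(const unsigned int cell,
 + *                                const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data,
 + *                                CUDAWrappers::SharedData<dim, Number> *shared_data,
 + *                                const Number *src,
 + *                                Number *dst) const;
 + *   };
 + * @endcode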
+ */ + template + void cell_loop(const functor &func, + const CUDAVector &src, + CUDAVector &dst) const; + + void copy_constrained_values(const CUDAVector &src, + CUDAVector &dst) const; + + void set_constrained_values(const Number val, CUDAVector &dst) const; + + /** + * Free all the memory allocated. + */ + void free(); + + /** + * Return an approximation of the memory consumption of this class in bytes. + */ + std::size_t memory_consumption() const; + + private: + /** + * Parallelization scheme used, parallization over degrees of freedom or + * over cells. + */ + ParallelizationScheme parallelization_scheme; + /** + * Degree of the finite element used. + */ + unsigned int fe_degree; + /** + * Number of degrees of freedom per cell. + */ + unsigned int dofs_per_cell; + /** + * Number of constrained degrees of freedom. + */ + unsigned int n_constrained_dofs; + /** + * Number of quadrature points per cells. + */ + unsigned int q_points_per_cell; + /** + * Number of colors produced by the graph coloring algorithm. + */ + unsigned int n_colors; + /** + * Number of cells in each color. + */ + std::vector n_cells; + /** + * Vector of pointers to the quadrature points associated to the cells of + * each color. + */ + std::vector q_points; + /** + * Map the position in the local vector to the position in the global + * vector. + */ + std::vector local_to_global; + /** + * Vector of pointer to the inverse Jacobian associated to the cells of each + * color. + */ + std::vector inv_jacobian; + /** + * Vector of pointer to the Jacobian time the weights associated to the + * cells of each color. + */ + std::vector JxW; + + // Constraints + unsigned int *constrained_dofs; + std::vector constraint_mask; + /** + * Grid dimensions associated to the different colors. The grid dimensions + * are used to launch the CUDA kernels. + */ + std::vector grid_dim; + /** + * Block dimensions associated to the different colors. The block dimensions + * are used to launch the CUDA kernels. + */ + std::vector block_dim; + + // Parallelization parameter + unsigned int cells_per_block; + dim3 constraint_grid_dim; + dim3 constraint_block_dim; + + unsigned int padding_length; + std::vector row_start; + + friend class internal::ReinitHelper; + }; + + + + // TODO find a better place to put these things + // Structure to pass the shared memory into a general user function. + template + struct SharedData + { + __device__ SharedData(Number *vd, + Number *gq[dim]) + : + values(vd) + { + for (int d=0; d + +#ifdef DEAL_II_WITH_CUDA + +#include +#include +#include +#include +#include +#include + +#define BLOCK_SIZE 128 + +DEAL_II_NAMESPACE_OPEN + +namespace CUDAWrappers +{ + namespace internal + { + // These variables are stored in the device constant memory. + // TODO: use a template parameter instead of a macro +#define MAX_ELEM_DEGREE 10 + __constant__ double global_shape_values[(MAX_ELEM_DEGREE+1) * (MAX_ELEM_DEGREE+1)]; + __constant__ double global_shape_gradients[(MAX_ELEM_DEGREE+1) * (MAX_ELEM_DEGREE+1)]; + + template + using CUDAVector = ::dealii::LinearAlgebra::CUDAWrappers::Vector; + + /** + * Transpose a N x M matrix stored in a one-dimensional array to a M x N + * matrix stored in a one-dimensional array. 
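 + *
 + * In other words, assuming row-major storage, dst[j*N + i] = src[i*M + j]
 + * for all 0 <= i < N and 0 <= j < M.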
+ */ + template + void transpose(const unsigned int N, const unsigned M, const Number *src, Number *dst) + { + // src is N X M + // dst is M X N + for (unsigned int i=0; i + void transpose_in_place(std::vector &array_host, + const unsigned int n, + const unsigned int m) + { + // convert to structure-of-array + std::vector old(array_host.size()); + old.swap(array_host); + + transpose(n, m, &old[0], &array_host[0]); + } + + + + /** + * Allocate an array to the device and copy @p array_host to the device. + */ + template + void alloc_and_copy(Number1 **array_device, std::vector &array_host, + const unsigned int n) + { + cudaError_t error_code = cudaMalloc(array_device, n*sizeof(Number1)); + AssertCuda(error_code); + + error_code = cudaMemcpy(*array_device, &array_host[0], n*sizeof(Number1), + cudaMemcpyHostToDevice); + AssertCuda(error_code); + } + + + + /** + * Helper class to (re)initialize MatrixFree object. + */ + //TODO for now does not support hanging_nodes + template + class ReinitHelper + { + public: + ReinitHelper(MatrixFree *data, + const Mapping &mapping, + const FiniteElement &fe, + const Quadrature<1> &quad, + const ::dealii::internal::MatrixFreeFunctions::ShapeInfo &shape_info, + const UpdateFlags &update_flags); + + void setup_color_arrays(const unsigned int n_colors); + + void setup_cell_arrays(const unsigned int color); + + template + void get_cell_data(const CellFilter &cell, const unsigned int cell_id); + + void alloc_and_copy_arrays(const unsigned int cell); + + private: + MatrixFree *data; + // Host data + std::vector local_to_global_host; + std::vector> q_points_host; + std::vector JxW_host; + std::vector inv_jacobian_host; + std::vector constraint_mask_host; + // Local buffer + std::vector local_dof_indices; + FEValues fe_values; + // Convert the default dof numbering to a lexicographic one + const std::vector &lexicographic_inv; + std::vector lexicographic_dof_indices; + const unsigned int fe_degree; + const unsigned int dofs_per_cell; + const unsigned int q_points_per_cell; + const UpdateFlags &update_flags; + const unsigned int padding_length; + }; + + + + template + ReinitHelper::ReinitHelper(MatrixFree *data, + const Mapping &mapping, + const FiniteElement &fe, + const Quadrature<1> &quad, + const ::dealii::internal::MatrixFreeFunctions::ShapeInfo &shape_info, + const UpdateFlags &update_flags) + : + data(data), + fe_degree(data->fe_degree), + dofs_per_cell(data->dofs_per_cell), + q_points_per_cell(data->q_points_per_cell), + fe_values(mapping, fe, Quadrature(quad), + update_inverse_jacobians | update_quadrature_points | + update_values | update_gradients | update_JxW_values), + lexicographic_inv(shape_info.lexicographic_numbering), + update_flags(update_flags), + padding_length(data->get_padding_length()) + { + local_dof_indices.resize(data->dofs_per_cell); + lexicographic_dof_indices.resize(dofs_per_cell); + } + + + + template + void ReinitHelper::setup_color_arrays(const unsigned int n_colors) + { + data->n_cells.resize(n_colors); + data->grid_dim.resize(n_colors); + data->block_dim.resize(n_colors); + data->local_to_global.resize(n_colors); + data->constraint_mask.resize(n_colors); + + data->row_start.resize(n_colors); + + if (update_flags & update_quadrature_points) + data->q_points.resize(n_colors); + + if (update_flags & update_JxW_values) + data->JxW.resize(n_colors); + + if (update_flags & update_gradients) + data->inv_jacobian.resize(n_colors); + } + + + + template + void ReinitHelper::setup_cell_arrays(const unsigned int color) + { + const unsigned int 
n_cells = data->n_cells[color]; + const unsigned int cells_per_block = data->cells_per_block; + + // Setup kernel parameters + const double apply_n_blocks = std::ceil(static_cast(n_cells)/ + static_cast(cells_per_block)); + const unsigned int apply_x_n_blocks = std::round(std::sqrt(apply_n_blocks)); + const unsigned int apply_y_n_blocks = std::ceil(apply_n_blocks/ + static_cast(apply_x_n_blocks)); + + data->grid_dim[color] = dim3(apply_x_n_blocks, apply_y_n_blocks); + + // TODO this should be a templated parameter. + const unsigned int n_dofs_1d = fe_degree+1; + + if (data->parallelization_scheme == MatrixFree::parallel_in_elem) + { + if (dim==1) + data->block_dim[color] = dim3(n_dofs_1d*cells_per_block); + else if (dim==2) + data->block_dim[color] = dim3(n_dofs_1d*cells_per_block, n_dofs_1d); + else + data->block_dim[color] = dim3(n_dofs_1d*cells_per_block, n_dofs_1d, n_dofs_1d); + } + else + data->block_dim[color] = dim3(cells_per_block); + + local_to_global_host.resize(n_cells*padding_length); + + if (update_flags & update_quadrature_points) + q_points_host.resize(n_cells*padding_length); + + if (update_flags & update_JxW_values) + JxW_host.resize(n_cells*padding_length); + + if (update_flags & update_gradients) + inv_jacobian_host.resize(n_cells*padding_length*dim*dim); + + constraint_mask_host.resize(n_cells); + } + + + + template + template + void ReinitHelper::get_cell_data(const CellFilter &cell, + const unsigned int cell_id) + { + cell->get_dof_indices(local_dof_indices); + + for (unsigned int i=0; i> &q_points = fe_values.get_quadrature_points(); + memcpy(&q_points_host[cell_id*padding_length], &q_points[0], + q_points_per_cell*sizeof(Point)); + } + + if (update_flags & update_JxW_values) + { + std::vector JxW_values_double = fe_values.get_JxW_values(); + const unsigned int offset = cell_id*padding_length; + for (unsigned int i=0; i(JxW_values_double[i]); + } + + if (update_flags & update_gradients) + { + const std::vector> &inv_jacobians = + fe_values.get_inverse_jacobians(); + memcpy(&inv_jacobian_host[cell_id*padding_length*dim*dim], &inv_jacobians[0], + q_points_per_cell*sizeof(DerivativeForm<1,dim,dim>)); + } + } + + + + template + void ReinitHelper::alloc_and_copy_arrays(const unsigned int color) + { + const unsigned int n_cells = data->n_cells[color]; + + // Local-to-global mapping + if (data->parallelization_scheme == MatrixFree::parallel_over_elem) + internal::transpose_in_place(local_to_global_host, n_cells, padding_length); + + alloc_and_copy(&data->local_to_global[color], local_to_global_host, + n_cells * padding_length); + + // Quadrature points + if (update_flags & update_quadrature_points) + { + if (data->parallelization_scheme == MatrixFree::parallel_over_elem) + internal::transpose_in_place(q_points_host, n_cells, padding_length); + + alloc_and_copy(&data->q_points[color], q_points_host, + n_cells*padding_length); + } + + // Jacobian determinants/quadrature weights + if (update_flags & update_JxW_values) + { + if (data->parallelization_scheme == MatrixFree::parallel_over_elem) + internal::transpose_in_place(JxW_host, n_cells, padding_length); + + alloc_and_copy(&data->JxW[color], JxW_host, n_cells*padding_length); + } + + // Inverse jacobians + if (update_flags & update_gradients) + { + // Reorder so that all J_11 elements are together, all J_12 elements are + // together, etc., i.e., reorder indices from + // cell_id*q_points_per_cell*dim*dim + q*dim*dim +i to + // i*q_points_per_cell*n_cells + cell_id*q_points_per_cell+q + 
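 + // For illustration (ignoring padding): with n_cells = 2,
 + // q_points_per_cell = 4 and dim = 2, the J_11 entry (i = 0) of cell 1 at
 + // quadrature point 2 moves from index 1*4*4 + 2*4 + 0 = 24 to index
 + // 0*4*2 + 1*4 + 2 = 6.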
internal::transpose_in_place(inv_jacobian_host, padding_length*n_cells, dim*dim); + + // Transpose second time means we get the following index order: + // q*n_cells*dim*dim + i*n_cells + cell_id which is good for an + // element-level parallelization + if (data->parallelization_scheme == MatrixFree::parallel_over_elem) + internal::transpose_in_place(inv_jacobian_host, n_cells*dim*dim, padding_length); + + alloc_and_copy(&data->inv_jacobian[color], inv_jacobian_host, + n_cells*dim*dim*padding_length); + } + + alloc_and_copy(&data->constraint_mask[color], constraint_mask_host, n_cells); + } + + + + template + std::vector get_conflict_indices( + const FilteredIterator::active_cell_iterator> &cell, + const ConstraintMatrix &constraints) + { + std::vector local_dof_indices( + cell->get_fe().dofs_per_cell); + cell->get_dof_indices(local_dof_indices); + constraints.resolve_indices(local_dof_indices); + + return local_dof_indices; + } + + + + template + __global__ void copy_constrained_dofs( + const dealii::types::global_dof_index *constrained_dofs, + const unsigned int n_constrained_dofs, + const Number *src, + Number *dst) + { + const unsigned int dof = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * + blockIdx.y); + if (dof + __global__ void set_constrained_dofs( + const dealii::types::global_dof_index *constrained_dofs, + const unsigned int n_constrained_dofs, + Number val, + Number *dst) + { + const unsigned int dof = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * + blockIdx.y); + if (dof < n_constrained_dofs) + dst[constrained_dofs[dof]] = val; + } + + + + template + __global__ void apply_kernel_shmem(const functor &func, + const typename MatrixFree::Data gpu_data, + const Number *src, + Number *dst) + { + const unsigned int cells_per_block = cells_per_block_shmem( + dim, functor::n_dofs_1d-1); + + // TODO make use of dynamically allocated shared memory + __shared__ Number values[cells_per_block*functor::n_local_dofs]; + __shared__ Number gradients[dim][cells_per_block*functor::n_q_points]; + + const unsigned int local_cell = threadIdx.x / functor::n_dofs_1d; + const unsigned int cell = local_cell + cells_per_block * + (blockIdx.x+gridDim.x*blockIdx.y); + + Number *gq[dim]; + for (int d=0; d shared_data( + &values[local_cell*functor::n_local_dofs], gq); + + if (cell < gpu_data.n_cells) + func(cell, &gpu_data, &shared_data, src, dst); + } + } + + + + template + MatrixFree::MatrixFree() + : + constrained_dofs(nullptr), + padding_length(0) + {} + + + + template + void MatrixFree::reinit(const Mapping &mapping, + const DoFHandler &dof_handler, + const ConstraintMatrix &constraints, + const Quadrature<1> &quad, + const AdditionalData additional_data) + { + if (typeid(Number) == typeid(double)) + cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); + + const UpdateFlags &update_flags = additional_data.mapping_update_flags; + + if (additional_data.parallelization_scheme != parallel_over_elem && + additional_data.parallelization_scheme != parallel_in_elem) + AssertThrow(false, ExcMessage("Invalid parallelization scheme.")); + + this->parallelization_scheme = additional_data.parallelization_scheme; + + //TODO: only free if we actually need arrays of different length + free(); + + const FiniteElement &fe = dof_handler.get_fe(); + + fe_degree = fe.degree; + //TODO this should be a templated parameter + const unsigned int n_dofs_1d = fe_degree+1; + const unsigned int n_q_points_1d = quad.size(); + + Assert(n_dofs_1d == n_q_points_1d, + ExcMessage("n_q_points_1d must be equal to 
fe_degree+1.")); + + // Set padding length to the closest power of two larger than or equal to the + // number of threads. + padding_length = 1 << static_cast(std::ceil(dim*std::log2(fe_degree+1.))); + + dofs_per_cell = fe.dofs_per_cell; + q_points_per_cell = std::pow(n_q_points_1d, dim); + + const ::dealii::internal::MatrixFreeFunctions::ShapeInfo shape_info(quad, fe); + + unsigned int size_shape_values = n_dofs_1d*n_q_points_1d*sizeof(Number); + + cudaError_t cuda_error = cudaMemcpyToSymbol(internal::global_shape_values, + &shape_info.shape_values_number[0], + size_shape_values, + 0, + cudaMemcpyHostToDevice); + AssertCuda(cuda_error); + + if (update_flags & update_gradients) + { + cuda_error = cudaMemcpyToSymbol(internal::global_shape_gradients, + &shape_info.shape_gradient_number[0], + size_shape_values, + 0, + cudaMemcpyHostToDevice); + AssertCuda(cuda_error); + } + + // Setup the number of cells per CUDA thread block + cells_per_block = cells_per_block_shmem(dim, fe_degree); + + internal::ReinitHelper helper(this, mapping, fe, quad, + shape_info, update_flags); + + // Create a graph coloring + typedef FilteredIterator::active_cell_iterator> CellFilter; + CellFilter begin(IteratorFilters::LocallyOwnedCell(), dof_handler.begin_active()); + CellFilter end(IteratorFilters::LocallyOwnedCell(), dof_handler.end()); + typedef std::function (CellFilter const &)> fun_type; + const fun_type &fun = static_cast(std::bind( + &internal::get_conflict_indices, + std::placeholders::_1, + constraints)); + + std::vector> graph = + GraphColoring::make_graph_coloring( + begin, end, fun); + n_colors = graph.size(); + + helper.setup_color_arrays(n_colors); + for (unsigned int i=0; i::iterator cell = graph[i].begin(), + end_cell = graph[i].end(); + for (unsigned int cell_id=0; cell != end_cell; ++cell, ++cell_id) + helper.get_cell_data(*cell, cell_id); + + helper.alloc_and_copy_arrays(i); + } + + // Setup row starts + row_start[0] = 0; + for (unsigned int i=0; i(n_constrained_dofs) / + static_cast(BLOCK_SIZE)); + const unsigned int constraint_x_n_blocks = std::round(std::sqrt(constraint_n_blocks)); + const unsigned int constraint_y_n_blocks = std::ceil(static_cast(constraint_n_blocks) / + static_cast(constraint_x_n_blocks)); + + constraint_grid_dim = dim3(constraint_x_n_blocks, constraint_y_n_blocks); + constraint_block_dim = dim3(BLOCK_SIZE); + + std::vector constrained_dofs_host(n_constrained_dofs); + + unsigned int i_constraint = 0; + const unsigned int n_dofs = dof_handler.n_dofs(); + for (unsigned int i=0; i + MatrixFree::Data + MatrixFree::get_data(unsigned int color) const + { + Data data_copy; + data_copy.q_points = q_points[color]; + data_copy.local_to_global = local_to_global[color]; + data_copy.inv_jacobian = inv_jacobian[color]; + data_copy.JxW = JxW[color]; + data_copy.constraint_mask = constraint_mask[color]; + data_copy.n_cells = n_cells[color]; + data_copy.padding_length = padding_length; + data_copy.row_start = row_start[color]; + + return data_copy; + } + + + + template + void MatrixFree::free() + { + for (unsigned int i=0; i < q_points.size(); ++i) + { + if (q_points[i] != nullptr) + { + cudaError_t cuda_error = cudaFree(q_points[i]); + AssertCuda(cuda_error); + q_points[i] = nullptr; + } + } + + for (unsigned int i=0; i < local_to_global.size(); ++i) + { + if (local_to_global[i] != nullptr) + { + cudaError_t cuda_error = cudaFree(local_to_global[i]); + AssertCuda(cuda_error); + local_to_global[i] = nullptr; + } + } + + for (unsigned int i=0; i < inv_jacobian.size(); ++i) + { + if 
(inv_jacobian[i] != nullptr) + { + cudaError_t cuda_error = cudaFree(inv_jacobian[i]); + AssertCuda(cuda_error); + inv_jacobian[i] = nullptr; + } + } + + for (unsigned int i=0; i < JxW.size(); ++i) + { + if (JxW[i] != nullptr) + { + cudaError_t cuda_error = cudaFree(JxW[i]); + AssertCuda(cuda_error); + JxW[i] = nullptr; + } + } + + for (unsigned int i=0; i < constraint_mask.size(); ++i) + { + if (constraint_mask[i] != nullptr) + { + cudaError_t cuda_error = cudaFree(constraint_mask[i]); + AssertCuda(cuda_error); + constraint_mask[i] = nullptr; + } + } + + + q_points.clear(); + local_to_global.clear(); + inv_jacobian.clear(); + JxW.clear(); + constraint_mask.clear(); + + if (constrained_dofs != nullptr) + { + cudaError_t cuda_error = cudaFree(constrained_dofs); + AssertCuda(cuda_error); + constrained_dofs = nullptr; + } + } + + + + template + void MatrixFree::copy_constrained_values(const CUDAVector &src, + CUDAVector &dst) const + { + internal::copy_constrained_dofs <<>> ( + constrained_dofs, n_constrained_dofs, src.get_values(), dst.get_values()); + } + + + + template + void MatrixFree::set_constrained_values(Number val, + CUDAVector &dst) const + { + internal::set_constrained_dofs <<>>( + constrained_dofs, n_constrained_dofs, val, dst.get_values()); + } + + + + template + unsigned int MatrixFree::get_padding_length() const + { + return padding_length; + } + + + + template + template + void MatrixFree::cell_loop(const functor &func, + const CUDAVector &src, + CUDAVector &dst) const + { + for (unsigned int i=0; i < n_colors; ++i) + internal::apply_kernel_shmem <<>> ( + func, get_data(i), src.get_values(), dst.get_values()); + } + + + + template + std::size_t MatrixFree::memory_consumption() const + { + // First compute the size of n_cells, row_starts, kernel launch parameters, + // and constrained_dofs + std::size_t bytes = n_cells.size()*sizeof(unsigned int)*2 + + 2*n_colors*sizeof(dim3) + n_constrained_dofs*sizeof(unsigned int); + + // For each color, add local_to_global, inv_jacobian, JxW, and q_points. + for (unsigned int i=0; i + + +DEAL_II_NAMESPACE_OPEN + + +namespace CUDAWrappers +{ + namespace internal + { + /** + * In this namespace, the evaluator routines that evaluate the tensor + * products are implemented. + * + * @ingroup CUDAWrappers + */ + // TODO: for now only the general variant is implemented + enum EvaluatorVariant + { + evaluate_general, + evaluate_symmetric, + evaluate_evenodd + }; + + + + /** + * Generic evaluator framework. + * + * @ingroup CUDAWrappers + */ + template + struct EvaluatorTensorProduct + {}; + + + + /** + * Internal evaluator for 1d-3d shape function using the tensor product form + * of the basis functions. + * + * @ingroup CUDAWrappers + */ + template + struct EvaluatorTensorProduct + { + static const unsigned int dofs_per_cell = + Utilities::fixed_int_power::value; + static const unsigned int n_q_points = + Utilities::fixed_int_power::value; + + __device__ EvaluatorTensorProduct(); + + /** + * Evaluate the values of a finite element function at the quadrature + * points. + */ + template + __device__ void values(const Number *in, Number *out) const; + + /** + * Evaluate the gradient of a finite element function at the quadrature + * points for a given @p direction. + */ + template + __device__ void gradients(const Number *in, Number *out) const; + + /** + * Helper function for values() and gradients(). 
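 + *
 + * Along the chosen @p direction this performs a one-dimensional
 + * contraction of the form out[q] = sum_k shape_data[q,k] * in[k],
 + * optionally adding to the previous content of @p out (@p add) and
 + * optionally writing the result in place (@p in_place).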
+ */ + template + __device__ void apply(Number shape_data[], + const Number *in, + Number *out) const; + + /** + * Evaluate the finite element function at the quadrature points. + */ + __device__ void value_at_quad_pts(Number *u); + + /** + * Helper function for integrate(). Integrate the finite element function. + */ + __device__ void integrate_value(Number *u); + + /** + * Evaluate the gradients of the finite element function at the quadrature + * points. + */ + __device__ void gradient_at_quad_pts(const Number *const u, + Number *grad_u[dim]); + + /** + * Helper function for integrate(). Integrate the gradients of the finite + * element function. + */ + template + __device__ void integrate_gradient(Number *u, + Number *grad_u[dim]); + }; + + + + template + __device__ EvaluatorTensorProduct::EvaluatorTensorProduct() + {} + + + + template + template + __device__ void EvaluatorTensorProduct::values(const Number *in, + Number *out) const + { + apply(global_shape_values, in, out); + } + + + + template + template + __device__ void EvaluatorTensorProduct::gradients(const Number *in, + Number *out) const + { + apply(global_shape_gradients, in, out); + } + + + + template + template + __device__ void EvaluatorTensorProduct::apply(Number shape_data[], + const Number *in, + Number *out) const + { + const unsigned int i = (dim == 1) ? 0 : threadIdx.x%n_q_points_1d; + const unsigned int j = (dim == 3) ? threadIdx.y : 0; + const unsigned int q = + (dim == 1) ? (threadIdx.x%n_q_points_1d) : + (dim == 2) ? threadIdx.y : + threadIdx.z; + + // This loop simply multiply the shape function at the quadrature point by + // the value finite element coefficient. + Number t = 0; + for (int k=0; k + inline + __device__ void EvaluatorTensorProduct::value_at_quad_pts(Number *u) + { + switch (dim) + { + case 1: + { + values<0, true, false, true>(u, u); + + break; + } + case 2: + { + values<0, true, false, true>(u, u); + __syncthreads(); + values<1, true, false, true>(u, u); + + break; + } + case 3: + { + values<0, true, false, true>(u, u); + __syncthreads(); + values<1, true, false, true>(u, u); + __syncthreads(); + values<2, true, false, true>(u, u); + + break; + } + default: + { + // Do nothing. We should throw but we can't from a __device__ function. + } + } + } + + + + template + inline + __device__ void EvaluatorTensorProduct::integrate_value(Number *u) + { + switch (dim) + { + case 1: + { + values<0, false, false, true> (u,u); + + break; + } + case 2: + { + values<0, false, false, true> (u,u); + __syncthreads(); + values<1, false, false, true> (u,u); + + break; + } + case 3: + { + values<0, false, false, true> (u,u); + __syncthreads(); + values<1, false, false, true> (u,u); + __syncthreads(); + values<2, false, false, true> (u,u); + + break; + } + default: + { + // Do nothing. We should throw but we can't from a __device__ function. 
+ } + } + } + + + + template + inline + __device__ void EvaluatorTensorProduct::gradient_at_quad_pts( + const Number *const u, + Number *grad_u[dim]) + { + switch (dim) + { + case 1: + { + gradients<0, true, false, false>(u, grad_u[0]); + + break; + } + case 2: + { + gradients<0, true, false, false>(u, grad_u[0]); + values<0, true, false, false>(u, grad_u[1]); + + __syncthreads(); + + values<1, true, false, true>(grad_u[0], grad_u[0]); + gradients<1, true, false, true>(grad_u[1], grad_u[1]); + + break; + } + case 3: + { + gradients<0, true, false, false>(u, grad_u[0]); + values<0, true, false, false>(u, grad_u[1]); + values<0, true, false, false>(u, grad_u[2]); + + __syncthreads(); + + values<1, true, false, true>(grad_u[0], grad_u[0]); + gradients<1, true, false, true>(grad_u[1], grad_u[1]); + values<1, true, false, true>(grad_u[2], grad_u[2]); + + __syncthreads(); + + values<2, true, false, true>(grad_u[0], grad_u[0]); + values<2, true, false, true>(grad_u[1], grad_u[1]); + gradients<2, true, false, true>(grad_u[2], grad_u[2]); + + break; + } + default: + { + // Do nothing. We should throw but we can't from a __device__ function. + } + } + } + + + + template + template + inline + __device__ void EvaluatorTensorProduct::integrate_gradient( + Number *u, + Number *grad_u[dim]) + { + switch (dim) + { + case 1: + { + gradients<0, false, add, false> (grad_u[dim], u); + + break; + } + case 2: + { + gradients<0, false, false, true> (grad_u[0], grad_u[0]); + values<0, false, false, true> (grad_u[1], grad_u[1]); + + __syncthreads(); + + values<1, false, add, false> (grad_u[0], u); + __syncthreads(); + gradients<1, false, true, false> (grad_u[1], u); + + break; + } + case 3: + { + gradients<0, false, false, true> (grad_u[0], grad_u[0]); + values<0, false, false, true> (grad_u[1], grad_u[1]); + values<0, false, false, true> (grad_u[2], grad_u[2]); + + __syncthreads(); + + values<1, false, false, true> (grad_u[0], grad_u[0]); + gradients<1, false, false, true> (grad_u[1], grad_u[1]); + values<1, false, false, true> (grad_u[2], grad_u[2]); + + __syncthreads(); + + values<2, false, add, false> (grad_u[0], u); + __syncthreads(); + values<2, false, true, false> (grad_u[1], u); + __syncthreads(); + gradients<2, false, true, false> (grad_u[2], u); + + break; + } + default: + { + // Do nothing. We should throw but we can't from a __device__ function. + } + } + } + } +} + +DEAL_II_NAMESPACE_CLOSE + +#endif diff --git a/source/matrix_free/cuda_matrix_free.cu b/source/matrix_free/cuda_matrix_free.cu new file mode 100644 index 0000000000..7c18c07735 --- /dev/null +++ b/source/matrix_free/cuda_matrix_free.cu @@ -0,0 +1,29 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2016 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. 
+// +// --------------------------------------------------------------------- + +#include + +#ifdef DEAL_II_WITH_CUDA + +DEAL_II_NAMESPACE_OPEN + +namespace CUDAWrappers +{ +#include "cuda_matrix_free.inst" +} + +DEAL_II_NAMESPACE_CLOSE + +#endif diff --git a/source/matrix_free/cuda_matrix_free.inst.in b/source/matrix_free/cuda_matrix_free.inst.in new file mode 100644 index 0000000000..f0f2ce93a5 --- /dev/null +++ b/source/matrix_free/cuda_matrix_free.inst.in @@ -0,0 +1,21 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2016 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + + +for (deal_II_dimension : DIMENSIONS) +{ + template class MatrixFree<deal_II_dimension, float>; + template class MatrixFree<deal_II_dimension, double>; +}
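
Taken together, the pieces above are meant to be combined roughly as in the following sketch of a simple mass-operator application. Everything in the sketch is illustrative: the names MassOperatorQuad, LocalMassOperator, mf_data, src and dst, the choice of double precision and of a Gauss quadrature are assumptions layered on top of the interfaces introduced by this commit, not part of the commit itself.

#include <deal.II/base/utilities.h>
#include <deal.II/matrix_free/cuda_fe_evaluation.cuh>
#include <deal.II/matrix_free/cuda_matrix_free.h>

using namespace dealii;

// Operation applied at each quadrature point: test the interpolated value,
// i.e., the cell-local part of a mass-matrix-vector product.
template <int dim, int fe_degree>
struct MassOperatorQuad
{
  __device__ void operator()(
    CUDAWrappers::FEEvaluation<dim, fe_degree> *fe_eval,
    const unsigned int                          q_point) const
  {
    fe_eval->submit_value(fe_eval->get_value(q_point), q_point);
  }
};

// Cell-local operator handed to MatrixFree::cell_loop(). The static members
// mirror what the apply_kernel_shmem() launch code reads from the functor.
template <int dim, int fe_degree>
struct LocalMassOperator
{
  static const unsigned int n_dofs_1d = fe_degree + 1;
  static const unsigned int n_local_dofs =
    Utilities::fixed_int_power<fe_degree + 1, dim>::value;
  static const unsigned int n_q_points =
    Utilities::fixed_int_power<fe_degree + 1, dim>::value;

  __device__ void operator()(
    const unsigned int                                          cell,
    const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data,
    CUDAWrappers::SharedData<dim, double>                      *shared_data,
    const double                                               *src,
    double                                                     *dst) const
  {
    CUDAWrappers::FEEvaluation<dim, fe_degree> fe_eval(cell, gpu_data,
                                                       shared_data);
    // Read local values, interpolate to quadrature points, apply the
    // quadrature-point operation, integrate, and write back.
    fe_eval.read_dof_values(src);
    fe_eval.evaluate(true, false);
    fe_eval.apply_quad_point_operations(MassOperatorQuad<dim, fe_degree>());
    fe_eval.integrate(true, false);
    fe_eval.distribute_local_to_global(dst);
  }
};

// Host side (sketch): build the matrix-free data once, then apply the operator.
//   CUDAWrappers::MatrixFree<dim, double> mf_data;
//   mf_data.reinit(mapping, dof_handler, constraints, QGauss<1>(fe_degree + 1),
//                  typename CUDAWrappers::MatrixFree<dim, double>::AdditionalData());
//   mf_data.cell_loop(LocalMassOperator<dim, fe_degree>(), src, dst);
//   mf_data.copy_constrained_values(src, dst);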