https://gitweb.dealii.org/ - dealii.git/commitdiff
Add CUDA support for matrix-free.
author Bruno Turcksin <bruno.turcksin@gmail.com>
Sun, 14 May 2017 03:04:03 +0000 (23:04 -0400)
committer Bruno Turcksin <bruno.turcksin@gmail.com>
Mon, 15 May 2017 21:18:06 +0000 (17:18 -0400)
include/deal.II/matrix_free/cuda_fe_evaluation.cuh [new file with mode: 0644]
include/deal.II/matrix_free/cuda_matrix_free.h [new file with mode: 0644]
include/deal.II/matrix_free/cuda_matrix_free.templates.h [new file with mode: 0644]
include/deal.II/matrix_free/cuda_tensor_product_kernels.cuh [new file with mode: 0644]
source/matrix_free/cuda_matrix_free.cu [new file with mode: 0644]
source/matrix_free/cuda_matrix_free.inst.in [new file with mode: 0644]

diff --git a/include/deal.II/matrix_free/cuda_fe_evaluation.cuh b/include/deal.II/matrix_free/cuda_fe_evaluation.cuh
new file mode 100644 (file)
index 0000000..1944e7e
--- /dev/null
@@ -0,0 +1,391 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2016 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii__cuda_fe_evaluation_cuh
+#define dealii__cuda_fe_evaluation_cuh
+
+#include <deal.II/base/tensor.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/lac/cuda_vector.h>
+#include <deal.II/matrix_free/cuda_matrix_free.h>
+#include <deal.II/matrix_free/cuda_matrix_free.templates.h>
+#include <deal.II/matrix_free/cuda_tensor_product_kernels.cuh>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace CUDAWrappers
+{
+  namespace internal
+  {
+    template <int dim, int fe_degree, bool transpose, typename Number>
+    __device__ void resolve_hanging_nodes_shmem(Number            *values,
+                                                const unsigned int constr)
+    {
+      //TODO
+    }
+  }
+
+
+
+  /**
+   * This class provides all the functions necessary to evaluate functions at
+   * quadrature points and to perform cell integrations. In functionality, this
+   * class is similar to FEValues<dim>.
+   *
+   * This class has five template arguments:
+   *
+   * @tparam dim Dimension in which this class is to be used
+   *
+   * @tparam fe_degree Degree of the tensor product finite element with fe_degree+1
+   * degrees of freedom per coordinate direction
+   *
+   * @tparam n_q_points_1d Number of points in the quadrature formula in 1D,
+   * defaults to fe_degree+1
+   *
+   * @tparam n_components Number of vector components when solving a system of
+   * PDEs. If the same operation is applied to several components of a PDE (e.g.
+   * a vector Laplace equation), they can be applied simultaneously with one call
+   * (and often more efficiently). Defaults to 1
+   *
+   * @tparam Number Number format, usually @p double or @p float. Defaults to @p
+   * double
+   *
+   * @ingroup CUDAWrappers
+   *
+   * @author Karl Ljungkvist, Bruno Turcksin, 2016
+   */
+  template <int dim, int fe_degree, int n_q_points_1d = fe_degree+1,
+            int n_components_ = 1, typename Number = double>
+  class FEEvaluation
+  {
+  public:
+    typedef Number                                  value_type;
+    typedef Tensor<1,dim,Number>                    gradient_type;
+    typedef typename MatrixFree<dim, Number>::Data  data_type;
+    static const unsigned int dimension    =        dim;
+    static const unsigned int n_components =        n_components_;
+    static const unsigned int n_q_points   =
+      Utilities::fixed_int_power<n_q_points_1d,dim>::value;
+    static const unsigned int tensor_dofs_per_cell =
+      Utilities::fixed_int_power<fe_degree+1,dim>::value;
+
+    /**
+     * Constructor.
+     */
+    __device__ FEEvaluation(int cell_id,
+                            const data_type *data,
+                            SharedData<dim,Number> *shdata);
+
+    /**
+     * For the vector @p src, read out the values on the degrees of freedom of
+     * the current cell, and store them internally. Similar functionality as
+     * the function DoFAccessor::get_interpolated_dof_values when no
+     * constraints are present, but it also includes constraints from hanging
+     * nodes, so one can see it as a similar function to
+     * ConstraintMatrix::read_dof_values as well.
+     */
+    __device__ void read_dof_values(const Number *src);
+
+    /**
+     * Take the value stored internally on dof values of the current cell and
+     * sum them into the vector @p dst. The function also applies constraints
+     * during the write operation. The functionality is hence similar to the
+     * function ConstraintMatrix::distribute_local_to_global.
+     */
+    __device__  void distribute_local_to_global(Number *dst) const;
+
+    /**
+     * Evaluate the function values and the gradients of the FE function given
+     * at the DoF values in the input vector at the quadrature points on the
+     * unit cell. The function arguments specify which parts shall actually be
+     * computed. This function needs to be called before the functions
+     * @p get_value() or @p get_gradient() give useful information.
+     */
+    __device__ void evaluate(const bool evaluate_val,
+                             const bool evaluate_grad);
+
+    /**
+     * This function takes the values and/or gradients that are stored on
+     * quadrature points, tests them by all the basis functions/gradients on
+     * the cell and performs the cell integration. The two function arguments
+     * @p integrate_val and @p integrate_grad are used to enable/disable some
+     * of the values or the gradients.
+     */
+    __device__ void integrate(const bool integrate_val,
+                              const bool integrate_grad);
+
+    /**
+     * Return the value of a finite element function at quadrature point
+     * number @p q_point after a call to @p evaluate(true,...).
+     */
+    __device__ value_type get_value(const unsigned int q_point) const;
+
+    /**
+     * Write a value to the field containing the values on quadrature points
+     * with component @p q_point. Access to the same field as through @p
+     * get_value(). This specifies the value which is tested by all basis
+     * functions on the current cell and integrated over.
+     */
+    __device__ void submit_value(const value_type &val_in,
+                                 const unsigned int q_point);
+
+    /**
+     * Return the gradient of a finite element function at quadrature point
+     * number @p q_point after a call to @p evaluate(...,true).
+     */
+    __device__ gradient_type get_gradient(const unsigned int q_point) const;
+
+    /**
+     * Write a contribution that is tested by the gradient to the field
+     * containing the values on quadrature points with component @p q_point.
+     */
+    __device__ void submit_gradient(const gradient_type &grad_in,
+                                    const unsigned int q_point);
+
+    /**
+     * Apply the function @p func on every quadrature point.
+     */
+    template <typename functor>
+    __device__ void apply_quad_point_operations(const functor &func);
+
+  private:
+    unsigned int *local_to_global;
+    unsigned int n_cells;
+    unsigned int padding_length;
+
+    const unsigned int constraint_mask;
+
+    Number *inv_jac;
+    Number *JxW;
+
+    // Internal buffer
+    Number *values;
+    Number *gradients[dim];
+  };
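+
+  // Usage sketch (illustrative only; LocalLaplace and LaplaceQuad are
+  // hypothetical names): a cell-local operator passed to
+  // MatrixFree::cell_loop() constructs an FEEvaluation object and chains the
+  // read/evaluate/integrate/distribute calls, e.g. for a Laplace operator:
+  //
+  //   template <int dim, int fe_degree>
+  //   struct LocalLaplace
+  //   {
+  //     static const unsigned int n_dofs_1d    = fe_degree+1;
+  //     static const unsigned int n_local_dofs =
+  //       Utilities::fixed_int_power<fe_degree+1,dim>::value;
+  //     static const unsigned int n_q_points   =
+  //       Utilities::fixed_int_power<fe_degree+1,dim>::value;
+  //
+  //     __device__ void operator()(
+  //       const unsigned int cell,
+  //       const typename MatrixFree<dim,double>::Data *gpu_data,
+  //       SharedData<dim,double> *shared_data,
+  //       const double *src,
+  //       double *dst) const
+  //     {
+  //       FEEvaluation<dim,fe_degree> fe_eval(cell, gpu_data, shared_data);
+  //       fe_eval.read_dof_values(src);
+  //       fe_eval.evaluate(false, true);
+  //       fe_eval.apply_quad_point_operations(LaplaceQuad<dim,fe_degree>());
+  //       fe_eval.integrate(false, true);
+  //       fe_eval.distribute_local_to_global(dst);
+  //     }
+  //   };
+  //
+  // The static members n_dofs_1d, n_local_dofs, and n_q_points are required
+  // by the kernel launcher in cuda_matrix_free.templates.h; LaplaceQuad is a
+  // quadrature-point functor as sketched after apply_quad_point_operations().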
+
+
+
+  template <int dim, int fe_degree, int n_q_points_1d, int n_components_,
+            typename Number>
+  __device__
+  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
+  FEEvaluation(int cell_id,
+               const data_type *data,
+               SharedData<dim,Number> *shdata)
+    :
+    n_cells(data->n_cells),
+    padding_length(data->padding_length),
+    constraint_mask(data->constraint_mask[cell_id]),
+    values(shdata->values)
+  {
+    local_to_global = data->local_to_global + padding_length*cell_id;
+    inv_jac = data->inv_jacobian + padding_length*cell_id;
+    JxW = data->JxW + padding_length*cell_id;
+
+    for (unsigned int i=0; i < dim; ++i)
+      gradients[i] = shdata->gradients[i];
+  }
+
+
+
+  template <int dim, int fe_degree, int n_q_points_1d, int n_components_,
+            typename Number>
+  __device__ void
+  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
+  read_dof_values(const Number *src)
+  {
+    static_assert(n_components_ == 1,
+                  "This function only supports FE with one component");
+    const unsigned int idx = (threadIdx.x%n_q_points_1d)
+                             +(dim>1 ? threadIdx.y : 0)*n_q_points_1d
+                             +(dim>2 ? threadIdx.z : 0)*n_q_points_1d*n_q_points_1d;
+
+    const unsigned int src_idx = local_to_global[idx];
+    // Use the read-only data cache.
+    values[idx] = __ldg(&src[src_idx]);
+
+    if (constraint_mask)
+      internal::resolve_hanging_nodes_shmem<dim,fe_degree,false>(values,
+                                                                 constraint_mask);
+
+    __syncthreads();
+  }
+
+
+
+  template <int dim, int fe_degree, int n_q_points_1d, int n_components_,
+            typename Number>
+  __device__ void
+  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
+  distribute_local_to_global(Number *dst) const
+  {
+    static_assert(n_components_ == 1,
+                  "This function only supports FE with one component");
+    if (constraint_mask)
+      internal::resolve_hanging_nodes_shmem<dim,fe_degree,true>(values,
+                                                                constraint_mask);
+
+
+    const unsigned int idx = (threadIdx.x%n_q_points_1d)
+                             + (dim>1 ? threadIdx.y : 0) * n_q_points_1d
+                             + (dim>2 ? threadIdx.z : 0) * n_q_points_1d * n_q_points_1d;
+    const unsigned int destination_idx = local_to_global[idx];
+
+    dst[destination_idx] += values[idx];
+  }
+
+
+
+  template <int dim, int fe_degree, int n_q_points_1d, int n_components_,
+            typename Number>
+  __device__ void
+  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
+  evaluate(const bool evaluate_val, const bool evaluate_grad)
+  {
+    // First evaluate the gradients because the computation requires values
+    // that will be overwritten if evaluate_val is true
+    internal::EvaluatorTensorProduct<internal::EvaluatorVariant::evaluate_general,
+             dim, fe_degree,n_q_points_1d, Number> evaluator_tensor_product;
+    if (evaluate_grad == true)
+      {
+        evaluator_tensor_product.gradient_at_quad_pts(values, gradients);
+        __syncthreads();
+      }
+
+    if (evaluate_val == true)
+      {
+        evaluator_tensor_product.value_at_quad_pts(values);
+        __syncthreads();
+      }
+  }
+
+
+
+  template <int dim, int fe_degree, int n_q_points_1d, int n_components_,
+            typename Number>
+  __device__ void
+  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
+  integrate(const bool integrate_val, const bool integrate_grad)
+  {
+    internal::EvaluatorTensorProduct<internal::EvaluatorVariant::evaluate_general,
+             dim, fe_degree,n_q_points_1d, Number> evaluator_tensor_product;
+    if (integrate_val == true)
+      {
+        evaluator_tensor_product.integrate_value(values);
+        __syncthreads();
+        if (integrate_grad == true)
+          {
+            evaluator_tensor_product.integrate_gradient<true>(values, gradients);
+            __syncthreads();
+          }
+      }
+    else if (integrate_grad == true)
+      {
+        evaluator_tensor_product.integrate_gradient<false>(values, gradients);
+        __syncthreads();
+      }
+  }
+
+
+
+  template <int dim, int fe_degree, int n_q_points_1d, int n_components_,
+            typename Number>
+  __device__
+  typename FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::value_type
+  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
+  get_value(const unsigned int q_point) const
+  {
+    return values[q_point];
+  }
+
+
+
+  template <int dim, int fe_degree, int n_q_points_1d, int n_components_,
+            typename Number>
+  __device__ void
+  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
+  submit_value(const value_type &val_in, const unsigned int q_point)
+  {
+    values[q_point] = val_in * JxW[q_point];
+  }
+
+
+
+  template <int dim, int fe_degree, int n_q_points_1d, int n_components_,
+            typename Number>
+  __device__
+  typename FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::gradient_type
+  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
+  get_gradient(const unsigned int q_point) const
+  {
+    static_assert(n_components_ == 1,
+                  "This function only supports FE with one component");
+    // TODO optimize if the mesh is uniform
+    const Number *inv_jacobian = &inv_jac[q_point];
+    gradient_type grad;
+    for (int d_1=0; d_1<dim; ++d_1)
+      {
+        Number tmp = 0.;
+        for (int d_2=0; d_2<dim; ++d_2)
+          tmp += inv_jacobian[padding_length*n_cells*(dim*d_2+d_1)] *
+                 gradients[d_2][q_point];
+        grad[d_1] = tmp;
+      }
+
+    return grad;
+  }
+
+
+
+  template <int dim, int fe_degree, int n_q_points_1d, int n_components_,
+            typename Number>
+  __device__ void
+  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
+  submit_gradient(const gradient_type &grad_in, const unsigned int q_point)
+  {
+    // TODO optimize if the mesh is uniform
+    const Number *inv_jacobian = &inv_jac[q_point];
+    for (int d_1=0; d_1<dim; ++d_1)
+      {
+        Number tmp = 0.;
+        for (int d_2=0; d_2<dim; ++d_2)
+          tmp += inv_jacobian[n_cells*padding_length*(dim*d_1+d_2)] *
+                 grad_in[d_2];
+        gradients[d_1][q_point] = tmp * JxW[q_point];
+      }
+  }
+
+
+
+  template <int dim, int fe_degree, int n_q_points_1d, int n_components_,
+            typename Number>
+  template <typename functor>
+  __device__ void
+  FEEvaluation<dim, fe_degree, n_q_points_1d, n_components_, Number>::
+  apply_quad_point_operations(const functor &func)
+  {
+    const unsigned int q_point = (dim == 1 ? threadIdx.x%n_q_points_1d :
+                                  dim == 2 ? threadIdx.x%n_q_points_1d + n_q_points_1d *threadIdx.y :
+                                  threadIdx.x%n_q_points_1d + n_q_points_1d * (threadIdx.y +
+                                      n_q_points_1d*threadIdx.z));
+    func(this, q_point);
+
+    __syncthreads();
+  }
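+
+  // Usage sketch (illustrative only; LaplaceQuad is a hypothetical name): a
+  // functor passed to apply_quad_point_operations() receives a pointer to the
+  // calling FEEvaluation object and the local quadrature point index:
+  //
+  //   template <int dim, int fe_degree>
+  //   struct LaplaceQuad
+  //   {
+  //     __device__ void operator()(FEEvaluation<dim,fe_degree> *fe_eval,
+  //                                const unsigned int           q) const
+  //     {
+  //       fe_eval->submit_gradient(fe_eval->get_gradient(q), q);
+  //     }
+  //   };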
+}
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
diff --git a/include/deal.II/matrix_free/cuda_matrix_free.h b/include/deal.II/matrix_free/cuda_matrix_free.h
new file mode 100644 (file)
index 0000000..fa7ad08
--- /dev/null
@@ -0,0 +1,312 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2016 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+#ifndef dealii__cuda_matrix_free_h
+#define dealii__cuda_matrix_free_h
+
+#include <deal.II/base/config.h>
+
+#ifdef DEAL_II_WITH_CUDA
+
+#include <deal.II/base/quadrature.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/fe/mapping.h>
+#include <deal.II/fe/mapping_q1.h>
+#include <deal.II/fe/fe_update_flags.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/lac/cuda_vector.h>
+#include <cuda_runtime_api.h>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace CUDAWrappers
+{
+  // forward declaration
+  namespace internal
+  {
+    template <int dim, typename Number>
+    class ReinitHelper;
+  }
+
+  /**
+   * This class collects all the data that is stored for the matrix free
+   * implementation. The storage scheme is tailored towards several loops
+   * performed with the same data, i.e., typically doing many matrix-vector
+   * products or residual computations on the same mesh.
+   *
+   * This class does not implement any operations involving finite element basis
+   * functions, i.e., regarding the operation performed on the cells. For these
+   * operations, the class FEEvaluation is designed to use the data collected in
+   * this class.
+   *
+   * This class implements a loop over all cells (cell_loop()). This loop is
+   * scheduled in such a way that cells that share degrees of freedom
+   * are not worked on simultaneously, which implies that it is possible to
+   * write to vectors in parallel without having to explicitly synchronize
+   * access to these vectors and matrices. This class does not implement any
+   * shape values; all it does is cache the respective data. To implement
+   * finite element operations, use the class CUDAWrappers::FEEvaluation.
+   *
+   * This class traverses the cells in a different order than the usual
+   * Triangulation class in deal.II.
+   *
+   * @ingroup CUDAWrappers
+   */
+  template <int dim, typename Number=double>
+  class MatrixFree : public Subscriptor
+  {
+  public:
+    typedef Tensor<2, dim, Tensor<1,dim,Number>> jacobian_type;
+    // TODO this should really be a CUDAWrappers::Point
+    typedef Tensor<1, dim, Number> point_type;
+
+    // Use Number2 so we don't hide the template parameter Number
+    template <typename Number2>
+    using CUDAVector = ::dealii::LinearAlgebra::CUDAWrappers::Vector<Number2>;
+
+    /**
+     * Parallelization scheme used: parallel_in_elem (parallelism at the level
+     * of degrees of freedom) or parallel_over_elem (parallelism at the level of
+     * cells)
+     */
+    enum ParallelizationScheme {parallel_in_elem, parallel_over_elem};
+
+    struct AdditionalData
+    {
+      AdditionalData (
+        const ParallelizationScheme parallelization_scheme = parallel_in_elem,
+        const UpdateFlags mapping_update_flags = update_gradients | update_JxW_values)
+        :
+        parallelization_scheme(parallelization_scheme),
+        mapping_update_flags(mapping_update_flags)
+      {}
+
+      /**
+       * Number of colors created by the graph coloring algorithm.
+       */
+      unsigned int n_colors;
+      /**
+       * Parallelization scheme used: parallelization over degrees of freedom
+       * or over cells.
+       */
+      ParallelizationScheme parallelization_scheme;
+      /**
+       * This flag is used to determine which quantities should be cached. This
+       * class can cache data needed for gradient computations (inverse
+       * Jacobians), Jacobian determinants (JxW), quadrature points as well as
+       * data for Hessians (derivative of Jacobians). By default, only data for
+       * gradients and Jacobian determinants times quadrature weights, JxW, are
+       * cached. If quadrature points or second derivatives are needed, they
+       * must be specified by this field.
+       */
+      UpdateFlags mapping_update_flags;
+    };
+
+    /**
+     * Structure which is passed to the kernel. It is used to pass all the
+     * necessary information from the CPU to the GPU.
+     */
+    struct Data
+    {
+      point_type *q_points;
+      unsigned int *local_to_global;
+      Number *inv_jacobian;
+      Number *JxW;
+      unsigned int n_cells;
+      unsigned int padding_length;
+      unsigned int row_start;
+      unsigned int *constraint_mask;
+    };
+
+    /**
+     * Default constructor.
+     */
+    MatrixFree();
+
+    /**
+     * Return the length of the padding.
+     */
+    unsigned int get_padding_length() const;
+
+    /**
+     * Extracts the information needed to perform loops over cells. The
+     * DoFHandler and ConstraintMatrix describe the layout of degrees of
+     * freedom, the DoFHandler and the mapping describe the transformation from
+     * unit to real cell, and the finite element underlying the DoFHandler
+     * together with the quadrature formula describe the local operations.
+     */
+    void reinit(const Mapping<dim> &mapping,
+                const DoFHandler<dim> &dof_handler,
+                const ConstraintMatrix &constraints,
+                const Quadrature<1> &quad,
+                const AdditionalData additional_data = AdditionalData());
+
+    /**
+     * Initializes the data structures. Same as above but using a Q1 mapping.
+     */
+    void reinit(const DoFHandler<dim> &dof_handler,
+                const ConstraintMatrix &constraints,
+                const Quadrature<1> &quad,
+                const AdditionalData additional_data = AdditionalData());
+
+    /**
+     * Return the Data structure associated with @p color.
+     */
+    Data get_data(unsigned int color) const;
+
+    /**
+     * This method runs the loop over all cells and applies the local operation
+     * on each element in parallel. @p func is a functor which is applied on
+     * each color.
+     */
+    template <typename functor>
+    void cell_loop(const functor &func,
+                   const CUDAVector<Number> &src,
+                   CUDAVector<Number> &dst) const;
+
+    /**
+     * Copy the values of the constrained entries from @p src to @p dst.
+     */
+    void copy_constrained_values(const CUDAVector<Number> &src,
+                                 CUDAVector<Number> &dst) const;
+
+    /**
+     * Set the entries in @p dst corresponding to constrained degrees of
+     * freedom to @p val.
+     */
+    void set_constrained_values(const Number val, CUDAVector<Number> &dst) const;
+
+    /**
+     * Free all the memory allocated.
+     */
+    void free();
+
+    /**
+     * Return an approximation of the memory consumption of this class in bytes.
+     */
+    std::size_t memory_consumption() const;
+
+  private:
+    /**
+     * Parallelization scheme used: parallelization over degrees of freedom
+     * or over cells.
+     */
+    ParallelizationScheme parallelization_scheme;
+    /**
+     * Degree of the finite element used.
+     */
+    unsigned int fe_degree;
+    /**
+     * Number of degrees of freedom per cell.
+     */
+    unsigned int dofs_per_cell;
+    /**
+     * Number of constrained degrees of freedom.
+     */
+    unsigned int n_constrained_dofs;
+    /**
+     * Number of quadrature points per cell.
+     */
+    unsigned int q_points_per_cell;
+    /**
+     * Number of colors produced by the graph coloring algorithm.
+     */
+    unsigned int n_colors;
+    /**
+     * Number of cells in each color.
+     */
+    std::vector<unsigned int> n_cells;
+    /**
+     * Vector of pointers to the quadrature points associated to the cells of
+     * each color.
+     */
+    std::vector<point_type *> q_points;
+    /**
+     * Map the position in the local vector to the position in the global
+     * vector.
+     */
+    std::vector<unsigned int *> local_to_global;
+    /**
+     * Vector of pointers to the inverse Jacobians associated with the cells
+     * of each color.
+     */
+    std::vector<Number *> inv_jacobian;
+    /**
+     * Vector of pointers to the Jacobians times the quadrature weights (JxW)
+     * associated with the cells of each color.
+     */
+    std::vector<Number *> JxW;
+
+    // Constraints
+    unsigned int *constrained_dofs;
+    std::vector<unsigned int *> constraint_mask;
+    /**
+     * Grid dimensions associated to the different colors. The grid dimensions
+     * are used to launch the CUDA kernels.
+     */
+    std::vector<dim3> grid_dim;
+    /**
+     * Block dimensions associated to the different colors. The block dimensions
+     * are used to launch the CUDA kernels.
+     */
+    std::vector<dim3> block_dim;
+
+    // Parallelization parameter
+    unsigned int cells_per_block;
+    dim3 constraint_grid_dim;
+    dim3 constraint_block_dim;
+
+    unsigned int padding_length;
+    std::vector<unsigned int> row_start;
+
+    friend class internal::ReinitHelper<dim,Number>;
+  };
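+
+  // Usage sketch (illustrative only; mapping, dof_handler, constraints, src,
+  // dst, and LocalLaplace are hypothetical names): a typical operator
+  // application initializes the data once and then loops over all cells:
+  //
+  //   MatrixFree<dim,double> mf_data;
+  //   MatrixFree<dim,double>::AdditionalData additional_data(
+  //     MatrixFree<dim,double>::parallel_in_elem,
+  //     update_gradients | update_JxW_values);
+  //   mf_data.reinit(mapping, dof_handler, constraints,
+  //                  QGauss<1>(fe_degree+1), additional_data);
+  //
+  //   LocalLaplace<dim,fe_degree> local_operator;
+  //   mf_data.cell_loop(local_operator, src, dst);
+  //   mf_data.copy_constrained_values(src, dst);
+  //
+  // where src and dst are LinearAlgebra::CUDAWrappers::Vector<double> objects.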
+
+
+
+  // TODO find a better place to put these things
+  // Structure to pass the shared memory into a general user function.
+  template <int dim, typename Number>
+  struct SharedData
+  {
+    __device__ SharedData(Number *vd,
+                          Number *gq[dim])
+      :
+      values(vd)
+    {
+      for (int d=0; d<dim; ++d)
+        gradients[d] = gq[d];
+    }
+
+    Number *values;
+    Number *gradients[dim];
+  };
+
+
+
+  // This function determines the number of cells per block, possibly at compile
+  // time
+  // TODO this function should be rewritten using meta-programming
+  __host__ __device__ constexpr unsigned int cells_per_block_shmem(int dim,
+      int fe_degree)
+  {
+    return dim==2 ? (fe_degree==1 ? 32 :
+                     fe_degree==2 ? 8 :
+                     fe_degree==3 ? 4 :
+                     fe_degree==4 ? 4 :
+                     1) :
+           dim==3 ? (fe_degree==1 ? 8 :
+                     fe_degree==2 ? 2 :
+                     1) : 1;
+  }
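+
+  // Example (illustrative): in 3d with fe_degree=1 and Number=double, eight
+  // cells share one thread block, so the static shared memory allocated in
+  // the kernel launcher amounts to
+  //   8 cells * (1 value + dim gradient arrays) * 2^3 entries * 8 bytes = 2 KB.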
+}
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
+
+#endif
diff --git a/include/deal.II/matrix_free/cuda_matrix_free.templates.h b/include/deal.II/matrix_free/cuda_matrix_free.templates.h
new file mode 100644 (file)
index 0000000..8c93531
--- /dev/null
@@ -0,0 +1,712 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2016 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+#ifndef dealii__cuda_matrix_free_templates_h
+#define dealii__cuda_matrix_free_templates_h
+
+#include <deal.II/matrix_free/cuda_matrix_free.h>
+
+#ifdef DEAL_II_WITH_CUDA
+
+#include <deal.II/base/graph_coloring.h>
+#include <deal.II/fe/fe_values.h>
+#include <deal.II/grid/filtered_iterator.h>
+#include <deal.II/matrix_free/shape_info.h>
+#include <cuda_runtime_api.h>
+#include <functional>
+
+#define BLOCK_SIZE 128
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace CUDAWrappers
+{
+  namespace internal
+  {
+    // These variables are stored in the device constant memory.
+    // TODO: use a template parameter instead of a macro
+#define MAX_ELEM_DEGREE 10
+    __constant__ double global_shape_values[(MAX_ELEM_DEGREE+1) * (MAX_ELEM_DEGREE+1)];
+    __constant__ double global_shape_gradients[(MAX_ELEM_DEGREE+1) * (MAX_ELEM_DEGREE+1)];
+
+    template <typename Number>
+    using CUDAVector = ::dealii::LinearAlgebra::CUDAWrappers::Vector<Number>;
+
+    /**
+     * Transpose an N x M matrix stored in a one-dimensional array to an
+     * M x N matrix stored in a one-dimensional array.
+     */
+    template <typename Number>
+    void transpose(const unsigned int N, const unsigned int M, const Number *src, Number *dst)
+    {
+      // src is N X M
+      // dst is M X N
+      for (unsigned int i=0; i<N; ++i)
+        for (unsigned int j=0; j<M; ++j)
+          dst[j*N+i] = src[i*M+j];
+    }
+
+
+
+    /**
+     * Same as above but the source and the destination are the same vector.
+     */
+    template <typename Number>
+    void transpose_in_place(std::vector<Number> &array_host,
+                            const unsigned int n,
+                            const unsigned int m)
+    {
+      // convert to structure-of-array
+      std::vector<Number> old(array_host.size());
+      old.swap(array_host);
+
+      transpose(n, m, &old[0], &array_host[0]);
+    }
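+
+
+    // Example (illustrative): transpose_in_place(array_host, 2, 3) applied to
+    // the row-major 2 x 3 array {1,2,3,4,5,6} yields {1,4,2,5,3,6}, i.e., the
+    // element at (i,j) moves from index i*m+j to index j*n+i.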
+
+
+
+    /**
+     * Allocate an array on the device and copy @p array_host to it.
+     */
+    template <typename Number1, typename Number2>
+    void alloc_and_copy(Number1 **array_device, std::vector<Number2> &array_host,
+                        const unsigned int n)
+    {
+      cudaError_t error_code = cudaMalloc(array_device, n*sizeof(Number1));
+      AssertCuda(error_code);
+
+      error_code = cudaMemcpy(*array_device, &array_host[0], n*sizeof(Number1),
+                              cudaMemcpyHostToDevice);
+      AssertCuda(error_code);
+    }
+
+
+
+    /**
+     * Helper class to (re)initialize the MatrixFree object.
+     */
+    //TODO for now does not support hanging_nodes
+    template <int dim, typename Number>
+    class ReinitHelper
+    {
+    public:
+      ReinitHelper(MatrixFree<dim,Number>        *data,
+                   const Mapping<dim>            &mapping,
+                   const FiniteElement<dim, dim> &fe,
+                   const Quadrature<1>           &quad,
+                   const ::dealii::internal::MatrixFreeFunctions::ShapeInfo<Number> &shape_info,
+                   const UpdateFlags             &update_flags);
+
+      void setup_color_arrays(const unsigned int n_colors);
+
+      void setup_cell_arrays(const unsigned int color);
+
+      template <typename CellFilter>
+      void get_cell_data(const CellFilter &cell, const unsigned int cell_id);
+
+      void alloc_and_copy_arrays(const unsigned int cell);
+
+    private:
+      MatrixFree<dim, Number> *data;
+      // Host data
+      std::vector<unsigned int> local_to_global_host;
+      std::vector<Point<dim>> q_points_host;
+      std::vector<Number> JxW_host;
+      std::vector<Number> inv_jacobian_host;
+      std::vector<unsigned int> constraint_mask_host;
+      // Local buffer
+      std::vector<types::global_dof_index> local_dof_indices;
+      FEValues<dim> fe_values;
+      // Convert the default dof numbering to a lexicographic one
+      const std::vector<unsigned int> &lexicographic_inv;
+      std::vector<unsigned int> lexicographic_dof_indices;
+      const unsigned int fe_degree;
+      const unsigned int dofs_per_cell;
+      const unsigned int q_points_per_cell;
+      const UpdateFlags &update_flags;
+      const unsigned int padding_length;
+    };
+
+
+
+    template <int dim, typename Number>
+    ReinitHelper<dim,Number>::ReinitHelper(MatrixFree<dim,Number>   *data,
+                                           const Mapping<dim>       &mapping,
+                                           const FiniteElement<dim> &fe,
+                                           const Quadrature<1>      &quad,
+                                           const ::dealii::internal::MatrixFreeFunctions::ShapeInfo<Number> &shape_info,
+                                           const UpdateFlags        &update_flags)
+      :
+      data(data),
+      fe_degree(data->fe_degree),
+      dofs_per_cell(data->dofs_per_cell),
+      q_points_per_cell(data->q_points_per_cell),
+      fe_values(mapping, fe, Quadrature<dim>(quad),
+                update_inverse_jacobians | update_quadrature_points |
+                update_values | update_gradients | update_JxW_values),
+      lexicographic_inv(shape_info.lexicographic_numbering),
+      update_flags(update_flags),
+      padding_length(data->get_padding_length())
+    {
+      local_dof_indices.resize(data->dofs_per_cell);
+      lexicographic_dof_indices.resize(dofs_per_cell);
+    }
+
+
+
+    template <int dim, typename Number>
+    void ReinitHelper<dim, Number>::setup_color_arrays(const unsigned int n_colors)
+    {
+      data->n_cells.resize(n_colors);
+      data->grid_dim.resize(n_colors);
+      data->block_dim.resize(n_colors);
+      data->local_to_global.resize(n_colors);
+      data->constraint_mask.resize(n_colors);
+
+      data->row_start.resize(n_colors);
+
+      if (update_flags & update_quadrature_points)
+        data->q_points.resize(n_colors);
+
+      if (update_flags & update_JxW_values)
+        data->JxW.resize(n_colors);
+
+      if (update_flags & update_gradients)
+        data->inv_jacobian.resize(n_colors);
+    }
+
+
+
+    template <int dim, typename Number>
+    void ReinitHelper<dim,Number>::setup_cell_arrays(const unsigned int color)
+    {
+      const unsigned int n_cells = data->n_cells[color];
+      const unsigned int cells_per_block = data->cells_per_block;
+
+      // Setup kernel parameters
+      const double apply_n_blocks = std::ceil(static_cast<double>(n_cells)/
+                                              static_cast<double>(cells_per_block));
+      const unsigned int apply_x_n_blocks = std::round(std::sqrt(apply_n_blocks));
+      const unsigned int apply_y_n_blocks = std::ceil(apply_n_blocks/
+                                                      static_cast<double>(apply_x_n_blocks));
+
+      data->grid_dim[color] = dim3(apply_x_n_blocks, apply_y_n_blocks);
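+
+      // Example (illustrative): for n_cells=1000 and cells_per_block=8,
+      // apply_n_blocks=125, which is factored into an 11 x 12 grid; the
+      // excess block slots are skipped by the cell-index guard in the kernel.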
+
+      // TODO this should be a template parameter.
+      const unsigned int n_dofs_1d = fe_degree+1;
+
+      if (data->parallelization_scheme == MatrixFree<dim,Number>::parallel_in_elem)
+        {
+          if (dim==1)
+            data->block_dim[color] = dim3(n_dofs_1d*cells_per_block);
+          else if (dim==2)
+            data->block_dim[color] = dim3(n_dofs_1d*cells_per_block, n_dofs_1d);
+          else
+            data->block_dim[color] = dim3(n_dofs_1d*cells_per_block, n_dofs_1d, n_dofs_1d);
+        }
+      else
+        data->block_dim[color] = dim3(cells_per_block);
+
+      local_to_global_host.resize(n_cells*padding_length);
+
+      if (update_flags & update_quadrature_points)
+        q_points_host.resize(n_cells*padding_length);
+
+      if (update_flags & update_JxW_values)
+        JxW_host.resize(n_cells*padding_length);
+
+      if (update_flags & update_gradients)
+        inv_jacobian_host.resize(n_cells*padding_length*dim*dim);
+
+      constraint_mask_host.resize(n_cells);
+    }
+
+
+
+    template <int dim, typename Number>
+    template <typename CellFilter>
+    void ReinitHelper<dim,Number>::get_cell_data(const CellFilter &cell,
+                                                 const unsigned int cell_id)
+    {
+      cell->get_dof_indices(local_dof_indices);
+
+      for (unsigned int i=0; i<dofs_per_cell; ++i)
+        lexicographic_dof_indices[i] = local_dof_indices[lexicographic_inv[i]];
+
+      memcpy(&local_to_global_host[cell_id*padding_length], &lexicographic_dof_indices[0],
+             dofs_per_cell*sizeof(unsigned int));
+
+      fe_values.reinit(cell);
+
+      // Quadrature points
+      if (update_flags & update_quadrature_points)
+        {
+          const std::vector<Point<dim>> &q_points = fe_values.get_quadrature_points();
+          memcpy(&q_points_host[cell_id*padding_length], &q_points[0],
+                 q_points_per_cell*sizeof(Point<dim>));
+        }
+
+      if (update_flags & update_JxW_values)
+        {
+          std::vector<double> JxW_values_double = fe_values.get_JxW_values();
+          const unsigned int offset = cell_id*padding_length;
+          for (unsigned int i=0; i<q_points_per_cell; ++i)
+            JxW_host[i+offset] = static_cast<Number>(JxW_values_double[i]);
+        }
+
+      if (update_flags & update_gradients)
+        {
+          const std::vector<DerivativeForm<1,dim,dim>> &inv_jacobians =
+                                                      fe_values.get_inverse_jacobians();
+          memcpy(&inv_jacobian_host[cell_id*padding_length*dim*dim], &inv_jacobians[0],
+                 q_points_per_cell*sizeof(DerivativeForm<1,dim,dim>));
+        }
+    }
+
+
+
+    template <int dim, typename Number>
+    void ReinitHelper<dim, Number>::alloc_and_copy_arrays(const unsigned int color)
+    {
+      const unsigned int n_cells = data->n_cells[color];
+
+      // Local-to-global mapping
+      if (data->parallelization_scheme == MatrixFree<dim, Number>::parallel_over_elem)
+        internal::transpose_in_place(local_to_global_host, n_cells, padding_length);
+
+      alloc_and_copy(&data->local_to_global[color], local_to_global_host,
+                     n_cells * padding_length);
+
+      // Quadrature points
+      if (update_flags & update_quadrature_points)
+        {
+          if (data->parallelization_scheme == MatrixFree<dim, Number>::parallel_over_elem)
+            internal::transpose_in_place(q_points_host, n_cells, padding_length);
+
+          alloc_and_copy(&data->q_points[color], q_points_host,
+                         n_cells*padding_length);
+        }
+
+      // Jacobian determinants/quadrature weights
+      if (update_flags & update_JxW_values)
+        {
+          if (data->parallelization_scheme == MatrixFree<dim, Number>::parallel_over_elem)
+            internal::transpose_in_place(JxW_host, n_cells, padding_length);
+
+          alloc_and_copy(&data->JxW[color], JxW_host, n_cells*padding_length);
+        }
+
+      // Inverse jacobians
+      if (update_flags & update_gradients)
+        {
+          // Reorder so that all J_11 elements are together, all J_12 elements are
+          // together, etc., i.e., reorder indices from
+          // cell_id*q_points_per_cell*dim*dim + q*dim*dim +i to
+          // i*q_points_per_cell*n_cells + cell_id*q_points_per_cell+q
+          internal::transpose_in_place(inv_jacobian_host, padding_length*n_cells, dim*dim);
+
+          // Transposing a second time gives the following index order:
+          // q*n_cells*dim*dim + i*n_cells + cell_id, which is good for an
+          // element-level parallelization
+          if (data->parallelization_scheme == MatrixFree<dim, Number>::parallel_over_elem)
+            internal::transpose_in_place(inv_jacobian_host, n_cells*dim*dim, padding_length);
+
+          alloc_and_copy(&data->inv_jacobian[color], inv_jacobian_host,
+                         n_cells*dim*dim*padding_length);
+        }
+
+      alloc_and_copy(&data->constraint_mask[color], constraint_mask_host, n_cells);
+    }
+
+
+
+    template <int dim>
+    std::vector<types::global_dof_index> get_conflict_indices(
+      const FilteredIterator<typename DoFHandler<dim>::active_cell_iterator> &cell,
+      const ConstraintMatrix &constraints)
+    {
+      std::vector<types::global_dof_index> local_dof_indices(
+        cell->get_fe().dofs_per_cell);
+      cell->get_dof_indices(local_dof_indices);
+      constraints.resolve_indices(local_dof_indices);
+
+      return local_dof_indices;
+    }
+
+
+
+    template <typename Number>
+    __global__  void copy_constrained_dofs(
+      const dealii::types::global_dof_index *constrained_dofs,
+      const unsigned int                     n_constrained_dofs,
+      const Number                          *src,
+      Number                                *dst)
+    {
+      const unsigned int dof = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x *
+                                                           blockIdx.y);
+      if (dof < n_constrained_dofs)
+        dst[constrained_dofs[dof]] = src[constrained_dofs[dof]];
+    }
+
+
+
+    template <typename Number>
+    __global__ void set_constrained_dofs(
+      const dealii::types::global_dof_index *constrained_dofs,
+      const unsigned int                     n_constrained_dofs,
+      Number                                 val,
+      Number                                *dst)
+    {
+      const unsigned int dof = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x *
+                                                           blockIdx.y);
+      if (dof < n_constrained_dofs)
+        dst[constrained_dofs[dof]] = val;
+    }
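+
+
+    // Example (illustrative): with BLOCK_SIZE=128 and 1000 constrained dofs,
+    // reinit() launches ceil(1000/128) = 8 blocks arranged as a 3 x 3 grid;
+    // the guard on the linearized index above leaves the excess threads idle.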
+
+
+
+    template <int dim, typename Number, typename functor>
+    __global__ void apply_kernel_shmem(const functor &func,
+                                       const typename MatrixFree<dim,Number>::Data gpu_data,
+                                       const Number *src,
+                                       Number *dst)
+    {
+      const unsigned int cells_per_block = cells_per_block_shmem(
+                                             dim, functor::n_dofs_1d-1);
+
+      // TODO make use of dynamically allocated shared memory
+      __shared__ Number values[cells_per_block*functor::n_local_dofs];
+      __shared__ Number gradients[dim][cells_per_block*functor::n_q_points];
+
+      const unsigned int local_cell = threadIdx.x / functor::n_dofs_1d;
+      const unsigned int cell = local_cell + cells_per_block *
+                                (blockIdx.x+gridDim.x*blockIdx.y);
+
+      Number *gq[dim];
+      for (int d=0; d<dim; ++d)
+        gq[d] = &gradients[d][local_cell*functor::n_q_points];
+
+      SharedData<dim,Number> shared_data(
+        &values[local_cell*functor::n_local_dofs], gq);
+
+      if (cell < gpu_data.n_cells)
+        func(cell, &gpu_data, &shared_data, src, dst);
+    }
+  }
+
+
+
+  template <int dim, typename Number>
+  MatrixFree<dim,Number>::MatrixFree()
+    :
+    constrained_dofs(nullptr),
+    padding_length(0)
+  {}
+
+
+
+  template <int dim, typename Number>
+  void MatrixFree<dim,Number>::reinit(const Mapping<dim>     &mapping,
+                                      const DoFHandler<dim>  &dof_handler,
+                                      const ConstraintMatrix &constraints,
+                                      const Quadrature<1>    &quad,
+                                      const AdditionalData    additional_data)
+  {
+    if (typeid(Number) == typeid(double))
+      cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
+
+    const UpdateFlags &update_flags = additional_data.mapping_update_flags;
+
+    if (additional_data.parallelization_scheme != parallel_over_elem &&
+        additional_data.parallelization_scheme != parallel_in_elem)
+      AssertThrow(false, ExcMessage("Invalid parallelization scheme."));
+
+    this->parallelization_scheme = additional_data.parallelization_scheme;
+
+    //TODO: only free if we actually need arrays of different length
+    free();
+
+    const FiniteElement<dim> &fe = dof_handler.get_fe();
+
+    fe_degree = fe.degree;
+    //TODO this should be a template parameter
+    const unsigned int n_dofs_1d = fe_degree+1;
+    const unsigned int n_q_points_1d = quad.size();
+
+    Assert(n_dofs_1d == n_q_points_1d,
+           ExcMessage("n_q_points_1d must be equal to fe_degree+1."));
+
+    // Set padding length to the closest power of two larger than or equal to
+    // the number of degrees of freedom per cell (i.e., the number of threads
+    // used per cell).
+    padding_length = 1 << static_cast<unsigned int>(std::ceil(dim*std::log2(fe_degree+1.)));
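+    // Example (illustrative): for dim=3 and fe_degree=2 there are 3^3 = 27
+    // degrees of freedom per cell, so padding_length = 2^5 = 32.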
+
+    dofs_per_cell = fe.dofs_per_cell;
+    q_points_per_cell = std::pow(n_q_points_1d, dim);
+
+    const ::dealii::internal::MatrixFreeFunctions::ShapeInfo<Number> shape_info(quad, fe);
+
+    unsigned int size_shape_values = n_dofs_1d*n_q_points_1d*sizeof(Number);
+
+    cudaError_t cuda_error = cudaMemcpyToSymbol(internal::global_shape_values,
+                                                &shape_info.shape_values_number[0],
+                                                size_shape_values,
+                                                0,
+                                                cudaMemcpyHostToDevice);
+    AssertCuda(cuda_error);
+
+    if (update_flags & update_gradients)
+      {
+        cuda_error = cudaMemcpyToSymbol(internal::global_shape_gradients,
+                                        &shape_info.shape_gradient_number[0],
+                                        size_shape_values,
+                                        0,
+                                        cudaMemcpyHostToDevice);
+        AssertCuda(cuda_error);
+      }
+
+    // Setup the number of cells per CUDA thread block
+    cells_per_block = cells_per_block_shmem(dim, fe_degree);
+
+    internal::ReinitHelper<dim, Number> helper(this, mapping, fe, quad,
+                                               shape_info,  update_flags);
+
+    // Create a graph coloring
+    typedef FilteredIterator<typename DoFHandler<dim>::active_cell_iterator> CellFilter;
+    CellFilter begin(IteratorFilters::LocallyOwnedCell(), dof_handler.begin_active());
+    CellFilter end(IteratorFilters::LocallyOwnedCell(), dof_handler.end());
+    typedef std::function<std::vector<types::global_dof_index> (CellFilter const &)> fun_type;
+    const fun_type &fun = static_cast<fun_type>(std::bind(
+                                                  &internal::get_conflict_indices<dim>,
+                                                  std::placeholders::_1,
+                                                  constraints));
+
+    std::vector<std::vector<CellFilter>> graph =
+                                        GraphColoring::make_graph_coloring(
+                                          begin, end, fun);
+    n_colors = graph.size();
+
+    helper.setup_color_arrays(n_colors);
+    for (unsigned int i=0; i<n_colors; ++i)
+      {
+        n_cells[i] = graph[i].size();
+        helper.setup_cell_arrays(i);
+        typename std::vector<CellFilter>::iterator cell = graph[i].begin(),
+                                                   end_cell = graph[i].end();
+        for (unsigned int cell_id=0; cell != end_cell; ++cell, ++cell_id)
+          helper.get_cell_data(*cell, cell_id);
+
+        helper.alloc_and_copy_arrays(i);
+      }
+
+    // Setup row starts
+    row_start[0] = 0;
+    for (unsigned int i=0; i<n_colors-1; ++i)
+      row_start[i+1] = row_start[i] + n_cells[i] * get_padding_length();
+
+    // Constrained indices
+    n_constrained_dofs = constraints.n_constraints();
+
+    const unsigned int constraint_n_blocks = std::ceil(static_cast<double>(n_constrained_dofs) /
+                                                       static_cast<double>(BLOCK_SIZE));
+    const unsigned int constraint_x_n_blocks = std::round(std::sqrt(constraint_n_blocks));
+    const unsigned int constraint_y_n_blocks = std::ceil(static_cast<double>(constraint_n_blocks) /
+                                                         static_cast<double>(constraint_x_n_blocks));
+
+    constraint_grid_dim = dim3(constraint_x_n_blocks, constraint_y_n_blocks);
+    constraint_block_dim = dim3(BLOCK_SIZE);
+
+    std::vector<dealii::types::global_dof_index> constrained_dofs_host(n_constrained_dofs);
+
+    unsigned int i_constraint = 0;
+    const unsigned int n_dofs = dof_handler.n_dofs();
+    for (unsigned int i=0; i<n_dofs; ++i)
+      {
+        if (constraints.is_constrained(i))
+          {
+            constrained_dofs_host[i_constraint] = i;
+            ++i_constraint;
+          }
+      }
+
+    cuda_error = cudaMalloc(&constrained_dofs, n_constrained_dofs *
+                            sizeof(dealii::types::global_dof_index));
+    AssertCuda(cuda_error);
+
+    cuda_error = cudaMemcpy(constrained_dofs, &constrained_dofs_host[0],
+                            n_constrained_dofs * sizeof(dealii::types::global_dof_index),
+                            cudaMemcpyHostToDevice);
+    AssertCuda(cuda_error);
+  }
+
+
+
+  template <int dim, typename Number>
+  MatrixFree<dim,Number>::Data
+  MatrixFree<dim,Number>::get_data(unsigned int color) const
+  {
+    Data data_copy;
+    data_copy.q_points = q_points[color];
+    data_copy.local_to_global = local_to_global[color];
+    data_copy.inv_jacobian = inv_jacobian[color];
+    data_copy.JxW = JxW[color];
+    data_copy.constraint_mask = constraint_mask[color];
+    data_copy.n_cells = n_cells[color];
+    data_copy.padding_length = padding_length;
+    data_copy.row_start = row_start[color];
+
+    return data_copy;
+  }
+
+
+
+  template <int dim, typename Number>
+  void MatrixFree<dim, Number>::free()
+  {
+    for (unsigned int i=0; i < q_points.size(); ++i)
+      {
+        if (q_points[i] != nullptr)
+          {
+            cudaError_t cuda_error = cudaFree(q_points[i]);
+            AssertCuda(cuda_error);
+            q_points[i] = nullptr;
+          }
+      }
+
+    for (unsigned int i=0; i < local_to_global.size(); ++i)
+      {
+        if (local_to_global[i] != nullptr)
+          {
+            cudaError_t cuda_error = cudaFree(local_to_global[i]);
+            AssertCuda(cuda_error);
+            local_to_global[i] = nullptr;
+          }
+      }
+
+    for (unsigned int i=0; i < inv_jacobian.size(); ++i)
+      {
+        if (inv_jacobian[i] != nullptr)
+          {
+            cudaError_t cuda_error = cudaFree(inv_jacobian[i]);
+            AssertCuda(cuda_error);
+            inv_jacobian[i] = nullptr;
+          }
+      }
+
+    for (unsigned int i=0; i < JxW.size(); ++i)
+      {
+        if (JxW[i] != nullptr)
+          {
+            cudaError_t cuda_error = cudaFree(JxW[i]);
+            AssertCuda(cuda_error);
+            JxW[i] = nullptr;
+          }
+      }
+
+    for (unsigned int i=0; i < constraint_mask.size(); ++i)
+      {
+        if (constraint_mask[i] != nullptr)
+          {
+            cudaError_t cuda_error = cudaFree(constraint_mask[i]);
+            AssertCuda(cuda_error);
+            constraint_mask[i] = nullptr;
+          }
+      }
+
+
+    q_points.clear();
+    local_to_global.clear();
+    inv_jacobian.clear();
+    JxW.clear();
+    constraint_mask.clear();
+
+    if (constrained_dofs != nullptr)
+      {
+        cudaError_t cuda_error = cudaFree(constrained_dofs);
+        AssertCuda(cuda_error);
+        constrained_dofs = nullptr;
+      }
+  }
+
+
+
+  template <int dim, typename Number>
+  void MatrixFree<dim,Number>::copy_constrained_values(const CUDAVector<Number> &src,
+                                                       CUDAVector<Number>       &dst) const
+  {
+    internal::copy_constrained_dofs<Number> <<<constraint_grid_dim,constraint_block_dim>>> (
+      constrained_dofs, n_constrained_dofs, src.get_values(), dst.get_values());
+  }
+
+
+
+  template <int dim, typename Number>
+  void MatrixFree<dim,Number>::set_constrained_values(Number          val,
+                                                      CUDAVector<Number> &dst) const
+  {
+    internal::set_constrained_dofs<Number> <<<constraint_grid_dim, constraint_block_dim>>>(
+      constrained_dofs, n_constrained_dofs, val, dst.get_values());
+  }
+
+
+
+  template <int dim, typename Number>
+  unsigned int MatrixFree<dim,Number>::get_padding_length() const
+  {
+    return padding_length;
+  }
+
+
+
+  template <int dim, typename Number>
+  template <typename functor>
+  void MatrixFree<dim,Number>::cell_loop(const functor &func,
+                                         const CUDAVector<Number> &src,
+                                         CUDAVector<Number> &dst) const
+  {
+    for (unsigned int i=0; i < n_colors; ++i)
+      internal::apply_kernel_shmem<dim, Number, functor> <<<grid_dim[i],block_dim[i]>>> (
+        func, get_data(i), src.get_values(), dst.get_values());
+  }
+
+
+
+  template <int dim, typename Number>
+  std::size_t MatrixFree<dim, Number>::memory_consumption() const
+  {
+    // First compute the size of n_cells, row_start, kernel launch parameters,
+    // and constrained_dofs
+    std::size_t bytes = n_cells.size()*sizeof(unsigned int)*2 +
+                        2*n_colors*sizeof(dim3) + n_constrained_dofs*sizeof(unsigned int);
+
+    // For each color, add local_to_global, inv_jacobian, JxW, and q_points.
+    for (unsigned int i=0; i<n_colors; ++i)
+      {
+        bytes += n_cells[i]*padding_length*sizeof(unsigned int) +
+                 n_cells[i]*padding_length*dim*dim*sizeof(Number) +
+                 n_cells[i]*padding_length*sizeof(Number) +
+                 n_cells[i]*padding_length*sizeof(point_type) +
+                 n_cells[i]*sizeof(unsigned int);
+      }
+
+    return bytes;
+  }
+}
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
+
+#endif
diff --git a/include/deal.II/matrix_free/cuda_tensor_product_kernels.cuh b/include/deal.II/matrix_free/cuda_tensor_product_kernels.cuh
new file mode 100644 (file)
index 0000000..fca3a43
--- /dev/null
@@ -0,0 +1,395 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+#ifndef dealii__cuda_tensor_product_kernels_h
+#define dealii__cuda_tensor_product_kernels_h
+
+#include <deal.II/base/config.h>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+
+namespace CUDAWrappers
+{
+  namespace internal
+  {
+    /**
+     * In this namespace, the evaluator routines that evaluate the tensor
+     * products are implemented.
+     *
+     * @ingroup CUDAWrappers
+     */
+    // TODO: for now only the general variant is implemented
+    enum EvaluatorVariant
+    {
+      evaluate_general,
+      evaluate_symmetric,
+      evaluate_evenodd
+    };
+
+
+
+    /**
+     * Generic evaluator framework.
+     *
+     * @ingroup CUDAWrappers
+     */
+    template <EvaluatorVariant variant, int dim, int fe_degree, int n_q_points_1d, typename Number>
+    struct EvaluatorTensorProduct
+    {};
+
+
+
+    /**
+     * Internal evaluator for 1d-3d shape functions using the tensor product form
+     * of the basis functions.
+     *
+     * @ingroup CUDAWrappers
+     */
+    template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+    struct EvaluatorTensorProduct<evaluate_general, dim, fe_degree,
+      n_q_points_1d, Number>
+    {
+      static const unsigned int dofs_per_cell =
+        Utilities::fixed_int_power<fe_degree+1,dim>::value;
+      static const unsigned int n_q_points =
+        Utilities::fixed_int_power<n_q_points_1d,dim>::value;
+
+      __device__ EvaluatorTensorProduct();
+
+      /**
+       * Evaluate the values of a finite element function at the quadrature
+       * points.
+       */
+      template <int direction, bool dof_to_quad, bool add, bool in_place>
+      __device__ void values(const Number *in, Number *out) const;
+
+      /**
+       * Evaluate the gradient of a finite element function at the quadrature
+       * points for a given @p direction.
+       */
+      template <int direction, bool dof_to_quad, bool add, bool in_place>
+      __device__ void gradients(const Number *in, Number *out) const;
+
+      /**
+       * Helper function for values() and gradients().
+       */
+      template <int direction, bool dof_to_quad, bool add, bool in_place>
+      __device__ void apply(Number shape_data[],
+                            const Number *in,
+                            Number       *out) const;
+
+      /**
+       * Evaluate the finite element function at the quadrature points.
+       */
+      __device__ void value_at_quad_pts(Number *u);
+
+      /**
+       * Helper function for integrate(). Integrate the finite element function.
+       */
+      __device__ void integrate_value(Number *u);
+
+      /**
+       * Evaluate the gradients of the finite element function at the quadrature
+       * points.
+       */
+      __device__ void gradient_at_quad_pts(const Number *const u,
+                                           Number *grad_u[dim]);
+
+      /**
+       * Helper function for integrate(). Integrate the gradients of the finite
+       * element function.
+       */
+      template <bool add>
+      __device__ void integrate_gradient(Number *u,
+                                         Number *grad_u[dim]);
+    };
+
+
+
+    template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+    __device__ EvaluatorTensorProduct<evaluate_general, dim, fe_degree,
+               n_q_points_1d, Number>::EvaluatorTensorProduct()
+    {}
+
+
+
+    template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+    template <int direction, bool dof_to_quad, bool add, bool in_place>
+    __device__ void EvaluatorTensorProduct<evaluate_general, dim, fe_degree,
+               n_q_points_1d, Number>::values(const Number *in,
+                                              Number *out) const
+    {
+      apply<direction, dof_to_quad, add, in_place>(global_shape_values, in, out);
+    }
+
+
+
+    template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+    template <int direction, bool dof_to_quad, bool add, bool in_place>
+    __device__ void EvaluatorTensorProduct<evaluate_general, dim, fe_degree,
+               n_q_points_1d, Number>::gradients(const Number *in,
+                                                 Number *out) const
+    {
+      apply<direction, dof_to_quad, add, in_place>(global_shape_gradients, in, out);
+    }
+
+
+
+    template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+    template <int direction, bool dof_to_quad, bool add, bool in_place>
+    __device__ void EvaluatorTensorProduct<evaluate_general, dim, fe_degree,
+               n_q_points_1d, Number>::apply(Number shape_data[],
+                                             const Number *in,
+                                             Number       *out) const
+    {
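+      // Map each thread to one output entry: the index along @p direction is
+      // produced by the contraction below, while the remaining local
+      // coordinates (i and, in 3D, j) are fixed for a given thread.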
+      const unsigned int i = (dim == 1) ? 0 : threadIdx.x%n_q_points_1d;
+      const unsigned int j = (dim == 3) ? threadIdx.y : 0;
+      const unsigned int q =
+        (dim == 1) ? (threadIdx.x%n_q_points_1d) :
+        (dim == 2) ? threadIdx.y :
+        threadIdx.z;
+
+      // This loop multiplies the 1D shape data with the finite element
+      // coefficients along the chosen direction and accumulates the result.
+      Number t = 0;
+      for (int k=0; k<n_q_points_1d; ++k)
+        {
+          const unsigned int shape_idx = dof_to_quad ? (q+k*n_q_points_1d) :
+                                         (k+q*n_q_points_1d);
+          const unsigned int source_idx =
+            (direction == 0) ? (k + n_q_points_1d*(i + n_q_points_1d*j)) :
+            (direction == 1) ? (i + n_q_points_1d*(k + n_q_points_1d*j)) :
+            (i + n_q_points_1d*(j + n_q_points_1d*k));
+          t += shape_data[shape_idx] * (in_place ? out[source_idx] : in[source_idx]);
+        }
+
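+      // When applying the contraction in place, wait until every thread has
+      // read its input before any thread overwrites the array with its result.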
+      if (in_place)
+        __syncthreads();
+
+      const unsigned int destination_idx =
+        (direction == 0) ? (q + n_q_points_1d*(i + n_q_points_1d*j)) :
+        (direction == 1) ? (i + n_q_points_1d*(q + n_q_points_1d*j)) :
+        (i + n_q_points_1d*(j + n_q_points_1d*q));
+
+      if (add)
+        out[destination_idx] += t;
+      else
+        out[destination_idx] = t;
+    }
+
+
+
+    template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+    inline
+    __device__ void EvaluatorTensorProduct<evaluate_general, dim, fe_degree,
+               n_q_points_1d, Number>::value_at_quad_pts(Number *u)
+    {
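+      // Sum factorization: interpolate the coefficients to the quadrature
+      // points one coordinate direction at a time. The transforms run in
+      // place, so threads synchronize between successive directions.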
+      switch (dim)
+        {
+        case 1:
+        {
+          values<0, true, false, true>(u, u);
+
+          break;
+        }
+        case 2:
+        {
+          values<0, true, false, true>(u, u);
+          __syncthreads();
+          values<1, true, false, true>(u, u);
+
+          break;
+        }
+        case 3:
+        {
+          values<0, true, false, true>(u, u);
+          __syncthreads();
+          values<1, true, false, true>(u, u);
+          __syncthreads();
+          values<2, true, false, true>(u, u);
+
+          break;
+        }
+        default:
+        {
+          // Do nothing. We should throw but we can't from a __device__ function.
+        }
+        }
+    }
+
+
+
+    template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+    inline
+    __device__ void EvaluatorTensorProduct<evaluate_general, dim, fe_degree,
+               n_q_points_1d, Number>::integrate_value(Number *u)
+    {
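+      // Transpose of value_at_quad_pts(): contract the quadrature-point
+      // values with the transposed shape matrix (dof_to_quad == false), again
+      // sweeping one direction at a time.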
+      switch (dim)
+        {
+        case 1:
+        {
+          values<0, false, false, true> (u,u);
+
+          break;
+        }
+        case 2:
+        {
+          values<0, false, false, true> (u,u);
+          __syncthreads();
+          values<1, false, false, true> (u,u);
+
+          break;
+        }
+        case 3:
+        {
+          values<0, false, false, true> (u,u);
+          __syncthreads();
+          values<1, false, false, true> (u,u);
+          __syncthreads();
+          values<2, false, false, true> (u,u);
+
+          break;
+        }
+        default:
+        {
+          // Do nothing. We should throw but we can't from a __device__ function.
+        }
+        }
+    }
+
+
+
+    template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+    inline
+    __device__ void EvaluatorTensorProduct<evaluate_general, dim, fe_degree,
+               n_q_points_1d, Number>::gradient_at_quad_pts(
+                 const Number *const u,
+                 Number *grad_u[dim])
+    {
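+      // For component d of the gradient, apply the 1D gradient matrix in
+      // direction d and the 1D value matrix in all other directions.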
+      switch (dim)
+        {
+        case 1:
+        {
+          gradients<0, true, false, false>(u, grad_u[0]);
+
+          break;
+        }
+        case 2:
+        {
+          gradients<0, true, false, false>(u, grad_u[0]);
+          values<0, true, false, false>(u, grad_u[1]);
+
+          __syncthreads();
+
+          values<1, true, false, true>(grad_u[0], grad_u[0]);
+          gradients<1, true, false, true>(grad_u[1], grad_u[1]);
+
+          break;
+        }
+        case 3:
+        {
+          gradients<0, true, false, false>(u, grad_u[0]);
+          values<0, true, false, false>(u, grad_u[1]);
+          values<0, true, false, false>(u, grad_u[2]);
+
+          __syncthreads();
+
+          values<1, true, false, true>(grad_u[0], grad_u[0]);
+          gradients<1, true, false, true>(grad_u[1], grad_u[1]);
+          values<1, true, false, true>(grad_u[2], grad_u[2]);
+
+          __syncthreads();
+
+          values<2, true, false, true>(grad_u[0], grad_u[0]);
+          values<2, true, false, true>(grad_u[1], grad_u[1]);
+          gradients<2, true, false, true>(grad_u[2], grad_u[2]);
+
+          break;
+        }
+        default:
+        {
+          // Do nothing. We should throw but we can't from a __device__ function.
+        }
+        }
+    }
+
+
+
+    template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+    template <bool add>
+    inline
+    __device__ void EvaluatorTensorProduct<evaluate_general, dim, fe_degree,
+               n_q_points_1d, Number>::integrate_gradient(
+                 Number *u,
+                 Number *grad_u[dim])
+    {
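+      // Transpose of gradient_at_quad_pts(): each gradient component is
+      // tested against the corresponding 1D matrices and the contributions
+      // are summed into u.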
+      switch (dim)
+        {
+        case 1:
+        {
+          gradients<0, false, add, false> (grad_u[0], u);
+
+          break;
+        }
+        case 2:
+        {
+          gradients<0, false, false, true> (grad_u[0], grad_u[0]);
+          values<0, false, false, true> (grad_u[1], grad_u[1]);
+
+          __syncthreads();
+
+          values<1, false, add, false> (grad_u[0], u);
+          __syncthreads();
+          gradients<1, false, true, false> (grad_u[1], u);
+
+          break;
+        }
+        case 3:
+        {
+          gradients<0, false, false, true> (grad_u[0], grad_u[0]);
+          values<0, false, false, true> (grad_u[1], grad_u[1]);
+          values<0, false, false, true> (grad_u[2], grad_u[2]);
+
+          __syncthreads();
+
+          values<1, false, false, true> (grad_u[0], grad_u[0]);
+          gradients<1, false, false, true> (grad_u[1], grad_u[1]);
+          values<1, false, false, true> (grad_u[2], grad_u[2]);
+
+          __syncthreads();
+
+          values<2, false, add, false> (grad_u[0], u);
+          __syncthreads();
+          values<2, false, true, false> (grad_u[1], u);
+          __syncthreads();
+          gradients<2, false, true, false> (grad_u[2], u);
+
+          break;
+        }
+        default:
+        {
+          // Do nothing. We should throw but we can't from a __device__ function.
+        }
+        }
+    }
+  }
+}
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
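
To make the index arithmetic in apply() concrete, the following host-side
reference (illustrative only, not part of the commit) spells out the
dim == 2, direction == 0, dof_to_quad == true case; on the GPU, each thread
computes exactly one iteration of the two outer loops:

// out(q, i) = sum_k shape(q, k) * in(k, i), with layouts matching apply():
// shape[q + k*n], in[k + n*i], out[q + n*i], where n == n_q_points_1d.
template <typename Number>
void apply_direction0_reference(const Number *shape,
                                const Number *in,
                                Number       *out,
                                const int     n)
{
  for (int i = 0; i < n; ++i)        // coordinate held fixed per thread
    for (int q = 0; q < n; ++q)      // output index along direction 0
      {
        Number t = 0;
        for (int k = 0; k < n; ++k)  // summed index along direction 0
          t += shape[q + k * n] * in[k + n * i];
        out[q + n * i] = t;
      }
}

The evaluator itself is meant to be chained inside a per-cell kernel. A
minimal sketch of a mass-operator cell contribution, assuming the cell's
coefficients already live in shared memory (the function name and the
quadrature-point scaling are hypothetical):

template <int dim, int fe_degree, typename Number>
__device__ void local_mass_apply(Number *u) // one cell's DoF values
{
  CUDAWrappers::internal::EvaluatorTensorProduct<
    CUDAWrappers::internal::evaluate_general, dim, fe_degree,
    fe_degree + 1, Number> evaluator;

  evaluator.value_at_quad_pts(u); // coefficients -> values at quadrature points
  __syncthreads();
  // ... multiply each u[q] by its JxW factor here ...
  evaluator.integrate_value(u);   // transpose: test against the shape values
}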
diff --git a/source/matrix_free/cuda_matrix_free.cu b/source/matrix_free/cuda_matrix_free.cu
new file mode 100644 (file)
index 0000000..7c18c07
--- /dev/null
@@ -0,0 +1,29 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2016 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#include <deal.II/matrix_free/cuda_matrix_free.templates.h>
+
+#ifdef DEAL_II_WITH_CUDA
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace CUDAWrappers
+{
+#include "cuda_matrix_free.inst"
+}
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
diff --git a/source/matrix_free/cuda_matrix_free.inst.in b/source/matrix_free/cuda_matrix_free.inst.in
new file mode 100644 (file)
index 0000000..f0f2ce9
--- /dev/null
@@ -0,0 +1,21 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2016 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+for (deal_II_dimension : DIMENSIONS)
+{
+    template class MatrixFree<deal_II_dimension,double>;
+    template class MatrixFree<deal_II_dimension,float>;
+}
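
The .inst.in file is not compiled directly: at build time deal.II's
expand_instantiations tool unrolls the for-loop over the configured
dimensions into a plain list of explicit instantiations, which
cuda_matrix_free.cu then pulls in via #include "cuda_matrix_free.inst"
inside namespace CUDAWrappers. For a 2D build the generated file would
contain, roughly:

template class MatrixFree<2,double>;
template class MatrixFree<2,float>;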
