https://gitweb.dealii.org/ - dealii.git/commitdiff
Merge computation of values and gradients in the matrix-free CUDA code 8839/head
author Peter Munch <peterrmuench@gmail.com>
Mon, 23 Sep 2019 13:38:40 +0000 (15:38 +0200)
committer Peter Munch <peterrmuench@gmail.com>
Tue, 24 Sep 2019 20:22:39 +0000 (22:22 +0200)
doc/news/changes/minor/20190924PeterMunch-1 [new file with mode: 0644]
include/deal.II/matrix_free/cuda_fe_evaluation.h
include/deal.II/matrix_free/cuda_matrix_free.templates.h
include/deal.II/matrix_free/cuda_tensor_product_kernels.h

diff --git a/doc/news/changes/minor/20190924PeterMunch-1 b/doc/news/changes/minor/20190924PeterMunch-1
new file mode 100644 (file)
index 0000000..f317f2e
--- /dev/null
@@ -0,0 +1,4 @@
+Improved: Evaluate/integrate the gradients in CUDAWrappers::EvaluatorTensorProduct
+with the collocation method if both values and gradients are requested.
+<br>
+(Peter Munch, 2019/09/24)
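
A note on why this works: the collocation approach rests on a standard identity. For a polynomial of degree <= p sampled at n_q >= p + 1 quadrature points, the 1D gradient matrix factorizes as D = D_co * S, where S interpolates nodal values to the quadrature points and D_co is the derivative matrix of the Lagrange basis built on the quadrature points themselves. Gradients can therefore be computed from values that have already been interpolated, which is what the fused kernels below exploit. A self-contained sketch verifying the identity (plain C++, independent of deal.II; all names are illustrative):

    // Check the collocation identity D == D_co * S for a degree-2 Lagrange
    // basis and 3-point Gauss quadrature on [0,1]. Plain C++17, no deal.II.
    #include <cassert>
    #include <cmath>
    #include <cstdio>
    #include <vector>

    // Value of the i-th Lagrange polynomial on 'nodes' at point x.
    double lagrange(const std::vector<double> &nodes, unsigned int i, double x)
    {
      double v = 1.;
      for (unsigned int j = 0; j < nodes.size(); ++j)
        if (j != i)
          v *= (x - nodes[j]) / (nodes[i] - nodes[j]);
      return v;
    }

    // Derivative of the i-th Lagrange polynomial at x (product rule).
    double lagrange_deriv(const std::vector<double> &nodes,
                          unsigned int               i,
                          double                     x)
    {
      double d = 0.;
      for (unsigned int k = 0; k < nodes.size(); ++k)
        {
          if (k == i)
            continue;
          double term = 1. / (nodes[i] - nodes[k]);
          for (unsigned int j = 0; j < nodes.size(); ++j)
            if (j != i && j != k)
              term *= (x - nodes[j]) / (nodes[i] - nodes[j]);
          d += term;
        }
      return d;
    }

    int main()
    {
      // Degree-2 element (3 equidistant nodes), 3-point Gauss quadrature.
      const std::vector<double> dofs = {0., 0.5, 1.};
      const double              g    = std::sqrt(3. / 5.) / 2.;
      const std::vector<double> quad = {0.5 - g, 0.5, 0.5 + g};
      const unsigned int        n    = 3;

      // S(q,i): basis i at quad point q; D(q,i): its derivative there;
      // D_co(q,i): derivative of the Lagrange basis *on the quad points*.
      double S[3][3], D[3][3], D_co[3][3];
      for (unsigned int q = 0; q < n; ++q)
        for (unsigned int i = 0; i < n; ++i)
          {
            S[q][i]    = lagrange(dofs, i, quad[q]);
            D[q][i]    = lagrange_deriv(dofs, i, quad[q]);
            D_co[q][i] = lagrange_deriv(quad, i, quad[q]);
          }

      // Verify D == D_co * S up to round-off.
      for (unsigned int q = 0; q < n; ++q)
        for (unsigned int i = 0; i < n; ++i)
          {
            double s = 0.;
            for (unsigned int k = 0; k < n; ++k)
              s += D_co[q][k] * S[k][i];
            assert(std::abs(s - D[q][i]) < 1e-10);
          }
      std::printf("collocation identity holds\n");
    }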
diff --git a/include/deal.II/matrix_free/cuda_fe_evaluation.h b/include/deal.II/matrix_free/cuda_fe_evaluation.h
index b2bb935ccfa457352bee0c4447edd6e00d16a1e7..1305e379fbcb38bf7202b92995fe19cd802c871a 100644 (file)
@@ -314,13 +314,18 @@ namespace CUDAWrappers
       n_q_points_1d,
       Number>
       evaluator_tensor_product;
-    if (evaluate_grad == true)
+    if (evaluate_val == true && evaluate_grad == true)
+      {
+        evaluator_tensor_product.value_and_gradient_at_quad_pts(values,
+                                                                gradients);
+        __syncthreads();
+      }
+    else if (evaluate_grad == true)
       {
         evaluator_tensor_product.gradient_at_quad_pts(values, gradients);
         __syncthreads();
       }
-
-    if (evaluate_val == true)
+    else if (evaluate_val == true)
       {
         evaluator_tensor_product.value_at_quad_pts(values);
         __syncthreads();
@@ -346,16 +351,15 @@ namespace CUDAWrappers
       n_q_points_1d,
       Number>
       evaluator_tensor_product;
-    if (integrate_val == true)
+    if (integrate_val == true && integrate_grad == true)
+      {
+        evaluator_tensor_product.integrate_value_and_gradient(values,
+                                                              gradients);
+      }
+    else if (integrate_val == true)
       {
         evaluator_tensor_product.integrate_value(values);
         __syncthreads();
-        if (integrate_grad == true)
-          {
-            evaluator_tensor_product.integrate_gradient<true>(values,
-                                                              gradients);
-            __syncthreads();
-          }
       }
     else if (integrate_grad == true)
       {
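
A rough operation count shows why the fused branches pay off. Each values<...>() or gradients<...>() call is one 1D contraction over all k^d point values (k = n_q_points_1d, d = dim; assuming n_q_points_1d == fe_degree + 1 so all matrices are square). Evaluating values and gradients separately needs

\[
N_{\mathrm{separate}} = d + d^2
\]

contractions (d to interpolate the values, plus d sweeps for each of the d gradient components), whereas the collocation path needs only

\[
N_{\mathrm{fused}} = d + d = 2d
\]

(d value contractions into quadrature space, then d collocation derivatives). In 3D that is 6 instead of 12 contractions per cell, with correspondingly fewer __syncthreads() barriers.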
diff --git a/include/deal.II/matrix_free/cuda_matrix_free.templates.h b/include/deal.II/matrix_free/cuda_matrix_free.templates.h
index ace2e804f762afe2c9d19f88c0976a3f56fd94be..cca2c7e758367a3e118c7e97421199d652eb68df 100644 (file)
@@ -26,6 +26,7 @@
 
 #  include <deal.II/dofs/dof_tools.h>
 
+#  include <deal.II/fe/fe_dgq.h>
 #  include <deal.II/fe/fe_values.h>
 
 #  include <deal.II/grid/filtered_iterator.h>
@@ -52,6 +53,10 @@ namespace CUDAWrappers
     __constant__ double
       global_shape_gradients[(max_elem_degree + 1) * (max_elem_degree + 1)];
 
+    // for collocation methods
+    __constant__ double
+      global_co_shape_gradients[(max_elem_degree + 1) * (max_elem_degree + 1)];
+
     template <typename Number>
     using CUDAVector = ::dealii::LinearAlgebra::CUDAWrappers::Vector<Number>;
 
@@ -835,6 +840,13 @@ namespace CUDAWrappers
 
     unsigned int size_shape_values = n_dofs_1d * n_q_points_1d * sizeof(Number);
 
+    FE_DGQArbitraryNodes<1> fe_quad_co(quad);
+    const ::dealii::internal::MatrixFreeFunctions::ShapeInfo<Number>
+      shape_info_co(quad, fe_quad_co);
+
+    unsigned int size_co_shape_values =
+      n_q_points_1d * n_q_points_1d * sizeof(Number);
+
     cudaError_t cuda_error = cudaMemcpyToSymbol(internal::global_shape_values,
                                                 shape_info.shape_values.data(),
                                                 size_shape_values,
@@ -850,6 +862,13 @@ namespace CUDAWrappers
                                         0,
                                         cudaMemcpyHostToDevice);
         AssertCuda(cuda_error);
+
+        cuda_error = cudaMemcpyToSymbol(internal::global_co_shape_gradients,
+                                        shape_info_co.shape_gradients.data(),
+                                        size_co_shape_values,
+                                        0,
+                                        cudaMemcpyHostToDevice);
+        AssertCuda(cuda_error);
       }
 
     // Setup the number of cells per CUDA thread block
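
The shape_info_co object constructed above is what fills global_co_shape_gradients: an FE_DGQArbitraryNodes<1> element whose support points are the quadrature points, evaluated on that same quadrature, has shape_values equal to the identity and shape_gradients equal to the n_q x n_q collocation derivative matrix. A host-only sketch of the same construction (written against the deal.II 9.1-era API; the flat layout of the ShapeInfo arrays is an internal detail and may differ in other versions):

    #include <deal.II/base/quadrature_lib.h>
    #include <deal.II/fe/fe_dgq.h>
    #include <deal.II/matrix_free/shape_info.h>

    #include <iostream>

    using namespace dealii;

    int main()
    {
      const unsigned int n_q_points_1d = 4;
      const QGauss<1>    quad(n_q_points_1d);

      // Lagrange basis with nodes at the quadrature points ...
      const FE_DGQArbitraryNodes<1> fe_co(quad);

      // ... evaluated at those same points: this yields the square
      // collocation matrices used by the fused device kernels.
      const internal::MatrixFreeFunctions::ShapeInfo<double> shape_info_co(
        quad, fe_co);

      // Print the n_q x n_q collocation derivative matrix.
      for (unsigned int i = 0; i < n_q_points_1d; ++i)
        {
          for (unsigned int j = 0; j < n_q_points_1d; ++j)
            std::cout << shape_info_co.shape_gradients[i * n_q_points_1d + j]
                      << ' ';
          std::cout << '\n';
        }
    }

Printing shape_info_co.shape_values instead would show (up to round-off) the identity matrix, which is exactly why no extra value interpolation is needed once the data lives in quadrature space.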
diff --git a/include/deal.II/matrix_free/cuda_tensor_product_kernels.h b/include/deal.II/matrix_free/cuda_tensor_product_kernels.h
index 5c5b55acd3bf14e672c8b4e7b447576fee80de48..bf9a4a3d9a2590b94a24bb77aaa371e9a39b77f7 100644 (file)
@@ -90,7 +90,7 @@ namespace CUDAWrappers
        */
       template <int direction, bool dof_to_quad, bool add, bool in_place>
       __device__ void
-      values(const Number *in, Number *out) const;
+      values(Number shape_values[], const Number *in, Number *out) const;
 
       /**
        * Evaluate the gradient of a finite element function at the quadrature
@@ -98,7 +98,7 @@ namespace CUDAWrappers
        */
       template <int direction, bool dof_to_quad, bool add, bool in_place>
       __device__ void
-      gradients(const Number *in, Number *out) const;
+      gradients(Number shape_gradients[], const Number *in, Number *out) const;
 
       /**
        * Helper function for values() and gradients().
@@ -126,6 +126,13 @@ namespace CUDAWrappers
       __device__ void
       gradient_at_quad_pts(const Number *const u, Number *grad_u[dim]);
 
+      /**
+       * Evaluate the values and the gradients of the finite element function at
+       * the quadrature points.
+       */
+      __device__ void
+      value_and_gradient_at_quad_pts(Number *const u, Number *grad_u[dim]);
+
       /**
        * Helper function for integrate(). Integrate the gradients of the finite
        * element function.
@@ -133,6 +140,13 @@ namespace CUDAWrappers
       template <bool add>
       __device__ void
       integrate_gradient(Number *u, Number *grad_u[dim]);
+
+      /**
+       * Helper function for integrate(). Integrate the values and the gradients
+       * of the finite element function.
+       */
+      __device__ void
+      integrate_value_and_gradient(Number *u, Number *grad_u[dim]);
     };
 
 
@@ -155,11 +169,11 @@ namespace CUDAWrappers
                            dim,
                            fe_degree,
                            n_q_points_1d,
-                           Number>::values(const Number *in, Number *out) const
+                           Number>::values(Number        shape_values[],
+                                           const Number *in,
+                                           Number *      out) const
     {
-      apply<direction, dof_to_quad, add, in_place>(global_shape_values,
-                                                   in,
-                                                   out);
+      apply<direction, dof_to_quad, add, in_place>(shape_values, in, out);
     }
 
 
@@ -171,12 +185,11 @@ namespace CUDAWrappers
                            dim,
                            fe_degree,
                            n_q_points_1d,
-                           Number>::gradients(const Number *in,
+                           Number>::gradients(Number        shape_gradients[],
+                                              const Number *in,
                                               Number *      out) const
     {
-      apply<direction, dof_to_quad, add, in_place>(global_shape_gradients,
-                                                   in,
-                                                   out);
+      apply<direction, dof_to_quad, add, in_place>(shape_gradients, in, out);
     }
 
 
@@ -243,25 +256,25 @@ namespace CUDAWrappers
         {
           case 1:
             {
-              values<0, true, false, true>(u, u);
+              values<0, true, false, true>(global_shape_values, u, u);
 
               break;
             }
           case 2:
             {
-              values<0, true, false, true>(u, u);
+              values<0, true, false, true>(global_shape_values, u, u);
               __syncthreads();
-              values<1, true, false, true>(u, u);
+              values<1, true, false, true>(global_shape_values, u, u);
 
               break;
             }
           case 3:
             {
-              values<0, true, false, true>(u, u);
+              values<0, true, false, true>(global_shape_values, u, u);
               __syncthreads();
-              values<1, true, false, true>(u, u);
+              values<1, true, false, true>(global_shape_values, u, u);
               __syncthreads();
-              values<2, true, false, true>(u, u);
+              values<2, true, false, true>(global_shape_values, u, u);
 
               break;
             }
@@ -287,25 +300,25 @@ namespace CUDAWrappers
         {
           case 1:
             {
-              values<0, false, false, true>(u, u);
+              values<0, false, false, true>(global_shape_values, u, u);
 
               break;
             }
           case 2:
             {
-              values<0, false, false, true>(u, u);
+              values<0, false, false, true>(global_shape_values, u, u);
               __syncthreads();
-              values<1, false, false, true>(u, u);
+              values<1, false, false, true>(global_shape_values, u, u);
 
               break;
             }
           case 3:
             {
-              values<0, false, false, true>(u, u);
+              values<0, false, false, true>(global_shape_values, u, u);
               __syncthreads();
-              values<1, false, false, true>(u, u);
+              values<1, false, false, true>(global_shape_values, u, u);
               __syncthreads();
-              values<2, false, false, true>(u, u);
+              values<2, false, false, true>(global_shape_values, u, u);
 
               break;
             }
@@ -332,39 +345,131 @@ namespace CUDAWrappers
         {
           case 1:
             {
-              gradients<0, true, false, false>(u, grad_u[0]);
+              gradients<0, true, false, false>(global_shape_gradients,
+                                               u,
+                                               grad_u[0]);
 
               break;
             }
           case 2:
             {
-              gradients<0, true, false, false>(u, grad_u[0]);
-              values<0, true, false, false>(u, grad_u[1]);
+              gradients<0, true, false, false>(global_shape_gradients,
+                                               u,
+                                               grad_u[0]);
+              values<0, true, false, false>(global_shape_values, u, grad_u[1]);
 
               __syncthreads();
 
-              values<1, true, false, true>(grad_u[0], grad_u[0]);
-              gradients<1, true, false, true>(grad_u[1], grad_u[1]);
+              values<1, true, false, true>(global_shape_values,
+                                           grad_u[0],
+                                           grad_u[0]);
+              gradients<1, true, false, true>(global_shape_gradients,
+                                              grad_u[1],
+                                              grad_u[1]);
 
               break;
             }
           case 3:
             {
-              gradients<0, true, false, false>(u, grad_u[0]);
-              values<0, true, false, false>(u, grad_u[1]);
-              values<0, true, false, false>(u, grad_u[2]);
+              gradients<0, true, false, false>(global_shape_gradients,
+                                               u,
+                                               grad_u[0]);
+              values<0, true, false, false>(global_shape_values, u, grad_u[1]);
+              values<0, true, false, false>(global_shape_values, u, grad_u[2]);
+
+              __syncthreads();
+
+              values<1, true, false, true>(global_shape_values,
+                                           grad_u[0],
+                                           grad_u[0]);
+              gradients<1, true, false, true>(global_shape_gradients,
+                                              grad_u[1],
+                                              grad_u[1]);
+              values<1, true, false, true>(global_shape_values,
+                                           grad_u[2],
+                                           grad_u[2]);
+
+              __syncthreads();
+
+              values<2, true, false, true>(global_shape_values,
+                                           grad_u[0],
+                                           grad_u[0]);
+              values<2, true, false, true>(global_shape_values,
+                                           grad_u[1],
+                                           grad_u[1]);
+              gradients<2, true, false, true>(global_shape_gradients,
+                                              grad_u[2],
+                                              grad_u[2]);
+
+              break;
+            }
+          default:
+            {
+              // Do nothing. We should throw but we can't from a __device__
+              // function.
+            }
+        }
+    }
+
+
+
+    template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+    inline __device__ void
+    EvaluatorTensorProduct<
+      evaluate_general,
+      dim,
+      fe_degree,
+      n_q_points_1d,
+      Number>::value_and_gradient_at_quad_pts(Number *const u,
+                                              Number *      grad_u[dim])
+    {
+      switch (dim)
+        {
+          case 1:
+            {
+              values<0, true, false, true>(global_shape_values, u, u);
+              __syncthreads();
 
+              gradients<0, true, false, false>(global_co_shape_gradients,
+                                               u,
+                                               grad_u[0]);
+
+              break;
+            }
+          case 2:
+            {
+              values<0, true, false, true>(global_shape_values, u, u);
+              __syncthreads();
+              values<1, true, false, true>(global_shape_values, u, u);
               __syncthreads();
 
-              values<1, true, false, true>(grad_u[0], grad_u[0]);
-              gradients<1, true, false, true>(grad_u[1], grad_u[1]);
-              values<1, true, false, true>(grad_u[2], grad_u[2]);
+              gradients<0, true, false, false>(global_co_shape_gradients,
+                                               u,
+                                               grad_u[0]);
+              gradients<1, true, false, false>(global_co_shape_gradients,
+                                               u,
+                                               grad_u[1]);
 
+              break;
+            }
+          case 3:
+            {
+              values<0, true, false, true>(global_shape_values, u, u);
+              __syncthreads();
+              values<1, true, false, true>(global_shape_values, u, u);
+              __syncthreads();
+              values<2, true, false, true>(global_shape_values, u, u);
               __syncthreads();
 
-              values<2, true, false, true>(grad_u[0], grad_u[0]);
-              values<2, true, false, true>(grad_u[1], grad_u[1]);
-              gradients<2, true, false, true>(grad_u[2], grad_u[2]);
+              gradients<0, true, false, false>(global_co_shape_gradients,
+                                               u,
+                                               grad_u[0]);
+              gradients<1, true, false, false>(global_co_shape_gradients,
+                                               u,
+                                               grad_u[1]);
+              gradients<2, true, false, false>(global_co_shape_gradients,
+                                               u,
+                                               grad_u[2]);
 
               break;
             }
@@ -392,42 +497,139 @@ namespace CUDAWrappers
         {
           case 1:
             {
-              gradients<0, false, add, false>(grad_u[dim], u);
+              gradients<0, false, add, false>(global_shape_gradients,
+                                              grad_u[dim],
+                                              u);
 
               break;
             }
           case 2:
             {
-              gradients<0, false, false, true>(grad_u[0], grad_u[0]);
-              values<0, false, false, true>(grad_u[1], grad_u[1]);
+              gradients<0, false, false, true>(global_shape_gradients,
+                                               grad_u[0],
+                                               grad_u[0]);
+              values<0, false, false, true>(global_shape_values,
+                                            grad_u[1],
+                                            grad_u[1]);
 
               __syncthreads();
 
-              values<1, false, add, false>(grad_u[0], u);
+              values<1, false, add, false>(global_shape_values, grad_u[0], u);
               __syncthreads();
-              gradients<1, false, true, false>(grad_u[1], u);
+              gradients<1, false, true, false>(global_shape_gradients,
+                                               grad_u[1],
+                                               u);
 
               break;
             }
           case 3:
             {
-              gradients<0, false, false, true>(grad_u[0], grad_u[0]);
-              values<0, false, false, true>(grad_u[1], grad_u[1]);
-              values<0, false, false, true>(grad_u[2], grad_u[2]);
+              gradients<0, false, false, true>(global_shape_gradients,
+                                               grad_u[0],
+                                               grad_u[0]);
+              values<0, false, false, true>(global_shape_values,
+                                            grad_u[1],
+                                            grad_u[1]);
+              values<0, false, false, true>(global_shape_values,
+                                            grad_u[2],
+                                            grad_u[2]);
 
               __syncthreads();
 
-              values<1, false, false, true>(grad_u[0], grad_u[0]);
-              gradients<1, false, false, true>(grad_u[1], grad_u[1]);
-              values<1, false, false, true>(grad_u[2], grad_u[2]);
+              values<1, false, false, true>(global_shape_values,
+                                            grad_u[0],
+                                            grad_u[0]);
+              gradients<1, false, false, true>(global_shape_gradients,
+                                               grad_u[1],
+                                               grad_u[1]);
+              values<1, false, false, true>(global_shape_values,
+                                            grad_u[2],
+                                            grad_u[2]);
 
               __syncthreads();
 
-              values<2, false, add, false>(grad_u[0], u);
+              values<2, false, add, false>(global_shape_values, grad_u[0], u);
+              __syncthreads();
+              values<2, false, true, false>(global_shape_values, grad_u[1], u);
+              __syncthreads();
+              gradients<2, false, true, false>(global_shape_gradients,
+                                               grad_u[2],
+                                               u);
+
+              break;
+            }
+          default:
+            {
+              // Do nothing. We should throw but we can't from a __device__
+              // function.
+            }
+        }
+    }
+
+
+
+    template <int dim, int fe_degree, int n_q_points_1d, typename Number>
+    inline __device__ void
+    EvaluatorTensorProduct<evaluate_general,
+                           dim,
+                           fe_degree,
+                           n_q_points_1d,
+                           Number>::integrate_value_and_gradient(Number *u,
+                                                                 Number
+                                                                   *grad_u[dim])
+    {
+      switch (dim)
+        {
+          case 1:
+            {
+              gradients<0, false, true, false>(global_co_shape_gradients,
+                                               grad_u[0],
+                                               u);
+              __syncthreads();
+
+              values<0, false, false, true>(global_shape_values, u, u);
+
+              break;
+            }
+          case 2:
+            {
+              gradients<1, false, true, false>(global_co_shape_gradients,
+                                               grad_u[1],
+                                               u);
+              __syncthreads();
+              gradients<0, false, true, false>(global_co_shape_gradients,
+                                               grad_u[0],
+                                               u);
+              __syncthreads();
+
+              values<1, false, false, true>(global_shape_values, u, u);
+              __syncthreads();
+              values<0, false, false, true>(global_shape_values, u, u);
+              __syncthreads();
+
+              break;
+            }
+          case 3:
+            {
+              gradients<2, false, true, false>(global_co_shape_gradients,
+                                               grad_u[2],
+                                               u);
+              __syncthreads();
+              gradients<1, false, true, false>(global_co_shape_gradients,
+                                               grad_u[1],
+                                               u);
+              __syncthreads();
+              gradients<0, false, true, false>(global_co_shape_gradients,
+                                               grad_u[0],
+                                               u);
+              __syncthreads();
+
+              values<2, false, false, true>(global_shape_values, u, u);
+              __syncthreads();
+              values<1, false, false, true>(global_shape_values, u, u);
               __syncthreads();
-              values<2, false, true, false>(grad_u[1], u);
+              values<0, false, false, true>(global_shape_values, u, u);
               __syncthreads();
-              gradients<2, false, true, false>(grad_u[2], u);
 
               break;
             }
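
Schematically, the two new kernels are adjoints of each other. With S the 1D value matrix and D_co the collocation derivative matrix applied in direction e, evaluation computes

\[
u_q = (S \otimes \cdots \otimes S)\, u, \qquad g_q^{(e)} = D_{\mathrm{co}}^{(e)}\, u_q, \quad e = 1, \ldots, d,
\]

and integration (quadrature weights and Jacobians having been applied by the caller in between) is the transpose,

\[
u \mathrel{+}= \bigl(S^\top \otimes \cdots \otimes S^\top\bigr) \Bigl( u_q + \sum_{e=1}^{d} \bigl(D_{\mathrm{co}}^{(e)}\bigr)^{\top} g_q^{(e)} \Bigr),
\]

which is exactly the order of operations in integrate_value_and_gradient above: first the gradients<...>(global_co_shape_gradients, ...) accumulations into u, then the values<...>(global_shape_values, ...) sweeps back to the degrees of freedom.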
