https://gitweb.dealii.org/ - dealii.git/commitdiff
Move CUDA kernels to their own file and namespace. 7013/head
author Bruno Turcksin <bruno.turcksin@gmail.com>
Fri, 29 Jun 2018 19:06:56 +0000 (19:06 +0000)
committer Bruno Turcksin <bruno.turcksin@gmail.com>
Wed, 8 Aug 2018 16:03:57 +0000 (16:03 +0000)
Move the kernels from cuda_vector to their own file, remove references to
Vector, and add a couple of new kernels.

include/deal.II/lac/cuda_kernels.h [new file with mode: 0644]
include/deal.II/lac/cuda_kernels.templates.h [new file with mode: 0644]
source/lac/CMakeLists.txt
source/lac/cuda_kernels.cu [new file with mode: 0644]
source/lac/cuda_vector.cu
tests/all-headers/CMakeLists.txt

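At the call sites in cuda_vector.cu the change is mechanical: every kernel that used to live in the file-local namespace internal is now declared in LinearAlgebra::CUDAWrappers::kernel, so only the qualification of each launch changes, as in this hunk from the scale() wrapper further down:

-      internal::vec_scale<Number>
+      kernel::vec_scale<Number>
         <<<n_blocks, block_size>>>(val, factor, n_elements);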
diff --git a/include/deal.II/lac/cuda_kernels.h b/include/deal.II/lac/cuda_kernels.h
new file mode 100644 (file)
index 0000000..2e29e06
--- /dev/null
@@ -0,0 +1,355 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_cuda_kernels_h
+#define dealii_cuda_kernels_h
+
+#include <deal.II/base/config.h>
+
+#ifdef DEAL_II_WITH_CUDA
+
+
+#  include <deal.II/base/cuda_size.h>
+#  include <deal.II/base/types.h>
+
+#  include <deal.II/lac/cuda_atomic.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace LinearAlgebra
+{
+  namespace CUDAWrappers
+  {
+    /**
+     * Namespace containing the CUDA kernels.
+     */
+    namespace kernel
+    {
+      using ::dealii::CUDAWrappers::block_size;
+      using ::dealii::CUDAWrappers::chunk_size;
+      typedef types::global_dof_index size_type;
+
+      /**
+       * Multiply each entry of @p val of size @p N by @p a.
+       */
+      template <typename Number>
+      __global__ void
+      vec_scale(Number *val, const Number a, const size_type N);
+
+
+
+      /**
+       * Functor defining the addition of two Numbers.
+       */
+      struct Binop_Addition
+      {
+        template <typename Number>
+        __device__ static inline Number
+        operation(const Number a, const Number b)
+        {
+          return a + b;
+        }
+      };
+
+
+
+      /**
+       * Functor defining the subtraction of two Numbers.
+       */
+      struct Binop_Subtraction
+      {
+        template <typename Number>
+        __device__ static inline Number
+        operation(const Number a, const Number b)
+        {
+          return a - b;
+        }
+      };
+
+
+
+      /**
+       * Apply the functor @tparam Binop to each element of @p v1 and @p v2,
+       * storing the result in @p v1.
+       */
+      template <typename Number, typename Binop>
+      __global__ void
+      vector_bin_op(Number *v1, Number *v2, const size_type N);
+
+
+
+      /**
+       * Structure implementing the functions used to sum the elements when
+       * using a reduction.
+       */
+      template <typename Number>
+      struct ElemSum
+      {
+        __device__ static Number
+        reduction_op(const Number a, const Number b);
+
+        __device__ static Number
+        atomic_op(Number *dst, const Number a);
+
+        __device__ static Number
+        element_wise_op(const Number a);
+
+        __device__ static Number
+        null_value();
+      };
+
+
+
+      /**
+       * Structure implementing the functions used to compute the L1 norm when
+       * using a reduction.
+       */
+      template <typename Number>
+      struct L1Norm
+      {
+        __device__ static Number
+        reduction_op(const Number a, const Number b);
+
+        __device__ static Number
+        atomic_op(Number *dst, const Number a);
+
+        __device__ static Number
+        element_wise_op(const Number a);
+
+        __device__ static Number
+        null_value();
+      };
+
+
+
+      /**
+       * Structure implementing the functions used to compute the L-infinity
+       * norm when using a reduction.
+       */
+      template <typename Number>
+      struct LInfty
+      {
+        __device__ static Number
+        reduction_op(const Number a, const Number b);
+
+        __device__ static Number
+        atomic_op(Number *dst, const Number a);
+
+        __device__ static Number
+        element_wise_op(const Number a);
+
+        __device__ static Number
+        null_value();
+      };
+
+
+
+      /**
+       * Perform a reduction on @p v using @tparam Operation.
+       */
+      template <typename Number, typename Operation>
+      __global__ void
+      reduction(Number *result, const Number *v, const size_type N);
+
+
+
+      /**
+       * Structure implementing the functions used to compute the dot product
+       * when using a double vector reduction.
+       */
+      template <typename Number>
+      struct DotProduct
+      {
+        __device__ static Number
+        binary_op(const Number a, const Number b);
+
+        __device__ static Number
+        reduction_op(const Number a, const Number b);
+
+        __device__ static Number
+        atomic_op(Number *dst, const Number a);
+
+        __device__ static Number
+        null_value();
+      };
+
+
+
+      /**
+       * Perform a binary operation on each element of @p v1 and @p v2 followed
+       * by reduction on the resulting array.
+       */
+      template <typename Number, typename Operation>
+      __global__ void
+      double_vector_reduction(Number *        result,
+                              const Number *  v1,
+                              const Number *  v2,
+                              const size_type N);
+
+
+
+      /**
+       * Add @p a to each element of @p val.
+       */
+      template <typename Number>
+      __global__ void
+      vec_add(Number *val, const Number a, const size_type N);
+
+
+
+      /**
+       * Addition of a multiple of a vector, i.e., <tt>val += a*V_val</tt>.
+       */
+      template <typename Number>
+      __global__ void
+      add_aV(Number *        val,
+             const Number    a,
+             const Number *  V_val,
+             const size_type N);
+
+
+
+      /**
+       * Addition of multiple scaled vectors, i.e., <tt>val += a*V_val +
+       * b*W_val</tt>.
+       */
+      template <typename Number>
+      __global__ void
+      add_aVbW(Number *        val,
+               const Number    a,
+               const Number *  V_val,
+               const Number    b,
+               const Number *  W_val,
+               const size_type N);
+
+
+
+      /**
+       * Scaling and simple addition of a multiple of a vector, i.e.,
+       * <tt>val = s*val + a*V_val</tt>.
+       */
+      template <typename Number>
+      __global__ void
+      sadd(const Number    s,
+           Number *        val,
+           const Number    a,
+           const Number *  V_val,
+           const size_type N);
+
+
+
+      /**
+       * Scaling and multiple additions of scaled vectors, i.e.,
+       * <tt>val = s*val + a*V_val + b*W_val</tt>.
+       */
+      template <typename Number>
+      __global__ void
+      sadd(const Number    s,
+           Number *        val,
+           const Number    a,
+           const Number *  V_val,
+           const Number    b,
+           const Number *  W_val,
+           const size_type N);
+
+
+
+      /**
+       * Scale each element of @p val by the corresponding element of
+       * @p V_val.
+       */
+      template <typename Number>
+      __global__ void
+      scale(Number *val, const Number *V_val, const size_type N);
+
+
+
+      /**
+       * Assignment <tt>val = a*V_val</tt>.
+       */
+      template <typename Number>
+      __global__ void
+      equ(Number *val, const Number a, const Number *V_val, const size_type N);
+
+
+
+      /**
+       * Assignment <tt>val = a*V_val + b*W_val</tt>.
+       */
+      template <typename Number>
+      __global__ void
+      equ(Number *        val,
+          const Number    a,
+          const Number *  V_val,
+          const Number    b,
+          const Number *  W_val,
+          const size_type N);
+
+
+
+      /**
+       * Perform a combined operation of a vector addition and a subsequent
+       * inner product, returning the value of the inner product.
+       */
+      template <typename Number>
+      __global__ void
+      add_and_dot(Number *        res,
+                  Number *        v1,
+                  const Number *  v2,
+                  const Number *  v3,
+                  const Number    a,
+                  const size_type N);
+
+
+
+      /**
+       * Set each element of @p val to @p s.
+       */
+      template <typename Number>
+      __global__ void
+      set(Number *val, const Number s, const size_type N);
+
+
+
+      /**
+       * Set each element of @p val to the corresponding element of @p v, using
+       * @p indices as a permutation, i.e., <tt>val[indices[i]] = v[i]</tt>.
+       */
+      template <typename Number>
+      __global__ void
+      set_permutated(Number *         val,
+                     const Number *   v,
+                     const size_type *indices,
+                     const size_type  N);
+
+
+
+      /**
+       * Add each element of @p v to the corresponding element of @p val, using
+       * @p indices as a permutation, i.e., <tt>val[indices[i]] += v[i]</tt>.
+       */
+      template <typename Number>
+      __global__ void
+      add_permutated(Number *         val,
+                     const Number *   v,
+                     const size_type *indices,
+                     const size_type  N);
+    } // namespace kernel
+  }   // namespace CUDAWrappers
+} // namespace LinearAlgebra
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
+
+#endif
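All element-wise kernels declared above use the same chunked grid: a block of block_size threads covers block_size * chunk_size consecutive entries, each thread handling chunk_size of them at a stride of block_size. A minimal host-side sketch of a launch in a .cu file compiled by nvcc (the helper scale_device and the factor 2. are made up for illustration; device_val must point to device memory):

    #include <deal.II/base/exceptions.h>
    #include <deal.II/lac/cuda_kernels.h>

    namespace kernel = dealii::LinearAlgebra::CUDAWrappers::kernel;

    // Hypothetical helper: scale the device array device_val of length n by 2.
    void scale_device(double *device_val, const kernel::size_type n)
    {
      // Each block covers block_size * chunk_size entries, so round up.
      const int n_blocks =
        1 + (n - 1) / (kernel::chunk_size * kernel::block_size);
      kernel::vec_scale<double>
        <<<n_blocks, kernel::block_size>>>(device_val, 2., n);
      AssertCuda(cudaGetLastError());
    }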
diff --git a/include/deal.II/lac/cuda_kernels.templates.h b/include/deal.II/lac/cuda_kernels.templates.h
new file mode 100644 (file)
index 0000000..8ae0e51
--- /dev/null
@@ -0,0 +1,575 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_cuda_kernels_templates_h
+#define dealii_cuda_kernels_templates_h
+
+#include <deal.II/lac/cuda_kernels.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace LinearAlgebra
+{
+  namespace CUDAWrappers
+  {
+    namespace kernel
+    {
+      template <typename Number>
+      __global__ void
+      vec_scale(Number *val, const Number a, const size_type N)
+      {
+        const size_type idx_base =
+          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
+        for (unsigned int i = 0; i < chunk_size; ++i)
+          {
+            const size_type idx = idx_base + i * block_size;
+            if (idx < N)
+              val[idx] *= a;
+          }
+      }
+
+
+
+      template <typename Number, typename Binop>
+      __global__ void
+      vector_bin_op(Number *v1, Number *v2, const size_type N)
+      {
+        const size_type idx_base =
+          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
+        for (unsigned int i = 0; i < chunk_size; ++i)
+          {
+            const size_type idx = idx_base + i * block_size;
+            if (idx < N)
+              v1[idx] = Binop::operation(v1[idx], v2[idx]);
+          }
+      }
+
+
+
+      template <typename Number>
+      __device__ Number
+                 ElemSum<Number>::reduction_op(const Number a, const Number b)
+      {
+        return (a + b);
+      }
+
+
+
+      template <typename Number>
+      __device__ Number
+                 ElemSum<Number>::atomic_op(Number *dst, const Number a)
+      {
+        return atomicAdd_wrapper(dst, a);
+      }
+
+
+
+      template <typename Number>
+      __device__ Number
+                 ElemSum<Number>::element_wise_op(const Number a)
+      {
+        return a;
+      }
+
+
+
+      template <typename Number>
+      __device__ Number
+                 ElemSum<Number>::null_value()
+      {
+        return Number();
+      }
+
+
+
+      template <typename Number>
+      __device__ Number
+                 L1Norm<Number>::reduction_op(const Number a, const Number b)
+      {
+        return (a + b);
+      }
+
+
+
+      template <typename Number>
+      __device__ Number
+                 L1Norm<Number>::atomic_op(Number *dst, const Number a)
+      {
+        return atomicAdd_wrapper(dst, a);
+      }
+
+
+
+      template <typename Number>
+      __device__ Number
+                 L1Norm<Number>::element_wise_op(const Number a)
+      {
+        return std::fabs(a);
+      }
+
+
+
+      template <typename Number>
+      __device__ Number
+                 L1Norm<Number>::null_value()
+      {
+        return Number();
+      }
+
+
+
+      template <typename Number>
+      __device__ Number
+                 LInfty<Number>::reduction_op(const Number a, const Number b)
+      {
+        if (a > b)
+          return a;
+        else
+          return b;
+      }
+
+
+
+      template <typename Number>
+      __device__ Number
+                 LInfty<Number>::atomic_op(Number *dst, const Number a)
+      {
+        return atomicMax_wrapper(dst, a);
+      }
+
+
+
+      template <typename Number>
+      __device__ Number
+                 LInfty<Number>::element_wise_op(const Number a)
+      {
+        return std::fabs(a);
+      }
+
+
+
+      template <typename Number>
+      __device__ Number
+                 LInfty<Number>::null_value()
+      {
+        return Number();
+      }
+
+
+
+      template <typename Number, typename Operation>
+      __device__ void
+      reduce_within_warp(volatile Number *result_buffer, size_type local_idx)
+      {
+        if (block_size >= 64)
+          result_buffer[local_idx] =
+            Operation::reduction_op(result_buffer[local_idx],
+                                    result_buffer[local_idx + 32]);
+        if (block_size >= 32)
+          result_buffer[local_idx] =
+            Operation::reduction_op(result_buffer[local_idx],
+                                    result_buffer[local_idx + 16]);
+        if (block_size >= 16)
+          result_buffer[local_idx] =
+            Operation::reduction_op(result_buffer[local_idx],
+                                    result_buffer[local_idx + 8]);
+        if (block_size >= 8)
+          result_buffer[local_idx] =
+            Operation::reduction_op(result_buffer[local_idx],
+                                    result_buffer[local_idx + 4]);
+        if (block_size >= 4)
+          result_buffer[local_idx] =
+            Operation::reduction_op(result_buffer[local_idx],
+                                    result_buffer[local_idx + 2]);
+        if (block_size >= 2)
+          result_buffer[local_idx] =
+            Operation::reduction_op(result_buffer[local_idx],
+                                    result_buffer[local_idx + 1]);
+      }
+
+
+
+      template <typename Number, typename Operation>
+      __device__ void
+      reduce(Number *        result,
+             Number *        result_buffer,
+             const size_type local_idx,
+             const size_type global_idx,
+             const size_type N)
+      {
+        for (size_type s = block_size / 2; s > 32; s = s >> 1)
+          {
+            if (local_idx < s)
+              result_buffer[local_idx] =
+                Operation::reduction_op(result_buffer[local_idx],
+                                        result_buffer[local_idx + s]);
+            __syncthreads();
+          }
+
+        if (local_idx < 32)
+          reduce_within_warp<Number, Operation>(result_buffer, local_idx);
+
+        if (local_idx == 0)
+          Operation::atomic_op(result, result_buffer[0]);
+      }
+
+
+
+      template <typename Number, typename Operation>
+      __global__ void
+      reduction(Number *result, const Number *v, const size_type N)
+      {
+        __shared__ Number result_buffer[block_size];
+
+        const size_type global_idx =
+          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
+        const size_type local_idx = threadIdx.x;
+
+        if (global_idx < N)
+          result_buffer[local_idx] = Operation::element_wise_op(v[global_idx]);
+        else
+          result_buffer[local_idx] = Operation::null_value();
+
+        __syncthreads();
+
+        reduce<Number, Operation>(
+          result, result_buffer, local_idx, global_idx, N);
+      }
+
+
+
+      template <typename Number>
+      __device__ Number
+                 DotProduct<Number>::binary_op(const Number a, const Number b)
+      {
+        return a * b;
+      }
+
+
+
+      template <typename Number>
+      __device__ Number
+                 DotProduct<Number>::reduction_op(const Number a, const Number b)
+      {
+        return a + b;
+      }
+
+
+
+      template <typename Number>
+      __device__ Number
+                 DotProduct<Number>::atomic_op(Number *dst, const Number a)
+      {
+        return atomicAdd_wrapper(dst, a);
+      }
+
+
+
+      template <typename Number>
+      __device__ Number
+                 DotProduct<Number>::null_value()
+      {
+        return Number();
+      }
+
+
+
+      template <typename Number, typename Operation>
+      __global__ void
+      double_vector_reduction(Number *        result,
+                              const Number *  v1,
+                              const Number *  v2,
+                              const size_type N)
+      {
+        __shared__ Number result_buffer[block_size];
+
+        const size_type global_idx =
+          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
+        const size_type local_idx = threadIdx.x;
+
+        if (global_idx < N)
+          result_buffer[local_idx] =
+            Operation::binary_op(v1[global_idx], v2[global_idx]);
+        else
+          result_buffer[local_idx] = Operation::null_value();
+
+        for (unsigned int i = 1; i < chunk_size; ++i)
+          {
+            const size_type idx = global_idx + i * block_size;
+            if (idx < N)
+              result_buffer[local_idx] =
+                Operation::reduction_op(result_buffer[local_idx],
+                                        Operation::binary_op(v1[idx], v2[idx]));
+          }
+
+        __syncthreads();
+
+        reduce<Number, Operation>(
+          result, result_buffer, local_idx, global_idx, N);
+      }
+
+
+
+      template <typename Number>
+      __global__ void
+      vec_add(Number *val, const Number a, const size_type N)
+      {
+        const size_type idx_base =
+          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
+        for (unsigned int i = 0; i < chunk_size; ++i)
+          {
+            const size_type idx = idx_base + i * block_size;
+            if (idx < N)
+              val[idx] += a;
+          }
+      }
+
+
+
+      template <typename Number>
+      __global__ void
+      add_aV(Number *        val,
+             const Number    a,
+             const Number *  V_val,
+             const size_type N)
+      {
+        const size_type idx_base =
+          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
+        for (unsigned int i = 0; i < chunk_size; ++i)
+          {
+            const size_type idx = idx_base + i * block_size;
+            if (idx < N)
+              val[idx] += a * V_val[idx];
+          }
+      }
+
+
+
+      template <typename Number>
+      __global__ void
+      add_aVbW(Number *        val,
+               const Number    a,
+               const Number *  V_val,
+               const Number    b,
+               const Number *  W_val,
+               const size_type N)
+      {
+        const size_type idx_base =
+          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
+        for (unsigned int i = 0; i < chunk_size; ++i)
+          {
+            const size_type idx = idx_base + i * block_size;
+            if (idx < N)
+              val[idx] += a * V_val[idx] + b * W_val[idx];
+          }
+      }
+
+
+
+      template <typename Number>
+      __global__ void
+      sadd(const Number    s,
+           Number *        val,
+           const Number    a,
+           const Number *  V_val,
+           const size_type N)
+      {
+        const size_type idx_base =
+          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
+        for (unsigned int i = 0; i < chunk_size; ++i)
+          {
+            const size_type idx = idx_base + i * block_size;
+            if (idx < N)
+              val[idx] = s * val[idx] + a * V_val[idx];
+          }
+      }
+
+
+
+      template <typename Number>
+      __global__ void
+      sadd(const Number    s,
+           Number *        val,
+           const Number    a,
+           const Number *  V_val,
+           const Number    b,
+           const Number *  W_val,
+           const size_type N)
+      {
+        const size_type idx_base =
+          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
+        for (unsigned int i = 0; i < chunk_size; ++i)
+          {
+            const size_type idx = idx_base + i * block_size;
+            if (idx < N)
+              val[idx] = s * val[idx] + a * V_val[idx] + b * W_val[idx];
+          }
+      }
+
+
+
+      template <typename Number>
+      __global__ void
+      scale(Number *val, const Number *V_val, const size_type N)
+      {
+        const size_type idx_base =
+          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
+        for (unsigned int i = 0; i < chunk_size; ++i)
+          {
+            const size_type idx = idx_base + i * block_size;
+            if (idx < N)
+              val[idx] *= V_val[idx];
+          }
+      }
+
+
+
+      template <typename Number>
+      __global__ void
+      equ(Number *val, const Number a, const Number *V_val, const size_type N)
+      {
+        const size_type idx_base =
+          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
+        for (unsigned int i = 0; i < chunk_size; ++i)
+          {
+            const size_type idx = idx_base + i * block_size;
+            if (idx < N)
+              val[idx] = a * V_val[idx];
+          }
+      }
+
+
+
+      template <typename Number>
+      __global__ void
+      equ(Number *        val,
+          const Number    a,
+          const Number *  V_val,
+          const Number    b,
+          const Number *  W_val,
+          const size_type N)
+      {
+        const size_type idx_base =
+          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
+        for (unsigned int i = 0; i < chunk_size; ++i)
+          {
+            const size_type idx = idx_base + i * block_size;
+            if (idx < N)
+              val[idx] = a * V_val[idx] + b * W_val[idx];
+          }
+      }
+
+
+
+      template <typename Number>
+      __global__ void
+      add_and_dot(Number *        res,
+                  Number *        v1,
+                  const Number *  v2,
+                  const Number *  v3,
+                  const Number    a,
+                  const size_type N)
+      {
+        __shared__ Number res_buf[block_size];
+
+        const unsigned int global_idx =
+          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
+        const unsigned int local_idx = threadIdx.x;
+        if (global_idx < N)
+          {
+            v1[global_idx] += a * v2[global_idx];
+            res_buf[local_idx] =
+              v1[global_idx] *
+              Number(numbers::NumberTraits<Number>::conjugate(v3[global_idx]));
+          }
+        else
+          res_buf[local_idx] = 0.;
+
+        for (unsigned int i = 1; i < chunk_size; ++i)
+          {
+            const unsigned int idx = global_idx + i * block_size;
+            if (idx < N)
+              {
+                v1[idx] += a * v2[idx];
+                res_buf[local_idx] += v1[idx] * v3[idx];
+              }
+          }
+
+        __syncthreads();
+
+        reduce<Number, DotProduct<Number>>(
+          res, res_buf, local_idx, global_idx, N);
+      }
+
+
+
+      template <typename Number>
+      __global__ void
+      set(Number *val, const Number s, const size_type N)
+      {
+        const size_type idx_base =
+          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
+        for (unsigned int i = 0; i < chunk_size; ++i)
+          {
+            const size_type idx = idx_base + i * block_size;
+            if (idx < N)
+              val[idx] = s;
+          }
+      }
+
+
+
+      template <typename Number>
+      __global__ void
+      set_permutated(Number *         val,
+                     const Number *   v,
+                     const size_type *indices,
+                     const size_type  N)
+      {
+        const size_type idx_base =
+          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
+        for (unsigned int i = 0; i < chunk_size; ++i)
+          {
+            const size_type idx = idx_base + i * block_size;
+            if (idx < N)
+              val[indices[idx]] = v[idx];
+          }
+      }
+
+
+
+      template <typename Number>
+      __global__ void
+      add_permutated(Number *         val,
+                     const Number *   v,
+                     const size_type *indices,
+                     const size_type  N)
+      {
+        const size_type idx_base =
+          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
+        for (unsigned int i = 0; i < chunk_size; ++i)
+          {
+            const size_type idx = idx_base + i * block_size;
+            if (idx < N)
+              val[indices[idx]] += v[idx];
+          }
+      }
+    } // namespace kernel
+  }   // namespace CUDAWrappers
+} // namespace LinearAlgebra
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
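reduction() above implements the classic three-stage scheme: a shared-memory tree reduction with __syncthreads() between rounds down to 64 entries, a synchronization-free warp-level tail on the last 32 entries (hence the volatile buffer in reduce_within_warp()), and finally one atomic per block into the global result. Since the blocks combine into *result atomically, the result slot must be zeroed before the launch, which is what the wrappers in cuda_vector.cu do. A hedged host-side sketch of summing a device array val of n_elements entries, mirroring those wrappers:

    Number *result_device;
    AssertCuda(cudaMalloc(&result_device, sizeof(Number)));
    AssertCuda(cudaMemset(result_device, 0, sizeof(Number)));

    const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
    kernel::reduction<Number, kernel::ElemSum<Number>>
      <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device, val, n_elements);
    AssertCuda(cudaGetLastError());

    Number result;
    AssertCuda(cudaMemcpy(&result,
                          result_device,
                          sizeof(Number),
                          cudaMemcpyDeviceToHost));
    AssertCuda(cudaFree(result_device));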
diff --git a/source/lac/CMakeLists.txt b/source/lac/CMakeLists.txt
index e98cd54e7cd4c6bfc85ba94aa64c2d1503ead32e..0abdfcba5d7ad7479752680581bf0899fe0dfbac 100644 (file)
@@ -146,6 +146,7 @@ ENDIF()
 IF(DEAL_II_WITH_CUDA)
   SET(_separate_src
     ${_separate_src}
+    cuda_kernels.cu
     cuda_solver_direct.cu
     cuda_sparse_matrix.cu
     cuda_vector.cu
diff --git a/source/lac/cuda_kernels.cu b/source/lac/cuda_kernels.cu
new file mode 100644 (file)
index 0000000..1ec88bd
--- /dev/null
@@ -0,0 +1,221 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#include <deal.II/lac/cuda_kernels.templates.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace LinearAlgebra
+{
+  namespace CUDAWrappers
+  {
+    namespace kernel
+    {
+      /////////////////////////////////////////////////////////////////////////
+      // Explicit instantiation                                              //
+      /////////////////////////////////////////////////////////////////////////
+
+      template __global__ void
+      vec_scale<float>(float *, const float a, const size_type);
+      template __global__ void
+      vector_bin_op<float, Binop_Addition>(float *         v1,
+                                           float *         v2,
+                                           const size_type N);
+      template __global__ void
+      vector_bin_op<float, Binop_Subtraction>(float *         v1,
+                                              float *         v2,
+                                              const size_type N);
+      template struct ElemSum<float>;
+      template struct L1Norm<float>;
+      template struct LInfty<float>;
+      template __global__ void
+      reduction<float, ElemSum<float>>(float *         result,
+                                       const float *   v,
+                                       const size_type N);
+      template __global__ void
+      reduction<float, L1Norm<float>>(float *         result,
+                                      const float *   v,
+                                      const size_type N);
+      template __global__ void
+      reduction<float, LInfty<float>>(float *         result,
+                                      const float *   v,
+                                      const size_type N);
+      template struct DotProduct<float>;
+      template __global__ void
+      double_vector_reduction<float, DotProduct<float>>(float *         result,
+                                                        const float *   v1,
+                                                        const float *   v2,
+                                                        const size_type N);
+      template __global__ void
+      vec_add<float>(float *val, const float, const size_type N);
+      template __global__ void
+      add_aV<float>(float *         val,
+                    const float     a,
+                    const float *   V_val,
+                    const size_type N);
+      template __global__ void
+      add_aVbW<float>(float *         val,
+                      const float     a,
+                      const float *   V_val,
+                      const float     b,
+                      const float *   W_val,
+                      const size_type N);
+      template __global__ void
+      sadd<float>(const float     s,
+                  float *         val,
+                  const float     a,
+                  const float *   V_val,
+                  const size_type N);
+      template __global__ void
+      sadd<float>(const float     s,
+                  float *         val,
+                  const float     a,
+                  const float *   V_val,
+                  const float     b,
+                  const float *   W_val,
+                  const size_type N);
+      template __global__ void
+      scale<float>(float *val, const float *V_val, const size_type N);
+      template __global__ void
+      equ<float>(float *         val,
+                 const float     a,
+                 const float *   V_val,
+                 const size_type N);
+      template __global__ void
+      equ(float *         val,
+          const float     a,
+          const float *   V_val,
+          const float     b,
+          const float *   W_val,
+          const size_type N);
+      template __global__ void
+      add_and_dot<float>(float *         res,
+                         float *         v1,
+                         const float *   v2,
+                         const float *   v3,
+                         const float     a,
+                         const size_type N);
+      template __global__ void
+      set<float>(float *val, const float s, const size_type N);
+      template __global__ void
+      set_permutated<float>(float *          val,
+                            const float *    v,
+                            const size_type *indices,
+                            const size_type  N);
+      template __global__ void
+      add_permutated<float>(float *          val,
+                            const float *    v,
+                            const size_type *indices,
+                            const size_type  N);
+
+
+
+      template __global__ void
+      vec_scale<double>(double *, const double a, const size_type);
+      template __global__ void
+      vector_bin_op<double, Binop_Addition>(double *        v1,
+                                            double *        v2,
+                                            const size_type N);
+      template __global__ void
+      vector_bin_op<double, Binop_Subtraction>(double *        v1,
+                                               double *        v2,
+                                               const size_type N);
+      template struct ElemSum<double>;
+      template struct L1Norm<double>;
+      template struct LInfty<double>;
+      template __global__ void
+      reduction<double, ElemSum<double>>(double *        result,
+                                         const double *  v,
+                                         const size_type N);
+      template __global__ void
+      reduction<double, L1Norm<double>>(double *        result,
+                                        const double *  v,
+                                        const size_type N);
+      template __global__ void
+      reduction<double, LInfty<double>>(double *        result,
+                                        const double *  v,
+                                        const size_type N);
+      template struct DotProduct<double>;
+      template __global__ void
+      double_vector_reduction<double, DotProduct<double>>(double *      result,
+                                                          const double *v1,
+                                                          const double *v2,
+                                                          const size_type N);
+      template __global__ void
+      vec_add<double>(double *val, const double, const size_type N);
+      template __global__ void
+      add_aV<double>(double *        val,
+                     const double    a,
+                     const double *  V_val,
+                     const size_type N);
+      template __global__ void
+      add_aVbW<double>(double *        val,
+                       const double    a,
+                       const double *  V_val,
+                       const double    b,
+                       const double *  W_val,
+                       const size_type N);
+      template __global__ void
+      sadd<double>(const double    s,
+                   double *        val,
+                   const double    a,
+                   const double *  V_val,
+                   const size_type N);
+      template __global__ void
+      sadd<double>(const double    s,
+                   double *        val,
+                   const double    a,
+                   const double *  V_val,
+                   const double    b,
+                   const double *  W_val,
+                   const size_type N);
+      template __global__ void
+      scale<double>(double *val, const double *V_val, const size_type N);
+      template __global__ void
+      equ<double>(double *        val,
+                  const double    a,
+                  const double *  V_val,
+                  const size_type N);
+      template __global__ void
+      equ(double *        val,
+          const double    a,
+          const double *  V_val,
+          const double    b,
+          const double *  W_val,
+          const size_type N);
+      template __global__ void
+      add_and_dot<double>(double *        res,
+                          double *        v1,
+                          const double *  v2,
+                          const double *  v3,
+                          const double    a,
+                          const size_type N);
+      template __global__ void
+      set<double>(double *val, const double s, const size_type N);
+      template __global__ void
+      set_permutated<double>(double *         val,
+                             const double *   v,
+                             const size_type *indices,
+                             const size_type  N);
+      template __global__ void
+      add_permutated<double>(double *         val,
+                             const double *   v,
+                             const size_type *indices,
+                             const size_type  N);
+    } // namespace kernel
+  }   // namespace CUDAWrappers
+} // namespace LinearAlgebra
+
+DEAL_II_NAMESPACE_CLOSE
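The three-file split follows the usual deal.II template layout: cuda_kernels.h carries the declarations, cuda_kernels.templates.h the definitions, and this .cu file the explicit instantiations for float and double. A translation unit that needs the kernels for a further scalar type can in principle include the .templates.h header and instantiate them itself; a hypothetical example in a user-side .cu file:

    #include <deal.II/lac/cuda_kernels.templates.h>

    namespace kernel = dealii::LinearAlgebra::CUDAWrappers::kernel;

    // Hypothetical extra instantiation, e.g. for int-valued device arrays.
    template __global__ void
    kernel::set<int>(int *val, const int s, const kernel::size_type N);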
diff --git a/source/lac/cuda_vector.cu b/source/lac/cuda_vector.cu
index 725dc483c3edd66019208f6330186f252f2e426d..c94d1b011e6608d2510d699c27a27792fed3415d 100644 (file)
@@ -17,6 +17,7 @@
 #include <deal.II/base/exceptions.h>
 
 #include <deal.II/lac/cuda_atomic.h>
+#include <deal.II/lac/cuda_kernels.h>
 #include <deal.II/lac/cuda_vector.h>
 #include <deal.II/lac/read_write_vector.h>
 
@@ -32,478 +33,6 @@ namespace LinearAlgebra
   {
     using ::dealii::CUDAWrappers::block_size;
     using ::dealii::CUDAWrappers::chunk_size;
-    namespace internal
-    {
-      template <typename Number>
-      __global__ void
-      vec_scale(Number *                                 val,
-                const Number                             a,
-                const typename Vector<Number>::size_type N)
-      {
-        const typename Vector<Number>::size_type idx_base =
-          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
-        for (unsigned int i = 0; i < chunk_size; ++i)
-          {
-            const typename Vector<Number>::size_type idx =
-              idx_base + i * block_size;
-            if (idx < N)
-              val[idx] *= a;
-          }
-      }
-
-
-
-      struct Binop_Addition
-      {
-        template <typename Number>
-        __device__ static inline Number
-        operation(const Number a, const Number b)
-        {
-          return a + b;
-        }
-      };
-
-
-
-      struct Binop_Subtraction
-      {
-        template <typename Number>
-        __device__ static inline Number
-        operation(const Number a, const Number b)
-        {
-          return a - b;
-        }
-      };
-
-
-
-      template <typename Number, typename Binop>
-      __global__ void
-      vector_bin_op(Number *                                 v1,
-                    Number *                                 v2,
-                    const typename Vector<Number>::size_type N)
-      {
-        const typename Vector<Number>::size_type idx_base =
-          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
-        for (unsigned int i = 0; i < chunk_size; ++i)
-          {
-            const typename Vector<Number>::size_type idx =
-              idx_base + i * block_size;
-            if (idx < N)
-              v1[idx] = Binop::operation(v1[idx], v2[idx]);
-          }
-      }
-
-
-
-      template <typename Number>
-      struct ElemSum
-      {
-        __device__ static Number
-        reduction_op(const Number a, const Number b)
-        {
-          return (a + b);
-        }
-
-        __device__ static Number
-        atomic_op(Number *dst, const Number a)
-        {
-          return atomicAdd_wrapper(dst, a);
-        }
-
-        __device__ static Number
-        element_wise_op(const Number a)
-        {
-          return a;
-        }
-
-        __device__ static Number
-        null_value()
-        {
-          return Number();
-        }
-      };
-
-
-
-      template <typename Number>
-      struct L1Norm
-      {
-        __device__ static Number
-        reduction_op(const Number a, const Number b)
-        {
-          return (a + b);
-        }
-
-        __device__ static Number
-        atomic_op(Number *dst, const Number a)
-        {
-          return atomicAdd_wrapper(dst, a);
-        }
-
-        __device__ static Number
-        element_wise_op(const Number a)
-        {
-          return std::fabs(a);
-        }
-
-        __device__ static Number
-        null_value()
-        {
-          return Number();
-        }
-      };
-
-
-
-      template <typename Number>
-      struct LInfty
-      {
-        __device__ static Number
-        reduction_op(const Number a, const Number b)
-        {
-          if (a > b)
-            return a;
-          else
-            return b;
-        }
-
-        __device__ static Number
-        atomic_op(Number *dst, const Number a)
-        {
-          return atomicMax_wrapper(dst, a);
-        }
-
-        __device__ static Number
-        element_wise_op(const Number a)
-        {
-          return std::fabs(a);
-        }
-
-        __device__ static Number
-        null_value()
-        {
-          return Number();
-        }
-      };
-
-
-
-      template <typename Number, typename Operation>
-      __device__ void
-      reduce_within_warp(volatile Number *                  result_buffer,
-                         typename Vector<Number>::size_type local_idx)
-      {
-        if (block_size >= 64)
-          result_buffer[local_idx] =
-            Operation::reduction_op(result_buffer[local_idx],
-                                    result_buffer[local_idx + 32]);
-        if (block_size >= 32)
-          result_buffer[local_idx] =
-            Operation::reduction_op(result_buffer[local_idx],
-                                    result_buffer[local_idx + 16]);
-        if (block_size >= 16)
-          result_buffer[local_idx] =
-            Operation::reduction_op(result_buffer[local_idx],
-                                    result_buffer[local_idx + 8]);
-        if (block_size >= 8)
-          result_buffer[local_idx] =
-            Operation::reduction_op(result_buffer[local_idx],
-                                    result_buffer[local_idx + 4]);
-        if (block_size >= 4)
-          result_buffer[local_idx] =
-            Operation::reduction_op(result_buffer[local_idx],
-                                    result_buffer[local_idx + 2]);
-        if (block_size >= 2)
-          result_buffer[local_idx] =
-            Operation::reduction_op(result_buffer[local_idx],
-                                    result_buffer[local_idx + 1]);
-      }
-
-
-
-      template <typename Number, typename Operation>
-      __device__ void
-      reduce(Number *                                 result,
-             Number *                                 result_buffer,
-             const typename Vector<Number>::size_type local_idx,
-             const typename Vector<Number>::size_type global_idx,
-             const typename Vector<Number>::size_type N)
-      {
-        for (typename Vector<Number>::size_type s = block_size / 2; s > 32;
-             s                                    = s >> 1)
-          {
-            if (local_idx < s)
-              result_buffer[local_idx] =
-                Operation::reduction_op(result_buffer[local_idx],
-                                        result_buffer[local_idx + s]);
-            __syncthreads();
-          }
-
-        if (local_idx < 32)
-          reduce_within_warp<Number, Operation>(result_buffer, local_idx);
-
-        if (local_idx == 0)
-          Operation::atomic_op(result, result_buffer[0]);
-      }
-
-
-
-      template <typename Number, typename Operation>
-      __global__ void
-      reduction(Number *                                 result,
-                const Number *                           v,
-                const typename Vector<Number>::size_type N)
-      {
-        __shared__ Number result_buffer[block_size];
-
-        const typename Vector<Number>::size_type global_idx =
-          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
-        const typename Vector<Number>::size_type local_idx = threadIdx.x;
-
-        if (global_idx < N)
-          result_buffer[local_idx] = Operation::element_wise_op(v[global_idx]);
-        else
-          result_buffer[local_idx] = Operation::null_value();
-
-        __syncthreads();
-
-        reduce<Number, Operation>(
-          result, result_buffer, local_idx, global_idx, N);
-      }
-
-
-
-      template <typename Number>
-      struct DotProduct
-      {
-        __device__ static Number
-        binary_op(const Number a, const Number b)
-        {
-          return a * b;
-        }
-
-        __device__ static Number
-        reduction_op(const Number a, const Number b)
-        {
-          return a + b;
-        }
-
-        __device__ static Number
-        atomic_op(Number *dst, const Number a)
-        {
-          return atomicAdd_wrapper(dst, a);
-        }
-
-        __device__ static Number
-        null_value()
-        {
-          return Number();
-        }
-      };
-
-
-
-      template <typename Number, typename Operation>
-      __global__ void
-      double_vector_reduction(Number *                                 result,
-                              Number *                                 v1,
-                              Number *                                 v2,
-                              const typename Vector<Number>::size_type N)
-      {
-        __shared__ Number result_buffer[block_size];
-
-        const typename Vector<Number>::size_type global_idx =
-          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
-        const typename Vector<Number>::size_type local_idx = threadIdx.x;
-
-        if (global_idx < N)
-          result_buffer[local_idx] =
-            Operation::binary_op(v1[global_idx], v2[global_idx]);
-        else
-          result_buffer[local_idx] = Operation::null_value();
-
-        for (unsigned int i = 1; i < chunk_size; ++i)
-          {
-            const typename Vector<Number>::size_type idx =
-              global_idx + i * block_size;
-            if (idx < N)
-              result_buffer[local_idx] =
-                Operation::reduction_op(result_buffer[local_idx],
-                                        Operation::binary_op(v1[idx], v2[idx]));
-          }
-
-        __syncthreads();
-
-        reduce<Number, Operation>(
-          result, result_buffer, local_idx, global_idx, N);
-      }
-
-
-
-      template <typename Number>
-      __global__ void
-      vec_add(Number *                                 val,
-              const Number                             a,
-              const typename Vector<Number>::size_type N)
-      {
-        const typename Vector<Number>::size_type idx_base =
-          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
-        for (unsigned int i = 0; i < chunk_size; ++i)
-          {
-            const typename Vector<Number>::size_type idx =
-              idx_base + i * block_size;
-            if (idx < N)
-              val[idx] += a;
-          }
-      }
-
-
-
-      template <typename Number>
-      __global__ void
-      add_aV(Number *                                 val,
-             const Number                             a,
-             Number *                                 V_val,
-             const typename Vector<Number>::size_type N)
-      {
-        const typename Vector<Number>::size_type idx_base =
-          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
-        for (unsigned int i = 0; i < chunk_size; ++i)
-          {
-            const typename Vector<Number>::size_type idx =
-              idx_base + i * block_size;
-            if (idx < N)
-              val[idx] += a * V_val[idx];
-          }
-      }
-
-
-
-      template <typename Number>
-      __global__ void
-      add_aVbW(Number *                                 val,
-               const Number                             a,
-               Number *                                 V_val,
-               const Number                             b,
-               Number *                                 W_val,
-               const typename Vector<Number>::size_type N)
-      {
-        const typename Vector<Number>::size_type idx_base =
-          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
-        for (unsigned int i = 0; i < chunk_size; ++i)
-          {
-            const typename Vector<Number>::size_type idx =
-              idx_base + i * block_size;
-            if (idx < N)
-              val[idx] += a * V_val[idx] + b * W_val[idx];
-          }
-      }
-
-
-
-      template <typename Number>
-      __global__ void
-      sadd(const Number                             s,
-           Number *                                 val,
-           const Number                             a,
-           const Number *                           V_val,
-           const typename Vector<Number>::size_type N)
-      {
-        const typename Vector<Number>::size_type idx_base =
-          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
-        for (unsigned int i = 0; i < chunk_size; ++i)
-          {
-            const typename Vector<Number>::size_type idx =
-              idx_base + i * block_size;
-            if (idx < N)
-              val[idx] = s * val[idx] + a * V_val[idx];
-          }
-      }
-
-
-
-      template <typename Number>
-      __global__ void
-      scale(Number *                                 val,
-            const Number *                           V_val,
-            const typename Vector<Number>::size_type N)
-      {
-        const typename Vector<Number>::size_type idx_base =
-          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
-        for (unsigned int i = 0; i < chunk_size; ++i)
-          {
-            const typename Vector<Number>::size_type idx =
-              idx_base + i * block_size;
-            if (idx < N)
-              val[idx] *= V_val[idx];
-          }
-      }
-
-
-
-      template <typename Number>
-      __global__ void
-      equ(Number *                                 val,
-          const Number                             a,
-          const Number *                           V_val,
-          const typename Vector<Number>::size_type N)
-      {
-        const typename Vector<Number>::size_type idx_base =
-          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
-        for (unsigned int i = 0; i < chunk_size; ++i)
-          {
-            const typename Vector<Number>::size_type idx =
-              idx_base + i * block_size;
-            if (idx < N)
-              val[idx] = a * V_val[idx];
-          }
-      }
-
-
-
-      template <typename Number>
-      __global__ void
-      add_and_dot(Number *                                 res,
-                  Number *                                 v1,
-                  const Number *                           v2,
-                  const Number *                           v3,
-                  const Number                             a,
-                  const typename Vector<Number>::size_type N)
-      {
-        __shared__ Number res_buf[block_size];
-
-        const unsigned int global_idx =
-          threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
-        const unsigned int local_idx = threadIdx.x;
-        if (global_idx < N)
-          {
-            v1[global_idx] += a * v2[global_idx];
-            res_buf[local_idx] =
-              v1[global_idx] *
-              Number(numbers::NumberTraits<Number>::conjugate(v3[global_idx]));
-          }
-        else
-          res_buf[local_idx] = 0.;
-
-        for (unsigned int i = 1; i < block_size; ++i)
-          {
-            const unsigned int idx = global_idx + i * block_size;
-            if (idx < N)
-              {
-                v1[idx] += a * v2[idx];
-                res_buf[local_idx] += v1[idx] * v3[idx];
-              }
-          }
-
-        __syncthreads();
-
-        reduce<Number, DotProduct<Number>>(
-          res, res_buf, local_idx, global_idx, N);
-      }
-    } // namespace internal
-
-
 
     template <typename Number>
     Vector<Number>::Vector()
@@ -633,7 +162,7 @@ namespace LinearAlgebra
           // Add the two vectors
           const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
 
-          internal::vector_bin_op<Number, internal::Binop_Addition>
+          kernel::vector_bin_op<Number, kernel::Binop_Addition>
             <<<n_blocks, block_size>>>(val, tmp, n_elements);
           // Check that the kernel was launched correctly
           AssertCuda(cudaGetLastError());
@@ -671,7 +200,7 @@ namespace LinearAlgebra
     {
       AssertIsFinite(factor);
       const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
-      internal::vec_scale<Number>
+      kernel::vec_scale<Number>
         <<<n_blocks, block_size>>>(val, factor, n_elements);
 
       // Check that the kernel was launched correctly
@@ -691,7 +220,7 @@ namespace LinearAlgebra
       AssertIsFinite(factor);
       Assert(factor != Number(0.), ExcZero());
       const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
-      internal::vec_scale<Number>
+      kernel::vec_scale<Number>
         <<<n_blocks, block_size>>>(val, 1. / factor, n_elements);
 
       // Check that the kernel was launched correctly
@@ -720,7 +249,7 @@ namespace LinearAlgebra
 
       const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
 
-      internal::vector_bin_op<Number, internal::Binop_Addition>
+      kernel::vector_bin_op<Number, kernel::Binop_Addition>
         <<<n_blocks, block_size>>>(val, down_V.val, n_elements);
 
       // Check that the kernel was launched correctly
@@ -749,7 +278,7 @@ namespace LinearAlgebra
 
       const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
 
-      internal::vector_bin_op<Number, internal::Binop_Subtraction>
+      kernel::vector_bin_op<Number, kernel::Binop_Subtraction>
         <<<n_blocks, block_size>>>(val, down_V.val, n_elements);
 
       // Check that the kernel was launched correctly
@@ -782,7 +311,7 @@ namespace LinearAlgebra
       error_code = cudaMemset(result_device, Number(), sizeof(Number));
 
       const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
-      internal::double_vector_reduction<Number, internal::DotProduct<Number>>
+      kernel::double_vector_reduction<Number, kernel::DotProduct<Number>>
         <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
                                                   val,
                                                   down_V.val,
@@ -811,7 +340,7 @@ namespace LinearAlgebra
     {
       AssertIsFinite(a);
       const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
-      internal::vec_add<Number><<<n_blocks, block_size>>>(val, a, n_elements);
+      kernel::vec_add<Number><<<n_blocks, block_size>>>(val, a, n_elements);
 
       // Check that the kernel was launched correctly
       AssertCuda(cudaGetLastError());
@@ -838,7 +367,7 @@ namespace LinearAlgebra
                "Cannot add two vectors with different numbers of elements."));
 
       const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
-      internal::add_aV<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(
+      kernel::add_aV<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(
         val, a, down_V.val, n_elements);
 
       // Check that the kernel was launched correctly
@@ -880,7 +409,7 @@ namespace LinearAlgebra
                "Cannot add two vectors with different numbers of elements."));
 
       const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
-      internal::add_aVbW<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(
+      kernel::add_aVbW<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(
         val, a, down_V.val, b, down_W.val, n_elements);
 
       // Check that the kernel was launched correctly
@@ -911,7 +440,7 @@ namespace LinearAlgebra
                "Cannot add two vectors with different numbers of elements."));
 
       const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
-      internal::sadd<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(
+      kernel::sadd<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(
         s, val, a, down_V.val, n_elements);
 
       // Check that the kernel was launched correctly
@@ -938,7 +467,7 @@ namespace LinearAlgebra
                "Cannot scale two vectors with different numbers of elements."));
 
       const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
-      internal::scale<Number>
+      kernel::scale<Number>
         <<<dim3(n_blocks, 1), dim3(block_size)>>>(val,
                                                   down_scaling_factors.val,
                                                   n_elements);
@@ -969,8 +498,10 @@ namespace LinearAlgebra
           "Cannot assign two vectors with different numbers of elements."));
 
       const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
-      internal::equ<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(
-        val, a, down_V.val, n_elements);
+      kernel::equ<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(val,
+                                                                   a,
+                                                                   down_V.val,
+                                                                   n_elements);
 
       // Check that the kernel was launched correctly
       AssertCuda(cudaGetLastError());
@@ -999,7 +530,7 @@ namespace LinearAlgebra
       error_code = cudaMemset(result_device, Number(), sizeof(Number));
 
       const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
-      internal::reduction<Number, internal::ElemSum<Number>>
+      kernel::reduction<Number, kernel::ElemSum<Number>>
         <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
                                                   val,
                                                   n_elements);
@@ -1031,7 +562,7 @@ namespace LinearAlgebra
       error_code = cudaMemset(result_device, Number(), sizeof(Number));
 
       const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
-      internal::reduction<Number, internal::L1Norm<Number>>
+      kernel::reduction<Number, kernel::L1Norm<Number>>
         <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
                                                   val,
                                                   n_elements);
@@ -1071,7 +602,7 @@ namespace LinearAlgebra
       error_code = cudaMemset(result_device, Number(), sizeof(Number));
 
       const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
-      internal::reduction<Number, internal::LInfty<Number>>
+      kernel::reduction<Number, kernel::LInfty<Number>>
         <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
                                                   val,
                                                   n_elements);
@@ -1121,7 +652,7 @@ namespace LinearAlgebra
       AssertCuda(error_code);
 
       const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
-      internal::add_and_dot<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(
+      kernel::add_and_dot<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(
         res_d, val, down_V.val, down_W.val, a, n_elements);
 
       Number res;
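add_and_dot() above is the fused counterpart of an update followed by an inner product: in a single pass over the data it performs v1 += a*v2 and accumulates the dot product of the updated v1 with v3. A hedged scalar sketch of the semantics (not of the launch), for real-valued Number:

    for (size_type i = 0; i < N; ++i)
      v1[i] += a * v2[i];
    Number res = 0;
    for (size_type i = 0; i < N; ++i)
      res += v1[i] * v3[i]; // conjugate(v3[i]) for complex Number
    return res;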
diff --git a/tests/all-headers/CMakeLists.txt b/tests/all-headers/CMakeLists.txt
index 1d84c7f4c5dc530eab320960acad994660f28c54..9cefd6824b84256afcfbf5a0b179d0d1f51370ee 100644 (file)
@@ -50,6 +50,8 @@ FILE(GLOB_RECURSE _headers RELATIVE ${_include_dir}/deal.II
 # them from the list
 #
 LIST(REMOVE_ITEM _headers "lac/cuda_atomic.h"
+  "lac/cuda_kernels.h"
+  "lac/cuda_kernels.templates.h"
   "matrix_free/cuda_fe_evaluation.h"
   "matrix_free/cuda_matrix_free.templates.h"
   "matrix_free/cuda_tensor_product_kernels.h")
