https://gitweb.dealii.org/ - dealii.git/commitdiff
Add CUDAWrappers::Vector.
author    Bruno Turcksin <bruno.turcksin@gmail.com>
Fri, 13 May 2016 17:31:03 +0000 (13:31 -0400)
committer Bruno Turcksin <bruno.turcksin@gmail.com>
Fri, 26 Aug 2016 20:45:33 +0000 (16:45 -0400)
contrib/utilities/indent
include/deal.II/base/exceptions.h
include/deal.II/lac/cuda_vector.h [new file with mode: 0644]
include/deal.II/lac/read_write_vector.h
include/deal.II/lac/read_write_vector.templates.h
source/lac/CMakeLists.txt
source/lac/cuda_vector.cu [new file with mode: 0644]

index 84809134e099562ca86def809e217f15bc998d27..d31995750fec3a9d92acc1feb6433f09fc29afae 100755 (executable)
@@ -42,7 +42,7 @@ fi
 
 # collect all header and source files and process them in batches of 50 files
 # with up to 10 in parallel
-find tests include source examples \( -name '*.cc' -o -name '*.h' \) -print | xargs -n 50 -P 10 astyle --options=contrib/styles/astyle.rc
+find tests include source examples \( -name '*.cc' -o -name '*.h' -o -name '*.cu' -o -name '*.cuh' \) -print | xargs -n 50 -P 10 astyle --options=contrib/styles/astyle.rc
 
 # format .inst.in files. We need to replace \{ and \} because it confuses
 # astyle.
index 63b338f265d07caf3f0a58e01054a71266a3c7e1..5eca2b25d87922da0f7039d3dda66f6363f31ed2 100644 (file)
@@ -1134,6 +1134,25 @@ namespace StandardExceptions
 #define AssertIsFinite(number) Assert(dealii::numbers::is_finite(number), \
                                       dealii::ExcNumberNotFinite(std::complex<double>(number)))
 
+#ifdef DEAL_II_WITH_CUDA
+/**
+ * An assertion that checks that the error code produced by calling a CUDA
+ * routine is equal to cudaSuccess.
+ *
+ * @ingroup Exceptions
+ * @author Bruno Turcksin, 2016
+ */
+#define CudaAssert(error_code)                                                \
+  {                                                                           \
+    if (error_code != cudaSuccess)                                            \
+      {                                                                       \
+        fprintf(stderr,"Error in %s (%d): %s\n",__FILE__,                     \
+                __LINE__,cudaGetErrorString(error_code));                     \
+        exit(1);                                                              \
+      }                                                                       \
+  }
+#endif
+
 using namespace StandardExceptions;
 
 DEAL_II_NAMESPACE_CLOSE
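
The new CudaAssert macro is used throughout the CUDA sources added below to wrap every raw runtime call. A minimal illustrative sketch of the intended pattern (the buffer and its size are made up for this example and are not part of the patch):

  #ifdef DEAL_II_WITH_CUDA
  // Allocate and zero a small device buffer; CudaAssert aborts with the file,
  // line, and CUDA error string if either runtime call fails.
  double *device_buffer;
  cudaError_t error_code = cudaMalloc(&device_buffer, 16*sizeof(double));
  CudaAssert(error_code);
  error_code = cudaMemset(device_buffer, 0, 16*sizeof(double));
  CudaAssert(error_code);
  #endif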
diff --git a/include/deal.II/lac/cuda_vector.h b/include/deal.II/lac/cuda_vector.h
new file mode 100644 (file)
index 0000000..8a925f6
--- /dev/null
@@ -0,0 +1,277 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2016 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii__cuda_vector_h
+#define dealii__cuda_vector_h
+
+#include <deal.II/base/config.h>
+#include <deal.II/lac/vector_space_vector.h>
+
+
+#ifdef DEAL_II_WITH_CUDA
+
+DEAL_II_NAMESPACE_OPEN
+
+class CommunicationPatternBase;
+class IndexSet;
+template <typename Number> class ReadWriteVector;
+
+namespace LinearAlgebra
+{
+  namespace CUDAWrappers
+  {
+    /**
+     * This class implements a vector using CUDA for use on Nvidia GPUs. This
+     * class is derived from the LinearAlgebra::VectorSpaceVector class.
+     *
+     * @ingroup CUDAWrappers
+     * @ingroup Vectors
+     * @author Karl Ljungkvist, Bruno Turcksin, 2016
+     */
+    template <typename Number>
+    class Vector: public VectorSpaceVector<Number>
+    {
+    public:
+      typedef typename VectorSpaceVector<Number>::size_type  size_type;
+      typedef typename VectorSpaceVector<Number>::real_type  real_type;
+
+      /**
+       * Constructor. Create a vector of dimension zero.
+       */
+      Vector();
+
+      /**
+       * Copy constructor.
+       */
+      Vector(const Vector<Number> &V);
+
+      /**
+       * Constructor. Set dimension to @p n and initialize all elements with
+       * zero.
+       *
+       * The constructor is made explicit to avoid accidents like this:
+       * <tt>v=0;</tt>. Presumably, the user wants to set every element of
+       * the vector to zero, but instead, what happens is this call:
+       * <tt>v=Vector@<Number@>(0);</tt>, i.e. the vector is replaced by one
+       * of length zero.
+       */
+      explicit Vector(const size_type n);
+
+      /**
+       * Destructor.
+       */
+      ~Vector();
+
+      /**
+       * Reinit functionality. The flag <tt>omit_zeroing_entries</tt>
+       * determines whether the vector should be filled with zeros (false) or
+       * left untouched (true).
+       */
+      void reinit(const size_type n,
+                  const bool      omit_zeroing_entries = false);
+
+      /**
+       * Import all the elements from the input vector @p V.
+       * VectorOperation::values @p operation is used to decide if the
+       * elements in @p V should be added to the current vector or should
+       * replace the current elements. The last parameter is not used here;
+       * it is only meaningful for distributed vectors. This is the function
+       * that should be used to copy a vector to the GPU.
+       */
+      virtual void import(const ReadWriteVector<Number> &V,
+                          VectorOperation::values operation,
+                          std_cxx11::shared_ptr<const CommunicationPatternBase> communication_pattern =
+                            std_cxx11::shared_ptr<const CommunicationPatternBase> ()) override;
+
+      /**
+       * Multiply the entire vector by a fixed factor.
+       */
+      virtual Vector<Number> &operator*= (const Number factor) override;
+
+      /**
+       * Divide the entire vector by a fixed factor.
+       */
+      virtual Vector<Number> &operator/= (const Number factor) override;
+
+      /**
+       * Add the vector @p V to the present one.
+       */
+      virtual Vector<Number> &operator+= (const VectorSpaceVector<Number> &V) override;
+
+      /**
+       * Subtract the vector @p V from the present one.
+       */
+      virtual Vector<Number> &operator-= (const VectorSpaceVector<Number> &V) override;
+
+      /**
+       * Return the scalar product of two vectors.
+       */
+      virtual Number operator* (const VectorSpaceVector<Number> &V) const override;
+
+      /**
+       * Add @p a to all components. Note that @p a is a scalar, not a vector.
+       */
+      virtual void add(const Number a) override;
+
+      /**
+       * Simple addition of a multiple of a vector, i.e. <tt>*this += a*V</tt>.
+       */
+      virtual void add(const Number a, const VectorSpaceVector<Number> &V) override;
+
+      /**
+       * Multiple addition of scaled vectors, i.e. <tt>*this += a*V+b*W</tt>.
+       */
+      virtual void add(const Number a, const VectorSpaceVector<Number> &V,
+                       const Number b, const VectorSpaceVector<Number> &W) override;
+
+      /**
+       * Scaling and simple addition of a multiple of a vector, i.e. <tt>*this
+       * = s*(*this)+a*V</tt>
+       */
+      virtual void sadd(const Number s, const Number a,
+                        const VectorSpaceVector<Number> &V) override;
+
+      /**
+       * Scale each element of this vector by the corresponding element in the
+       * argument. This function is mostly meant to simulate multiplication
+       * (and immediate re-assignment) by a diagonal scaling matrix.
+       */
+      virtual void scale(const VectorSpaceVector<Number> &scaling_factors) override;
+
+      /**
+       * Assignment <tt>*this = a*V</tt>.
+       */
+      virtual void equ(const Number a, const VectorSpaceVector<Number> &V) override;
+
+      /**
+       * Return the l<sub>1</sub> norm of the vector (i.e., the sum of the
+       * absolute values of all entries among all processors).
+       */
+      virtual real_type l1_norm() const override;
+
+      /**
+       * Return the l<sub>2</sub> norm of the vector (i.e., the square root of
+       * the sum of the squares of all entries among all processors).
+       */
+      virtual real_type l2_norm() const override;
+
+      /**
+       * Return the maximum norm of the vector (i.e., the maximum absolute
+       * value among all entries and among all processors).
+       */
+      virtual real_type linfty_norm() const override;
+
+      /**
+       * Perform a combined operation of a vector addition and a subsequent
+       * inner product, returning the value of the inner product. In other
+       * words, the result of this function is the same as if the user called
+       * @code
+       * this->add(a, V);
+       * return_value = *this * W;
+       * @endcode
+       *
+       * The reason this function exists is that this operation involves less
+       * memory transfer than calling the two functions separately. This
+       * method only needs to load three vectors, @p this, @p V, @p W, whereas
+       * calling separate methods means loading the calling vector @p this
+       * twice. Since most vector operations are memory transfer limited, this
+       * reduces the time by 25\% (or 50\% if @p W equals @p this).
+       */
+      virtual Number add_and_dot(const Number                     a,
+                                 const VectorSpaceVector<Number> &V,
+                                 const VectorSpaceVector<Number> &W) override;
+
+      /**
+       * Return the pointer to the underlying array.
+       */
+      Number *get_values() const;
+
+      /**
+       * Return the size of the vector.
+       */
+      virtual size_type size() const override;
+
+      /**
+       * Return an index set that describes which elements of this vector are
+       * owned by the current processor, i.e., [0, size).
+       */
+      virtual dealii::IndexSet locally_owned_elements() const override;
+
+      /**
+       * Print the vector to the output stream @p out.
+       */
+      virtual void print(std::ostream       &out,
+                         const unsigned int  precision=2,
+                         const bool          scientific=true,
+                         const bool          across=true) const override;
+
+      /**
+       * Return the memory consumption of this class in bytes.
+       */
+      virtual std::size_t memory_consumption() const override;
+
+      /**
+       * Attempt to perform an operation between two incompatible vector types.
+       *
+       * @ingroup Exceptions
+       */
+      DeclException0(ExcVectorTypeNotCompatible);
+
+    private:
+      /**
+       * Pointer to the array of elements of this vector.
+       */
+      Number *val;
+
+      /**
+       * Number of elements in the vector.
+       */
+      size_type n_elements;
+    };
+
+
+
+    // ------------------------------ Inline functions -----------------------------
+    template <typename Number>
+    inline
+    Number *Vector<Number>::get_values() const
+    {
+      return val;
+    }
+
+
+
+    template <typename Number>
+    inline
+    typename Vector<Number>::size_type Vector<Number>::size() const
+    {
+      return n_elements;
+    }
+
+
+    template <typename Number>
+    inline
+    IndexSet Vector<Number>::locally_owned_elements() const
+    {
+      return complete_index_set(n_elements);
+    }
+  }
+}
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
+
+#endif
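
Taken together, these member functions let a host-side ReadWriteVector act as the staging area for a vector that lives on the GPU. The following is only an illustrative sketch of the intended workflow, not code taken from the patch; it assumes deal.II was configured with DEAL_II_WITH_CUDA and uses made-up sizes and values:

  #include <deal.II/lac/cuda_vector.h>
  #include <deal.II/lac/read_write_vector.h>

  using namespace dealii;

  const unsigned int size = 100;

  // Fill a vector on the host ...
  LinearAlgebra::ReadWriteVector<double> rw_vector(size);
  for (unsigned int i=0; i<size; ++i)
    rw_vector[i] = static_cast<double>(i);

  // ... copy it to the device ...
  LinearAlgebra::CUDAWrappers::Vector<double> gpu_vector(size);
  gpu_vector.import(rw_vector, VectorOperation::insert);

  // ... operate on it there ...
  gpu_vector *= 2.;
  const double norm = gpu_vector.l2_norm();

  // ... and copy the result back to the host (see the ReadWriteVector
  // counterpart added further down in this commit).
  rw_vector.import(gpu_vector, VectorOperation::insert);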
index 71264567a62b12b77a4dec042af254d267f2d66a..cade414d85c8e26dac226ad679a2b340e5e1b254 100644 (file)
@@ -73,6 +73,16 @@ namespace LinearAlgebra
 }
 #endif
 
+#ifdef DEAL_II_WITH_CUDA
+namespace LinearAlgebra
+{
+  namespace CUDAWrappers
+  {
+    template <typename> class Vector;
+  }
+}
+#endif
+
 namespace LinearAlgebra
 {
   /*! @addtogroup Vectors
@@ -306,6 +316,19 @@ namespace LinearAlgebra
                   std_cxx11::shared_ptr<const CommunicationPatternBase> ());
 #endif
 
+#ifdef DEAL_II_WITH_CUDA
+    /**
+     * Import all the elements present in the vector's IndexSet from the input
+     * vector @p cuda_vec. VectorOperation::values @p operation is used to
+     * decide if the elements in @p cuda_vec should be added to the current
+     * vector or should replace the current elements. The last parameter is
+     * not used.
+     */
+    void import(const CUDAWrappers::Vector<Number> &cuda_vec,
+                VectorOperation::values operation,
+                std_cxx11::shared_ptr<const CommunicationPatternBase> communication_pattern =
+                  std_cxx11::shared_ptr<const CommunicationPatternBase> ());
+#endif
+
     /**
      * The value returned by this function denotes the dimension of the vector
      * spaces that are modeled by objects of this kind. However, objects of
index 59ce51a7c495c94a59aee62fa112f09480162fdc..8496f35c6864353137f418241a13e0b525b23099 100644 (file)
 #  include "Epetra_Import.h"
 #endif
 
+#ifdef DEAL_II_WITH_CUDA
+#  include <deal.II/lac/cuda_vector.h>
+#  include <cuda_runtime_api.h>
+#endif
+
 DEAL_II_NAMESPACE_OPEN
 
 
@@ -375,6 +380,39 @@ namespace LinearAlgebra
 
 
 
+#ifdef DEAL_II_WITH_CUDA
+  template <typename Number>
+  void
+  ReadWriteVector<Number>::import(const LinearAlgebra::CUDAWrappers::Vector<Number> &cuda_vec,
+                                  VectorOperation::values                            operation,
+                                  std_cxx11::shared_ptr<const CommunicationPatternBase> )
+  {
+    const unsigned int n_elements = stored_elements.n_elements();
+    if (operation == VectorOperation::insert)
+      {
+        cudaError_t error_code = cudaMemcpy(&val[0], cuda_vec.get_values(),
+                                            n_elements*sizeof(Number),
+                                            cudaMemcpyDeviceToHost);
+        CudaAssert(error_code);
+      }
+    else
+      {
+        // Copy the vector from the device to a temporary vector on the host
+        std::vector<Number> tmp(n_elements);
+        cudaError_t error_code = cudaMemcpy(&tmp[0], cuda_vec.get_values(),
+                                            n_elements*sizeof(Number),
+                                            cudaMemcpyDeviceToHost);
+        CudaAssert(error_code);
+
+        // Add the two vectors
+        for (unsigned int i=0; i<n_elements; ++i)
+          val[i] += tmp[i];
+      }
+  }
+#endif
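
The VectorOperation::add branch above allows a device-side contribution to be accumulated into values that are already present on the host, while VectorOperation::insert overwrites them. A short sketch, continuing the hypothetical rw_vector/gpu_vector pair from the earlier example:

  // rw_vector[i] += gpu_vector[i] for all locally stored elements
  rw_vector.import(gpu_vector, VectorOperation::add);

  // rw_vector[i] = gpu_vector[i], overwriting the host values
  rw_vector.import(gpu_vector, VectorOperation::insert);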
+
+
+
   template <typename Number>
   void
   ReadWriteVector<Number>::swap (ReadWriteVector<Number> &v)
index 924b993baf9b84f1168290ec1510ec54dc739314..ca536009dc84f17faffb222fbbbe2ec906aa17de 100644 (file)
@@ -132,6 +132,14 @@ IF(DEAL_II_WITH_TRILINOS)
   )
 ENDIF()
 
+# Add CUDA wrapper files
+IF(DEAL_II_WITH_CUDA)
+  SET(_src
+    ${_src}
+    cuda_vector.cu
+  )
+ENDIF()
+
 
 
 FILE(GLOB _header
diff --git a/source/lac/cuda_vector.cu b/source/lac/cuda_vector.cu
new file mode 100644 (file)
index 0000000..b954c3f
--- /dev/null
@@ -0,0 +1,1012 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2016 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#include <deal.II/lac/cuda_vector.h>
+#include <deal.II/lac/read_write_vector.h>
+#include <deal.II/base/exceptions.h>
+#include <cmath>
+
+#ifdef DEAL_II_WITH_CUDA
+
+DEAL_II_NAMESPACE_OPEN
+
+#define BLOCK_SIZE 512
+#define CHUNK_SIZE 8
+
+namespace LinearAlgebra
+{
+  namespace CUDAWrappers
+  {
+    namespace internal
+    {
+      template <typename Number>
+      __global__ void vec_scale(Number                                   *val,
+                                const Number                              a,
+                                const typename Vector<Number>::size_type  N)
+      {
+        const typename Vector<Number>::size_type idx_base = threadIdx.x +
+                                                            blockIdx.x *
+                                                            (blockDim.x*CHUNK_SIZE);
+        for (unsigned int i=0; i<CHUNK_SIZE; ++i)
+          {
+            const typename Vector<Number>::size_type idx = idx_base +
+                                                           i*BLOCK_SIZE;
+            if (idx<N)
+              val[idx] *= a;
+          }
+      }
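
All kernels in this file assume the same one-dimensional launch in which every thread handles CHUNK_SIZE entries spaced BLOCK_SIZE apart. A small host-side arithmetic sketch of the resulting grid size (the vector length is only an example, not taken from the patch):

  // With BLOCK_SIZE = 512 and CHUNK_SIZE = 8, one block covers
  // 512*8 = 4096 consecutive vector entries.
  const unsigned int n_elements = 10000;                            // example length
  const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);  // = 3
  // Thread t of block b starts at index t + b*4096 and then strides by
  // BLOCK_SIZE, i.e. it touches t + b*4096 + i*512 for i = 0,...,7.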
+
+
+
+      struct Binop_Addition
+      {
+        template <typename Number>
+        __device__ static inline Number operation(const Number a,
+                                                  const Number b)
+        {
+          return a+b;
+        }
+      };
+
+
+
+      struct Binop_Subtraction
+      {
+        template <typename Number>
+        __device__ static inline Number operation(const Number a,
+                                                  const Number b)
+        {
+          return a-b;
+        }
+      };
+
+
+
+      template <typename Number, typename Binop>
+      __global__ void vector_bin_op(Number                                   *v1,
+                                    Number                                   *v2,
+                                    const typename Vector<Number>::size_type  N)
+      {
+        const typename Vector<Number>::size_type idx_base = threadIdx.x +
+                                                            blockIdx.x *
+                                                            (blockDim.x*CHUNK_SIZE);
+        for (unsigned int i=0; i<CHUNK_SIZE; ++i)
+          {
+            const typename Vector<Number>::size_type idx = idx_base +
+                                                           i*BLOCK_SIZE;
+            if (idx<N)
+              v1[idx] = Binop::operation(v1[idx],v2[idx]);
+          }
+      }
+
+
+      template <typename Number>
+      struct L1Norm
+      {
+        __device__ static Number reduction_op(const Number a, const Number b)
+        {
+          return std::abs(a) + std::abs(b);
+        }
+
+        __device__ static void atomic_op(Number *dst, const Number a)
+        {
+          *dst = std::abs(*dst) + std::abs(a);
+        }
+
+        __device__ static Number null_value()
+        {
+          return Number();
+        }
+      };
+
+
+      template <typename Number>
+      struct LInfty
+      {
+        __device__ static Number reduction_op(const Number a, const Number b)
+        {
+          if  (std::abs(a) > std::abs(b))
+            return std::abs(a);
+          else
+            return std::abs(b);
+        }
+
+        __device__ static void atomic_op(Number *dst, const Number a)
+        {
+          if (std::abs(*dst) < std::abs(a))
+            *dst = std::abs(a);
+          else
+            *dst = std::abs(*dst);
+        }
+
+        __device__ static Number null_value()
+        {
+          return Number();
+        }
+      };
+
+
+
+      template <typename Number, typename Operation>
+      __device__ void reduce_within_warp(volatile Number                    *result_buffer,
+                                         typename Vector<Number>::size_type  local_idx)
+      {
+        if (BLOCK_SIZE >= 64)
+          result_buffer[local_idx] =
+            Operation::reduction_op(result_buffer[local_idx],
+                                    result_buffer[local_idx+32]);
+        if (BLOCK_SIZE >= 32)
+          result_buffer[local_idx] =
+            Operation::reduction_op(result_buffer[local_idx],
+                                    result_buffer[local_idx+16]);
+        if (BLOCK_SIZE >= 16)
+          result_buffer[local_idx] =
+            Operation::reduction_op(result_buffer[local_idx],
+                                    result_buffer[local_idx+8]);
+        if (BLOCK_SIZE >= 8)
+          result_buffer[local_idx] =
+            Operation::reduction_op(result_buffer[local_idx],
+                                    result_buffer[local_idx+4]);
+        if (BLOCK_SIZE >= 4)
+          result_buffer[local_idx] =
+            Operation::reduction_op(result_buffer[local_idx],
+                                    result_buffer[local_idx+2]);
+        if (BLOCK_SIZE >= 2)
+          result_buffer[local_idx] =
+            Operation::reduction_op(result_buffer[local_idx],
+                                    result_buffer[local_idx+1]);
+      }
+
+
+
+      template <typename Number, typename Operation>
+      __device__ void reduce(Number                                   *result,
+                             Number                                   *result_buffer,
+                             const typename Vector<Number>::size_type  local_idx,
+                             const typename Vector<Number>::size_type  global_idx,
+                             const typename Vector<Number>::size_type  N)
+      {
+        for (typename Vector<Number>::size_type s=BLOCK_SIZE/2; s>32; s=s>>1)
+          {
+            if (local_idx < s)
+              result_buffer[local_idx] = Operation::reduction_op(result_buffer[local_idx],
+                                                                 result_buffer[local_idx+s]);
+            __syncthreads();
+          }
+
+        if (local_idx < 32)
+          reduce_within_warp<Number,Operation>(result_buffer, local_idx);
+
+        if (local_idx == 0)
+          Operation::atomic_op(result, result_buffer[0]);
+      }
+
+
+
+      template <typename Number, typename Operation>
+      __global__ void reduction(Number       *result,
+                                const Number *v,
+                                const typename Vector<Number>::size_type N)
+      {
+        __shared__ Number result_buffer[BLOCK_SIZE];
+
+        const typename Vector<Number>::size_type global_idx = threadIdx.x +
+                                                              blockIdx.x*(blockDim.x*CHUNK_SIZE);
+        const typename Vector<Number>::size_type local_idx = threadIdx.x;
+
+        // Initialize the shared buffer from this thread's CHUNK_SIZE entries.
+        result_buffer[local_idx] = Operation::null_value();
+        for (unsigned int i=0; i<CHUNK_SIZE; ++i)
+          {
+            const typename Vector<Number>::size_type idx = global_idx +
+                                                           i*BLOCK_SIZE;
+            if (idx<N)
+              result_buffer[local_idx] =
+                Operation::reduction_op(result_buffer[local_idx], v[idx]);
+          }
+
+        __syncthreads();
+
+        reduce<Number,Operation> (result, result_buffer, local_idx, global_idx, N);
+      }
+
+
+
+      template <typename Number>
+      struct DotProduct
+      {
+        __device__ static Number binary_op(const Number a, const Number b)
+        {
+          return a*b;
+        }
+
+        __device__ static Number reduction_op(const Number a, const Number b)
+        {
+          return a+b;
+        }
+
+        __device__ static void atomic_op(Number *dst, const Number a)
+        {
+          *dst += a;
+        }
+
+        __device__ static Number null_value()
+        {
+          return Number();
+        }
+      };
+
+
+
+      template <typename Number, typename Operation>
+      __global__ void double_vector_reduction(Number       *result,
+                                              Number *v1,
+                                              Number *v2,
+                                              const typename Vector<Number>::size_type N)
+      {
+        __shared__ Number result_buffer[BLOCK_SIZE];
+
+        const typename Vector<Number>::size_type global_idx = threadIdx.x +
+                                                              blockIdx.x*(blockDim.x*CHUNK_SIZE);
+        const typename Vector<Number>::size_type local_idx = threadIdx.x;
+
+        if (global_idx<N)
+          result_buffer[local_idx] = Operation::binary_op(v1[global_idx],v2[global_idx]);
+        else
+          result_buffer[local_idx] = Operation::null_value();
+
+        for (unsigned int i=1; i<CHUNK_SIZE; ++i)
+          {
+            const typename Vector<Number>::size_type idx = global_idx +
+                                                           i*BLOCK_SIZE;
+            if (idx<N)
+              result_buffer[local_idx] =
+                Operation::reduction_op(result_buffer[local_idx],
+                                        Operation::binary_op(v1[idx], v2[idx]));
+          }
+
+        __syncthreads();
+
+        reduce<Number,Operation> (result,result_buffer,local_idx,global_idx,N);
+      }
+
+
+
+      template <typename Number>
+      __global__ void vec_add(Number       *val,
+                              const Number  a,
+                              const typename Vector<Number>::size_type  N)
+      {
+        const typename Vector<Number>::size_type idx_base = threadIdx.x +
+                                                            blockIdx.x *
+                                                            (blockDim.x*CHUNK_SIZE);
+        for (unsigned int i=0; i<CHUNK_SIZE; ++i)
+          {
+            const typename Vector<Number>::size_type idx = idx_base +
+                                                           i*BLOCK_SIZE;
+            if (idx<N)
+              val[idx] += a;
+          }
+      }
+
+
+
+      template <typename Number>
+      __global__ void add_aV(Number       *val,
+                             const Number  a,
+                             Number       *V_val,
+                             const typename Vector<Number>::size_type  N)
+      {
+        const typename Vector<Number>::size_type idx_base = threadIdx.x +
+                                                            blockIdx.x *
+                                                            (blockDim.x*CHUNK_SIZE);
+        for (unsigned int i=0; i<CHUNK_SIZE; ++i)
+          {
+            const typename Vector<Number>::size_type idx = idx_base +
+                                                           i*BLOCK_SIZE;
+            if (idx<N)
+              val[idx] += a*V_val[idx];
+          }
+      }
+
+
+
+      template <typename Number>
+      __global__ void add_aVbW(Number       *val,
+                               const Number  a,
+                               Number       *V_val,
+                               const Number  b,
+                               Number       *W_val,
+                               const typename Vector<Number>::size_type  N)
+      {
+        const typename Vector<Number>::size_type idx_base = threadIdx.x +
+                                                            blockIdx.x *
+                                                            (blockDim.x*CHUNK_SIZE);
+        for (unsigned int i=0; i<CHUNK_SIZE; ++i)
+          {
+            const typename Vector<Number>::size_type idx = idx_base +
+                                                           i*BLOCK_SIZE;
+            if (idx<N)
+              val[idx] += a*V_val[idx] + b*W_val[idx];
+          }
+      }
+
+
+
+      template <typename Number>
+      __global__ void sadd(const Number  s,
+                           Number       *val,
+                           const Number  a,
+                           const Number *V_val,
+                           const typename Vector<Number>::size_type  N)
+      {
+        const typename Vector<Number>::size_type idx_base = threadIdx.x +
+                                                            blockIdx.x *
+                                                            (blockDim.x*CHUNK_SIZE);
+        for (unsigned int i=0; i<CHUNK_SIZE; ++i)
+          {
+            const typename Vector<Number>::size_type idx = idx_base +
+                                                           i*BLOCK_SIZE;
+            if (idx<N)
+              val[idx] = s*val[idx] + a*V_val[idx];
+          }
+      }
+
+
+
+      template <typename Number>
+      __global__ void scale(Number       *val,
+                            const Number *V_val,
+                            const typename Vector<Number>::size_type N)
+      {
+        const typename Vector<Number>::size_type idx_base = threadIdx.x +
+                                                            blockIdx.x *
+                                                            (blockDim.x*CHUNK_SIZE);
+        for (unsigned int i=0; i<CHUNK_SIZE; ++i)
+          {
+            const typename Vector<Number>::size_type idx = idx_base +
+                                                           i*BLOCK_SIZE;
+            if (idx<N)
+              val[idx] *= V_val[idx];
+          }
+      }
+
+
+
+      template <typename Number>
+      __global__ void equ(Number       *val,
+                          const Number a,
+                          const Number *V_val,
+                          const typename Vector<Number>::size_type N)
+      {
+        const typename Vector<Number>::size_type idx_base = threadIdx.x +
+                                                            blockIdx.x *
+                                                            (blockDim.x*CHUNK_SIZE);
+        for (unsigned int i=0; i<CHUNK_SIZE; ++i)
+          {
+            const typename Vector<Number>::size_type idx = idx_base +
+                                                           i*BLOCK_SIZE;
+            if (idx<N)
+              val[idx] = a * V_val[idx];
+          }
+      }
+
+
+
+      template <typename Number>
+      __global__ void add_and_dot(Number       *res,
+                                  Number       *v1,
+                                  const Number *v2,
+                                  const Number *v3,
+                                  const Number  a,
+                                  const typename Vector<Number>::size_type N)
+      {
+        __shared__ Number res_buf[BLOCK_SIZE];
+
+        const unsigned int global_idx = threadIdx.x + blockIdx.x *
+                                        (blockDim.x*CHUNK_SIZE);
+        const unsigned int local_idx = threadIdx.x;
+        if (global_idx < N)
+          {
+            v1[global_idx] += a*v2[global_idx];
+            res_buf[local_idx] = v1[global_idx]*v3[global_idx];
+          }
+        else
+          res_buf[local_idx] = 0.;
+
+        for (unsigned int i=1; i<CHUNK_SIZE; ++i)
+          {
+            const unsigned int idx = global_idx + i*BLOCK_SIZE;
+            if (idx < N)
+              {
+                v1[idx] += a*v2[idx];
+                res_buf[local_idx] += v1[idx]*v3[idx];
+              }
+          }
+
+        __syncthreads();
+
+        // Combine the per-thread partial results of this block into res.
+        reduce<Number, DotProduct<Number>> (res, res_buf, local_idx, global_idx, N);
+      }
+    }
+
+
+
+    template <typename Number>
+    Vector<Number>::Vector()
+      :
+      val(nullptr),
+      n_elements(0)
+    {}
+
+
+
+    template <typename Number>
+    Vector<Number>::Vector(const Vector<Number> &V)
+      :
+      n_elements(V.n_elements)
+    {
+      // Allocate the memory
+      cudaError_t error_code = cudaMalloc(&val, n_elements*sizeof(Number));
+      CudaAssert(error_code);
+      // Copy the values.
+      error_code = cudaMemcpy(val, V.val,n_elements*sizeof(Number),
+                              cudaMemcpyDeviceToDevice);
+      CudaAssert(error_code);
+    }
+
+
+
+    template <typename Number>
+    Vector<Number>::Vector(const size_type n)
+      :
+      n_elements(n)
+    {
+      // Allocate the memory
+      cudaError_t error_code = cudaMalloc(&val, n_elements*sizeof(Number));
+      CudaAssert(error_code);
+    }
+
+
+
+    template <typename Number>
+    Vector<Number>::~Vector()
+    {
+      if (val != nullptr)
+        {
+          cudaError_t error_code = cudaFree(val);
+          CudaAssert(error_code);
+          val = nullptr;
+          n_elements = 0;
+        }
+    }
+
+
+
+    template <typename Number>
+    void Vector<Number>::reinit(const size_type n,
+                                const bool      omit_zeroing_entries)
+    {
+      // Resize the underlying array if necessary
+      if (n == 0)
+        {
+          if (val != nullptr)
+            {
+              cudaError_t error_code = cudaFree(val);
+              CudaAssert(error_code);
+              val = nullptr;
+            }
+        }
+      else
+        {
+          // Reallocate only if the size actually changes; otherwise the
+          // existing allocation can be reused.
+          if (n_elements != n)
+            {
+              if (val != nullptr)
+                {
+                  cudaError_t error_code = cudaFree(val);
+                  CudaAssert(error_code);
+                }
+              cudaError_t error_code = cudaMalloc(&val, n*sizeof(Number));
+              CudaAssert(error_code);
+            }
+
+          // If necessary set the elements to zero
+          if (omit_zeroing_entries == false)
+            {
+              cudaError_t error_code = cudaMemset(val, 0,
+                                                  n*sizeof(Number));
+              CudaAssert(error_code);
+            }
+        }
+      n_elements = n;
+    }
+
+
+
+    template <typename Number>
+    void Vector<Number>::import(const ReadWriteVector<Number> &V,
+                                VectorOperation::values operation,
+                                std_cxx11::shared_ptr<const CommunicationPatternBase> )
+    {
+      if (operation == VectorOperation::insert)
+        {
+          cudaError_t error_code = cudaMemcpy(val, V.begin(),
+                                              n_elements*sizeof(Number),
+                                              cudaMemcpyHostToDevice);
+          CudaAssert(error_code);
+        }
+      else
+        {
+          // Create a temporary vector on the device
+          Number *tmp;
+          cudaError_t error_code = cudaMalloc(&tmp, n_elements*sizeof(Number));
+          CudaAssert(error_code);
+
+          // Copy the vector from the host to the temporary vector on the device
+          error_code = cudaMemcpy(&tmp[0], V.begin(), n_elements*sizeof(Number),
+                                  cudaMemcpyHostToDevice);
+          CudaAssert(error_code);
+
+          // Add the two vectors
+          const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+
+          internal::vector_bin_op<Number,internal::Binop_Addition>
+          <<<n_blocks,BLOCK_SIZE>>>(val, tmp, n_elements);
+          // Check that the kernel was launched correctly
+          CudaAssert(cudaGetLastError());
+          // Check that there was no problem during the execution of the kernel
+          CudaAssert(cudaDeviceSynchronize());
+
+          // Delete the temporary vector
+          error_code = cudaFree(tmp);
+          CudaAssert(error_code);
+        }
+    }
+
+
+
+    template <typename Number>
+    Vector<Number> &Vector<Number>::operator*= (const Number factor)
+    {
+      AssertIsFinite(factor);
+      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+      internal::vec_scale<Number> <<<n_blocks,BLOCK_SIZE>>>(val,
+                                                            factor, n_elements);
+
+      // Check that the kernel was launched correctly
+      CudaAssert(cudaGetLastError());
+      // Check that there was no problem during the execution of the kernel
+      CudaAssert(cudaDeviceSynchronize());
+
+      return *this;
+    }
+
+
+
+    template <typename Number>
+    Vector<Number> &Vector<Number>::operator/= (const Number factor)
+    {
+      AssertIsFinite(factor);
+      Assert(factor!=Number(0.), ExcZero());
+      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+      internal::vec_scale<Number> <<<n_blocks,BLOCK_SIZE>>>(val,
+                                                            1./factor, n_elements);
+
+      // Check that the kernel was launched correctly
+      CudaAssert(cudaGetLastError());
+      // Check that there was no problem during the execution of the kernel
+      CudaAssert(cudaDeviceSynchronize());
+
+      return *this;
+    }
+
+
+
+    template <typename Number>
+    Vector<Number> &Vector<Number>::operator+= (const VectorSpaceVector<Number> &V)
+    {
+      // Check that casting will work
+      Assert(dynamic_cast<const Vector<Number>*>(&V)!=nullptr,
+             ExcVectorTypeNotCompatible());
+
+      // Downcast V. If the cast fails, an exception is thrown.
+      const Vector<Number> &down_V = dynamic_cast<const Vector<Number>&>(V);
+      Assert(down_V.size()==this->size(),
+             ExcMessage("Cannot add two vectors with different numbers of elements"));
+
+      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+
+      internal::vector_bin_op<Number,internal::Binop_Addition>
+      <<<n_blocks,BLOCK_SIZE>>>(val, down_V.val, n_elements);
+
+      // Check that the kernel was launched correctly
+      CudaAssert(cudaGetLastError());
+      // Check that there was no problem during the execution of the kernel
+      CudaAssert(cudaDeviceSynchronize());
+
+      return *this;
+    }
+
+
+
+    template <typename Number>
+    Vector<Number> &Vector<Number>::operator-= (const VectorSpaceVector<Number> &V)
+    {
+      // Check that casting will work
+      Assert(dynamic_cast<const Vector<Number>*>(&V)!=nullptr,
+             ExcVectorTypeNotCompatible());
+
+      // Downcast V. If the cast fails, an exception is thrown.
+      const Vector<Number> &down_V = dynamic_cast<const Vector<Number>&>(V);
+      Assert(down_V.size()==this->size(),
+             ExcMessage("Cannot subtract two vectors with different numbers of elements."));
+
+      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+
+      internal::vector_bin_op<Number,internal::Binop_Subtraction>
+      <<<n_blocks,BLOCK_SIZE>>>(val, down_V.val, n_elements);
+
+      // Check that the kernel was launched correctly
+      CudaAssert(cudaGetLastError());
+      // Check that there was no problem during the execution of the kernel
+      CudaAssert(cudaDeviceSynchronize());
+
+      return *this;
+    }
+
+
+
+    template <typename Number>
+    Number Vector<Number>::operator* (const VectorSpaceVector<Number> &V) const
+    {
+      // Check that casting will work
+      Assert(dynamic_cast<const Vector<Number>*>(&V)!=nullptr,
+             ExcVectorTypeNotCompatible());
+
+      // Downcast V. If the cast fails, an exception is thrown.
+      const Vector<Number> &down_V = dynamic_cast<const Vector<Number>&>(V);
+      Assert(down_V.size()==this->size(),
+             ExcMessage("Cannot compute the scalar product of two vectors with "
+                        "different numbers of elements."));
+
+      // Allocate a single Number on the device to hold the result
+      Number *result_device;
+      cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
+      CudaAssert(error_code);
+      error_code = cudaMemset(result_device, 0, sizeof(Number));
+      CudaAssert(error_code);
+
+      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+      internal::double_vector_reduction<Number, internal::DotProduct<Number>>
+          <<<dim3(n_blocks,1),dim3(BLOCK_SIZE)>>> (result_device, val,
+                                                   down_V.val,
+                                                   static_cast<unsigned int>(n_elements));
+
+      // Copy the result back to the host
+      Number result;
+      error_code = cudaMemcpy(&result, result_device, sizeof(Number),
+                              cudaMemcpyDeviceToHost);
+      CudaAssert(error_code);
+      // Free the memory on the device
+      error_code = cudaFree(result_device);
+      CudaAssert(error_code);
+
+      return result;
+    }
+
+
+
+    template <typename Number>
+    void Vector<Number>::add(const Number a)
+    {
+      AssertIsFinite(a);
+      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+      internal::vec_add<Number> <<<n_blocks,BLOCK_SIZE>>>(val, a,
+                                                          n_elements);
+
+      // Check that the kernel was launched correctly
+      CudaAssert(cudaGetLastError());
+      // Check that there was no problem during the execution of the kernel
+      CudaAssert(cudaDeviceSynchronize());
+    }
+
+
+
+    template <typename Number>
+    void Vector<Number>::add(const Number a, const VectorSpaceVector<Number> &V)
+    {
+      AssertIsFinite(a);
+
+      // Check that casting will work.
+      Assert(dynamic_cast<const Vector<Number>*>(&V) != nullptr,
+             ExcVectorTypeNotCompatible());
+
+      // Downcast V. If the cast fails, an exception is thrown.
+      const Vector<Number> &down_V = dynamic_cast<const Vector<Number>&>(V);
+      Assert(down_V.size() == this->size(),
+             ExcMessage("Cannot add two vectors with different numbers of elements."));
+
+      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+      internal::add_aV<Number> <<<dim3(n_blocks,1),dim3(BLOCK_SIZE)>>> (val,
+          a, down_V.val, n_elements);
+
+      // Check that the kernel was launched correctly
+      CudaAssert(cudaGetLastError());
+      // Check that there was no problem during the execution of the kernel
+      CudaAssert(cudaDeviceSynchronize());
+    }
+
+
+
+    template <typename Number>
+    void Vector<Number>::add(const Number a, const VectorSpaceVector<Number> &V,
+                             const Number b, const VectorSpaceVector<Number> &W)
+    {
+      AssertIsFinite(a);
+      AssertIsFinite(b);
+
+      // Check that casting will work.
+      Assert(dynamic_cast<const Vector<Number>*>(&V) != nullptr,
+             ExcVectorTypeNotCompatible());
+
+      // Downcast V. If the cast fails, an exception is thrown.
+      const Vector<Number> &down_V = dynamic_cast<const Vector<Number>&>(V);
+      Assert(down_V.size() == this->size(),
+             ExcMessage("Cannot add two vectors with different numbers of elements."));
+
+      // Check that casting will work.
+      Assert(dynamic_cast<const Vector<Number>*>(&W) != nullptr,
+             ExcVectorTypeNotCompatible());
+
+      // Downcast W. If the cast fails, an exception is thrown.
+      const Vector<Number> &down_W = dynamic_cast<const Vector<Number>&>(W);
+      Assert(down_W.size() == this->size(),
+             ExcMessage("Cannot add two vectors with different numbers of elements."));
+
+      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+      internal::add_aVbW<Number> <<<dim3(n_blocks,1),dim3(BLOCK_SIZE)>>> (val,
+          a, down_V.val, b, down_W.val, n_elements);
+
+      // Check that the kernel was launched correctly
+      CudaAssert(cudaGetLastError());
+      // Check that there was no problem during the execution of the kernel
+      CudaAssert(cudaDeviceSynchronize());
+    }
+
+
+
+    template <typename Number>
+    void Vector<Number>::sadd(const Number s, const Number a,
+                              const VectorSpaceVector<Number> &V)
+    {
+      AssertIsFinite(s);
+      AssertIsFinite(a);
+
+      // Check that casting will work.
+      Assert(dynamic_cast<const Vector<Number>*>(&V) != nullptr,
+             ExcVectorTypeNotCompatible());
+
+      // Downcast V. If the cast fails, an exception is thrown.
+      const Vector<Number> &down_V = dynamic_cast<const Vector<Number>&>(V);
+      Assert(down_V.size() == this->size(),
+             ExcMessage("Cannot add two vectors with different numbers of elements."));
+
+      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+      internal::sadd<Number> <<<dim3(n_blocks,1),dim3(BLOCK_SIZE)>>> (s, val,
+          a, down_V.val, n_elements);
+
+      // Check that the kernel was launched correctly
+      CudaAssert(cudaGetLastError());
+      // Check that there was no problem during the execution of the kernel
+      CudaAssert(cudaDeviceSynchronize());
+    }
+
+
+
+    template <typename Number>
+    void Vector<Number>::scale(const VectorSpaceVector<Number> &scaling_factors)
+    {
+      // Check that casting will work.
+      Assert(dynamic_cast<const Vector<Number>*>(&scaling_factors) != nullptr,
+             ExcVectorTypeNotCompatible());
+
+      // Downcast scaling_factors. If the cast fails, an exception is thrown.
+      const Vector<Number> &down_scaling_factors =
+        dynamic_cast<const Vector<Number>&>(scaling_factors);
+      Assert(down_scaling_factors.size() == this->size(),
+             ExcMessage("Cannot scale two vectors with different numbers of elements."));
+
+      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+      internal::scale<Number> <<<dim3(n_blocks,1),dim3(BLOCK_SIZE)>>> (val,
+          down_scaling_factors.val, n_elements);
+
+      // Check that the kernel was launched correctly
+      CudaAssert(cudaGetLastError());
+      // Check that there was no problem during the execution of the kernel
+      CudaAssert(cudaDeviceSynchronize());
+    }
+
+
+
+    template <typename Number>
+    void Vector<Number>::equ(const Number a, const VectorSpaceVector<Number> &V)
+    {
+      AssertIsFinite(a);
+
+      // Check that casting will work.
+      Assert(dynamic_cast<const Vector<Number>*>(&V) != nullptr,
+             ExcVectorTypeNotCompatible());
+
+      // Downcast V. If the cast fails, an exception is thrown.
+      const Vector<Number> &down_V = dynamic_cast<const Vector<Number>&>(V);
+      Assert(down_V.size() == this->size(),
+             ExcMessage("Cannot assign two vectors with different numbers of elements."));
+
+      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+      internal::equ<Number> <<<dim3(n_blocks,1),dim3(BLOCK_SIZE)>>> (val, a,
+          down_V.val, n_elements);
+
+      // Check that the kernel was launched correctly
+      CudaAssert(cudaGetLastError());
+      // Check that there was no problem during the execution of the kernel
+      CudaAssert(cudaDeviceSynchronize());
+    }
+
+
+
+    template <typename Number>
+    typename Vector<Number>::real_type Vector<Number>::l1_norm() const
+    {
+      Number *result_device;
+      cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
+      CudaAssert(error_code);
+      error_code = cudaMemset(result_device, 0, sizeof(Number));
+      CudaAssert(error_code);
+
+      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+      internal::reduction<Number, internal::L1Norm<Number>>
+                                                         <<<dim3(n_blocks,1),dim3(BLOCK_SIZE)>>> (
+                                                           result_device, val,
+                                                           n_elements);
+
+      // Copy the result back to the host
+      Number result;
+      error_code = cudaMemcpy(&result, result_device, sizeof(Number),
+                              cudaMemcpyDeviceToHost);
+      CudaAssert(error_code);
+      // Free the memory on the device
+      error_code = cudaFree(result_device);
+      CudaAssert(error_code);
+
+      return result;
+    }
+
+
+
+    template <typename Number>
+    typename Vector<Number>::real_type Vector<Number>::l2_norm() const
+    {
+      return std::sqrt((*this)*(*this));
+    }
+
+
+
+    template <typename Number>
+    typename Vector<Number>::real_type Vector<Number>::linfty_norm() const
+    {
+      Number *result_device;
+      cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
+      CudaAssert(error_code);
+      error_code = cudaMemset(result_device, 0, sizeof(Number));
+      CudaAssert(error_code);
+
+      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+      internal::reduction<Number, internal::LInfty<Number>>
+                                                         <<<dim3(n_blocks,1),dim3(BLOCK_SIZE)>>> (
+                                                           result_device, val,
+                                                           n_elements);
+
+      // Copy the result back to the host
+      Number result;
+      error_code = cudaMemcpy(&result, result_device, sizeof(Number),
+                              cudaMemcpyDeviceToHost);
+      CudaAssert(error_code);
+      // Free the memory on the device
+      error_code = cudaFree(result_device);
+      CudaAssert(error_code);
+
+      return result;
+    }
+
+
+
+    template <typename Number>
+    Number Vector<Number>::add_and_dot(const Number                     a,
+                                       const VectorSpaceVector<Number> &V,
+                                       const VectorSpaceVector<Number> &W)
+    {
+      AssertIsFinite(a);
+
+      // Check that casting will work
+      Assert(dynamic_cast<const Vector<Number>*>(&V)!=nullptr,
+             ExcVectorTypeNotCompatible());
+      Assert(dynamic_cast<const Vector<Number>*>(&W)!=nullptr,
+             ExcVectorTypeNotCompatible());
+
+      // Downcast V and W. If either cast fails, an exception is thrown.
+      const Vector<Number> &down_V = dynamic_cast<const Vector<Number>&>(V);
+      Assert(down_V.size() == this->size(),
+             ExcMessage("Vector V has the wrong size."));
+      const Vector<Number> &down_W = dynamic_cast<const Vector<Number>&>(W);
+      Assert(down_W.size() == this->size(),
+             ExcMessage("Vector W has the wrong size."));
+
+      Number *res_d;
+      cudaError_t error_code = cudaMalloc(&res_d, sizeof(Number));
+      CudaAssert(error_code);
+      error_code = cudaMemset(res_d, 0., sizeof(Number));
+      CudaAssert(error_code);
+
+      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+      internal::add_and_dot<Number> <<<dim3(n_blocks,1),dim3(BLOCK_SIZE)>>>(
+        res_d, val, down_V.val, down_W.val, a, n_elements);
+
+      Number res;
+      error_code = cudaMemcpy(&res, res_d, sizeof(Number), cudaMemcpyDeviceToHost);
+      CudaAssert(error_code);
+      error_code = cudaFree(res_d);
+      CudaAssert(error_code);
+
+      return res;
+    }
+
+
+
+    template <typename Number>
+    void Vector<Number>::print(std::ostream       &out,
+                               const unsigned int  precision,
+                               const bool          scientific,
+                               const bool          across) const
+    {
+      AssertThrow(out, ExcIO());
+      std::ios::fmtflags old_flags = out.flags();
+      const std::streamsize old_precision = out.precision (precision);
+
+      if (scientific)
+        out.setf (std::ios::scientific, std::ios::floatfield);
+      else
+        out.setf (std::ios::fixed, std::ios::floatfield);
+
+      out << "IndexSet: ";
+      complete_index_set(n_elements).print(out);
+      out << std::endl;
+
+      // Copy the vector to the host
+      Number *cpu_val = new Number[n_elements];
+      cudaError_t error_code = cudaMemcpy(cpu_val, val,
+                                          n_elements*sizeof(Number),
+                                          cudaMemcpyDeviceToHost);
+      CudaAssert(error_code);
+      for (unsigned int i=0; i<n_elements; ++i)
+        out << cpu_val[i] << std::endl;
+      out << std::flush;
+      delete [] cpu_val;
+      cpu_val = nullptr;
+
+
+      AssertThrow (out, ExcIO());
+      // reset output format
+      out.flags (old_flags);
+      out.precision(old_precision);
+    }
+
+
+
+    template <typename Number>
+    std::size_t Vector<Number>::memory_consumption() const
+    {
+      std::size_t memory = sizeof(*this);
+      memory += sizeof (Number) * static_cast<std::size_t>(n_elements);
+
+      return memory;
+    }
+
+
+
+    // Explicit instantiations
+    template class Vector<float>;
+    template class Vector<double>;
+  }
+}
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
