From: Bruno Turcksin
Date: Fri, 13 May 2016 17:31:03 +0000 (-0400)
Subject: Add CUDAWrappers::Vector.
X-Git-Tag: v8.5.0-rc1~695^2~3
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=14cdf0fdc986546db5209788b8951b1365eb3a6f;p=dealii.git

Add CUDAWrappers::Vector.
---

diff --git a/contrib/utilities/indent b/contrib/utilities/indent
index 84809134e0..d31995750f 100755
--- a/contrib/utilities/indent
+++ b/contrib/utilities/indent
@@ -42,7 +42,7 @@ fi
 
 # collect all header and source files and process them in batches of 50 files
 # with up to 10 in parallel
-find tests include source examples \( -name '*.cc' -o -name '*.h' \) -print | xargs -n 50 -P 10 astyle --options=contrib/styles/astyle.rc
+find tests include source examples \( -name '*.cc' -o -name '*.h' -o -name '*.cu' -o -name '*.cuh' \) -print | xargs -n 50 -P 10 astyle --options=contrib/styles/astyle.rc
 
 # format .inst.in files. We need to replace \{ and \} because it confuses
 # astyle.
diff --git a/include/deal.II/base/exceptions.h b/include/deal.II/base/exceptions.h
index 63b338f265..5eca2b25d8 100644
--- a/include/deal.II/base/exceptions.h
+++ b/include/deal.II/base/exceptions.h
@@ -1134,6 +1134,25 @@ namespace StandardExceptions
 #define AssertIsFinite(number) Assert(dealii::numbers::is_finite(number), \
                                       dealii::ExcNumberNotFinite(std::complex<double>(number)))
 
+#ifdef DEAL_II_WITH_CUDA
+/**
+ * An assertion that checks that the error code produced by calling a CUDA
+ * routine is equal to cudaSuccess.
+ *
+ * @ingroup Exceptions
+ * @author Bruno Turcksin, 2016
+ */
+#define CudaAssert(error_code)                                      \
+  {                                                                 \
+    if (error_code != cudaSuccess)                                  \
+      {                                                             \
+        fprintf(stderr,"Error in %s (%d): %s\n",__FILE__,           \
+                __LINE__,cudaGetErrorString(error_code));           \
+        exit(1);                                                    \
+      }                                                             \
+  }
+#endif
+
 using namespace StandardExceptions;
 
 DEAL_II_NAMESPACE_CLOSE
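
As an illustration of how CudaAssert is meant to be used (a minimal standalone sketch, not part of this commit; the macro body is repeated here so the snippet compiles on its own, whereas in the library it comes from deal.II/base/exceptions.h when DEAL_II_WITH_CUDA is defined):

  // cuda_assert_demo.cu -- hypothetical file name, only for illustration.
  // Every CUDA runtime call returns a cudaError_t; wrapping the call in
  // CudaAssert reports failures with file/line information and aborts.
  #include <cstdio>
  #include <cstdlib>
  #include <cuda_runtime.h>

  #define CudaAssert(error_code)                                      \
    {                                                                 \
      if (error_code != cudaSuccess)                                  \
        {                                                             \
          fprintf(stderr,"Error in %s (%d): %s\n",__FILE__,           \
                  __LINE__,cudaGetErrorString(error_code));           \
          exit(1);                                                    \
        }                                                             \
    }

  int main()
  {
    const unsigned int n = 1024;
    double *device_buffer;

    // Same pattern as in cuda_vector.cu below: check every runtime call.
    cudaError_t error_code = cudaMalloc(&device_buffer, n*sizeof(double));
    CudaAssert(error_code);

    error_code = cudaMemset(device_buffer, 0, n*sizeof(double));
    CudaAssert(error_code);

    error_code = cudaFree(device_buffer);
    CudaAssert(error_code);

    return 0;
  }
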
diff --git a/include/deal.II/lac/cuda_vector.h b/include/deal.II/lac/cuda_vector.h
new file mode 100644
index 0000000000..8a925f6346
--- /dev/null
+++ b/include/deal.II/lac/cuda_vector.h
@@ -0,0 +1,277 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2016 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii__cuda_vector_h
+#define dealii__cuda_vector_h
+
+#include <deal.II/base/config.h>
+#include <deal.II/lac/vector_space_vector.h>
+
+
+#ifdef DEAL_II_WITH_CUDA
+
+DEAL_II_NAMESPACE_OPEN
+
+class CommunicationPatternBase;
+class IndexSet;
+template <typename Number> class ReadWriteVector;
+
+namespace LinearAlgebra
+{
+  namespace CUDAWrappers
+  {
+    /**
+     * This class implements a vector using CUDA for use on Nvidia GPUs. This
+     * class is derived from the LinearAlgebra::VectorSpaceVector class.
+     *
+     * @ingroup CUDAWrappers
+     * @ingroup Vectors
+     * @author Karl Ljungkvist, Bruno Turcksin, 2016
+     */
+    template <typename Number>
+    class Vector: public VectorSpaceVector<Number>
+    {
+    public:
+      typedef typename VectorSpaceVector<Number>::size_type size_type;
+      typedef typename VectorSpaceVector<Number>::real_type real_type;
+
+      /**
+       * Constructor. Create a vector of dimension zero.
+       */
+      Vector();
+
+      /**
+       * Copy constructor.
+       */
+      Vector(const Vector<Number> &V);
+
+      /**
+       * Constructor. Set dimension to @p n and initialize all elements with
+       * zero.
+       *
+       * The constructor is made explicit to avoid accidents like this:
+       * v=0;. Presumably, the user wants to set every element of
+       * the vector to zero, but instead, what happens is this call:
+       * v=Vector@<Number@>(0);, i.e. the vector is replaced by one
+       * of length zero.
+       */
+      explicit Vector(const size_type n);
+
+      /**
+       * Destructor.
+       */
+      ~Vector();
+
+      /**
+       * Reinit functionality. The flag omit_zeroing_entries
+       * determines whether the vector should be filled with zeros (false) or
+       * left untouched (true).
+       */
+      void reinit(const size_type n,
+                  const bool omit_zeroing_entries = false);
+
+      /**
+       * Import all the elements from the input vector @p V.
+       * VectorOperation::values @p operation is used to decide if the
+       * elements in @p V should be added to the current vector or replace
+       * the current elements. The last parameter is not used; it is only
+       * meaningful for distributed vectors. This is the function that should
+       * be used to copy a vector to the GPU.
+       */
+      virtual void import(const ReadWriteVector<Number> &V,
+                          VectorOperation::values operation,
+                          std_cxx11::shared_ptr<const CommunicationPatternBase> communication_pattern =
+                            std_cxx11::shared_ptr<const CommunicationPatternBase> ()) override;
+
+      /**
+       * Multiply the entire vector by a fixed factor.
+       */
+      virtual Vector<Number> &operator*= (const Number factor) override;
+
+      /**
+       * Divide the entire vector by a fixed factor.
+       */
+      virtual Vector<Number> &operator/= (const Number factor) override;
+
+      /**
+       * Add the vector @p V to the present one.
+       */
+      virtual Vector<Number> &operator+= (const VectorSpaceVector<Number> &V) override;
+
+      /**
+       * Subtract the vector @p V from the present one.
+       */
+      virtual Vector<Number> &operator-= (const VectorSpaceVector<Number> &V) override;
+
+      /**
+       * Return the scalar product of two vectors.
+       */
+      virtual Number operator* (const VectorSpaceVector<Number> &V) const override;
+
+      /**
+       * Add @p a to all components. Note that @p a is a scalar, not a vector.
+       */
+      virtual void add(const Number a) override;
+
+      /**
+       * Simple addition of a multiple of a vector, i.e. *this += a*V.
+       */
+      virtual void add(const Number a, const VectorSpaceVector<Number> &V) override;
+
+      /**
+       * Multiple addition of scaled vectors, i.e. *this += a*V+b*W.
+       */
+      virtual void add(const Number a, const VectorSpaceVector<Number> &V,
+                       const Number b, const VectorSpaceVector<Number> &W) override;
+
+      /**
+       * Scaling and simple addition of a multiple of a vector, i.e. *this
+       * = s*(*this)+a*V.
+       */
+      virtual void sadd(const Number s, const Number a,
+                        const VectorSpaceVector<Number> &V) override;
+
+      /**
+       * Scale each element of this vector by the corresponding element in the
+       * argument. This function is mostly meant to simulate multiplication
+       * (and immediate re-assignment) by a diagonal scaling matrix.
+       */
+      virtual void scale(const VectorSpaceVector<Number> &scaling_factors) override;
+
+      /**
+       * Assignment *this = a*V.
+       */
+      virtual void equ(const Number a, const VectorSpaceVector<Number> &V) override;
+
+      /**
+       * Return the l1 norm of the vector (i.e., the sum of the
+       * absolute values of all entries among all processors).
+       */
+      virtual real_type l1_norm() const override;
+
+      /**
+       * Return the l2 norm of the vector (i.e., the square root of
+       * the sum of the squares of all entries among all processors).
+       */
+      virtual real_type l2_norm() const override;
+
+      /**
+       * Return the maximum norm of the vector (i.e., the maximum absolute
+       * value among all entries and among all processors).
+       */
+      virtual real_type linfty_norm() const override;
+
+      /**
+       * Perform a combined operation of a vector addition and a subsequent
+       * inner product, returning the value of the inner product. In other
+       * words, the result of this function is the same as if the user called
+       * @code
+       * this->add(a, V);
+       * return_value = *this * W;
+       * @endcode
+       *
+       * The reason this function exists is that this operation involves less
+       * memory transfer than calling the two functions separately. This
+       * method only needs to load three vectors, @p this, @p V, @p W, whereas
+       * calling separate methods means to load the calling vector @p this
+       * twice. Since most vector operations are memory transfer limited, this
+       * reduces the time by 25\% (or 50\% if @p W equals @p this).
+       */
+      virtual Number add_and_dot(const Number a,
+                                 const VectorSpaceVector<Number> &V,
+                                 const VectorSpaceVector<Number> &W) override;
+
+      /**
+       * Return the pointer to the underlying array.
+       */
+      Number *get_values() const;
+
+      /**
+       * Return the size of the vector.
+       */
+      virtual size_type size() const override;
+
+      /**
+       * Return an index set that describes which elements of this vector are
+       * owned by the current processor, i.e. [0, size).
+       */
+      virtual dealii::IndexSet locally_owned_elements() const override;
+
+      /**
+       * Print the vector to the output stream @p out.
+       */
+      virtual void print(std::ostream &out,
+                         const unsigned int precision=2,
+                         const bool scientific=true,
+                         const bool across=true) const override;
+
+      /**
+       * Return the memory consumption of this class in bytes.
+       */
+      virtual std::size_t memory_consumption() const override;
+
+      /**
+       * Attempt to perform an operation between two incompatible vector types.
+       *
+       * @ingroup Exceptions
+       */
+      DeclException0(ExcVectorTypeNotCompatible);
+
+    private:
+      /**
+       * Pointer to the array of elements of this vector.
+       */
+      Number *val;
+
+      /**
+       * Number of elements in the vector.
+       */
+      size_type n_elements;
+    };
+
+
+
+    // ------------------------------ Inline functions -----------------------------
+    template <typename Number>
+    inline
+    Number *Vector<Number>::get_values() const
+    {
+      return val;
+    }
+
+
+
+    template <typename Number>
+    inline
+    typename Vector<Number>::size_type Vector<Number>::size() const
+    {
+      return n_elements;
+    }
+
+
+    template <typename Number>
+    inline
+    IndexSet Vector<Number>::locally_owned_elements() const
+    {
+      return complete_index_set(n_elements);
+    }
+  }
+}
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
+
+#endif
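
A rough usage sketch of the new vector class (a hypothetical driver, not part of this commit, assuming deal.II was configured with DEAL_II_WITH_CUDA): data is staged in a LinearAlgebra::ReadWriteVector on the host, copied to the device with import(), operated on there, and copied back through the ReadWriteVector::import() overload added further down in this patch.

  // cuda_vector_usage.cc -- hypothetical driver, only for illustration.
  #include <deal.II/lac/cuda_vector.h>
  #include <deal.II/lac/read_write_vector.h>

  #include <iostream>

  using namespace dealii;

  int main()
  {
    const unsigned int n = 100;

    // Host-side staging vector holding the values 0, 1, ..., n-1.
    LinearAlgebra::ReadWriteVector<double> host_vector(n);
    for (unsigned int i=0; i<n; ++i)
      host_vector[i] = static_cast<double>(i);

    // Device vector of the same size; import() copies host -> device.
    LinearAlgebra::CUDAWrappers::Vector<double> device_vector(n);
    device_vector.import(host_vector, VectorOperation::insert);

    // These operations all run on the GPU.
    device_vector *= 2.;
    device_vector.add(1.);
    std::cout << "l2 norm computed on the device: "
              << device_vector.l2_norm() << std::endl;

    // Copy the result back with the new ReadWriteVector overload.
    host_vector.import(device_vector, VectorOperation::insert);
    std::cout << "first entry back on the host: " << host_vector[0] << std::endl;

    return 0;
  }
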
diff --git a/include/deal.II/lac/read_write_vector.h b/include/deal.II/lac/read_write_vector.h
index 71264567a6..cade414d85 100644
--- a/include/deal.II/lac/read_write_vector.h
+++ b/include/deal.II/lac/read_write_vector.h
@@ -73,6 +73,16 @@ namespace LinearAlgebra
 }
 #endif
 
+#ifdef DEAL_II_WITH_CUDA
+namespace LinearAlgebra
+{
+  namespace CUDAWrappers
+  {
+    template <typename Number> class Vector;
+  }
+}
+#endif
+
 namespace LinearAlgebra
 {
   /*! @addtogroup Vectors
@@ -306,6 +316,19 @@ namespace LinearAlgebra
                 std_cxx11::shared_ptr<const CommunicationPatternBase> ());
 #endif
 
+#ifdef DEAL_II_WITH_CUDA
+    /**
+     * Import all the elements present in the vector's IndexSet from the input
+     * vector @p cuda_vec. VectorOperation::values @p operation is used to
+     * decide if the elements in @p cuda_vec should be added to the current
+     * vector or replace the current elements. The last parameter is not used.
+     */
+    void import(const CUDAWrappers::Vector<Number> &cuda_vec,
+                VectorOperation::values operation,
+                std_cxx11::shared_ptr<const CommunicationPatternBase> communication_pattern =
+                  std_cxx11::shared_ptr<const CommunicationPatternBase> ());
+#endif
+
     /**
      * The value returned by this function denotes the dimension of the vector
      * spaces that are modeled by objects of this kind. However, objects of
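
The VectorOperation flag selects between overwriting and accumulating, as the documentation above describes. A small sketch of the intended semantics (hypothetical code, not part of this commit; the values are only for illustration):

  // import_semantics.cc -- hypothetical snippet, only for illustration.
  #include <deal.II/lac/cuda_vector.h>
  #include <deal.II/lac/read_write_vector.h>

  using namespace dealii;

  void import_semantics_demo()
  {
    const unsigned int n = 4;

    LinearAlgebra::ReadWriteVector<double> host(n);
    for (unsigned int i=0; i<n; ++i)
      host[i] = 1.;

    LinearAlgebra::CUDAWrappers::Vector<double> device(n);
    device.import(host, VectorOperation::insert);  // device = (1,1,1,1)
    device *= 3.;                                  // device = (3,3,3,3)

    // insert: the host entries are replaced by the device entries.
    host.import(device, VectorOperation::insert);  // host = (3,3,3,3)

    // add: the device entries are accumulated onto the host entries.
    host.import(device, VectorOperation::add);     // host = (6,6,6,6)
  }
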
diff --git a/include/deal.II/lac/read_write_vector.templates.h b/include/deal.II/lac/read_write_vector.templates.h
index 59ce51a7c4..8496f35c68 100644
--- a/include/deal.II/lac/read_write_vector.templates.h
+++ b/include/deal.II/lac/read_write_vector.templates.h
@@ -36,6 +36,11 @@
 #  include "Epetra_Import.h"
 #endif
 
+#ifdef DEAL_II_WITH_CUDA
+#  include <deal.II/lac/cuda_vector.h>
+#  include <cuda_runtime_api.h>
+#endif
+
 DEAL_II_NAMESPACE_OPEN
 
 
@@ -375,6 +380,39 @@ namespace LinearAlgebra
 
 
 
+#ifdef DEAL_II_WITH_CUDA
+  template <typename Number>
+  void
+  ReadWriteVector<Number>::import(const LinearAlgebra::CUDAWrappers::Vector<Number> &cuda_vec,
+                                  VectorOperation::values operation,
+                                  std_cxx11::shared_ptr<const CommunicationPatternBase> )
+  {
+    const unsigned int n_elements = stored_elements.n_elements();
+    if (operation == VectorOperation::insert)
+      {
+        cudaError_t error_code = cudaMemcpy(&val[0], cuda_vec.get_values(),
+                                            n_elements*sizeof(Number),
+                                            cudaMemcpyDeviceToHost);
+        CudaAssert(error_code);
+      }
+    else
+      {
+        // Copy the vector from the device to a temporary vector on the host
+        std::vector<Number> tmp(n_elements);
+        cudaError_t error_code = cudaMemcpy(&tmp[0], cuda_vec.get_values(),
+                                            n_elements*sizeof(Number),
+                                            cudaMemcpyDeviceToHost);
+        CudaAssert(error_code);
+
+        // Add the two vectors
+        for (unsigned int i=0; i<n_elements; ++i)
+          val[i] += tmp[i];
+      }
+  }
+#endif
+
+
+
   template <typename Number>
   void
   ReadWriteVector<Number>::swap (ReadWriteVector<Number> &v)
diff --git a/source/lac/CMakeLists.txt b/source/lac/CMakeLists.txt
index 924b993baf..ca536009dc 100644
--- a/source/lac/CMakeLists.txt
+++ b/source/lac/CMakeLists.txt
@@ -132,6 +132,14 @@ IF(DEAL_II_WITH_TRILINOS)
     )
 ENDIF()
 
+# Add CUDA wrapper files
+IF(DEAL_II_WITH_CUDA)
+  SET(_src
+    ${_src}
+    cuda_vector.cu
+  )
+ENDIF()
+
 
 FILE(GLOB _header
diff --git a/source/lac/cuda_vector.cu b/source/lac/cuda_vector.cu
new file mode 100644
index 0000000000..b954c3f7ab
--- /dev/null
+++ b/source/lac/cuda_vector.cu
@@ -0,0 +1,1012 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2016 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+// +// --------------------------------------------------------------------- + +#include +#include +#include +#include + +#ifdef DEAL_II_WITH_CUDA + +DEAL_II_NAMESPACE_OPEN + +#define BLOCK_SIZE 512 +#define CHUNK_SIZE 8 + +namespace LinearAlgebra +{ + namespace CUDAWrappers + { + namespace internal + { + template + __global__ void vec_scale(Number *val, + const Number a, + const typename Vector::size_type N) + { + const typename Vector::size_type idx_base = threadIdx.x + + blockIdx.x * + (blockDim.x*CHUNK_SIZE); + for (unsigned int i=0; i::size_type idx = idx_base + + i*BLOCK_SIZE; + if (idx + __device__ static inline Number operation(const Number a, + const Number b) + { + return a+b; + } + }; + + + + struct Binop_Subtraction + { + template + __device__ static inline Number operation(const Number a, + const Number b) + { + return a-b; + } + }; + + + + template + __global__ void vector_bin_op(Number *v1, + Number *v2, + const typename Vector::size_type N) + { + const typename Vector::size_type idx_base = threadIdx.x + + blockIdx.x * + (blockDim.x*CHUNK_SIZE); + for (unsigned int i=0; i::size_type idx = idx_base + + i*BLOCK_SIZE; + if (idx + struct L1Norm + { + __device__ static Number reduction_op(const Number a, const Number b) + { + return std::abs(a) + std::abs(b); + } + + __device__ static void atomic_op(Number *dst, const Number a) + { + *dst = std::abs(*dst) + std::abs(a); + } + + __device__ static Number null_value() + { + return Number(); + } + }; + + + template + struct LInfty + { + __device__ static Number reduction_op(const Number a, const Number b) + { + if (std::abs(a) > std::abs(b)) + return std::abs(a); + else + return std::abs(b); + } + + __device__ static void atomic_op(Number *dst, const Number a) + { + if (std::abs(*dst) < std::abs(a)) + *dst = std::abs(a); + else + *dst = std::abs(*dst); + } + + __device__ static Number null_value() + { + return Number(); + } + }; + + + + template + __device__ void reduce_within_warp(volatile Number *result_buffer, + typename Vector::size_type local_idx) + { + if (BLOCK_SIZE >= 64) + result_buffer[local_idx] = + Operation::reduction_op(result_buffer[local_idx], + result_buffer[local_idx+32]); + if (BLOCK_SIZE >= 32) + result_buffer[local_idx] = + Operation::reduction_op(result_buffer[local_idx], + result_buffer[local_idx+16]); + if (BLOCK_SIZE >= 16) + result_buffer[local_idx] = + Operation::reduction_op(result_buffer[local_idx], + result_buffer[local_idx+8]); + if (BLOCK_SIZE >= 8) + result_buffer[local_idx] = + Operation::reduction_op(result_buffer[local_idx], + result_buffer[local_idx+4]); + if (BLOCK_SIZE >= 4) + result_buffer[local_idx] = + Operation::reduction_op(result_buffer[local_idx], + result_buffer[local_idx+2]); + if (BLOCK_SIZE >= 2) + result_buffer[local_idx] = + Operation::reduction_op(result_buffer[local_idx], + result_buffer[local_idx+1]); + } + + + + template + __device__ void reduce(Number *result, + Number *result_buffer, + const typename Vector::size_type local_idx, + const typename Vector::size_type global_idx, + const typename Vector::size_type N) + { + for (typename Vector::size_type s=BLOCK_SIZE/2; s>32; s=s>>1) + { + if (local_idx < s) + result_buffer[local_idx] = Operation::reduction_op(result_buffer[local_idx], + result_buffer[local_idx+s]); + __syncthreads(); + } + + if (local_idx < 32) + reduce_within_warp(result_buffer, local_idx); + + if (local_idx == 0) + Operation::atomic_op(result, result_buffer[0]); + } + + + + template + __global__ void reduction(Number *result, + const Number *v, + const typename 
Vector::size_type N) + { + __shared__ Number result_buffer[BLOCK_SIZE]; + + const typename Vector::size_type global_idx = threadIdx.x + + blockIdx.x*(blockDim.x*CHUNK_SIZE); + const typename Vector::size_type local_idx = threadIdx.x; + + reduce (result, result_buffer, local_idx, global_idx, N); + } + + + + template + struct DotProduct + { + __device__ static Number binary_op(const Number a, const Number b) + { + return a*b; + } + + __device__ static Number reduction_op(const Number a, const Number b) + { + return a+b; + } + + __device__ static void atomic_op(Number *dst, const Number a) + { + *dst += a; + } + + __device__ static Number null_value() + { + return Number(); + } + }; + + + + template + __global__ void double_vector_reduction(Number *result, + Number *v1, + Number *v2, + const typename Vector::size_type N) + { + __shared__ Number result_buffer[BLOCK_SIZE]; + + const typename Vector::size_type global_idx = threadIdx.x + + blockIdx.x*(blockDim.x*CHUNK_SIZE); + const typename Vector::size_type local_idx = threadIdx.x; + + if (global_idx::size_type idx = global_idx + + i*BLOCK_SIZE; + if (idx (result,result_buffer,local_idx,global_idx,N); + } + + + + template + __global__ void vec_add(Number *val, + const Number a, + const typename Vector::size_type N) + { + const typename Vector::size_type idx_base = threadIdx.x + + blockIdx.x * + (blockDim.x*CHUNK_SIZE); + for (unsigned int i=0; i::size_type idx = idx_base + + i*BLOCK_SIZE; + if (idx + __global__ void add_aV(Number *val, + const Number a, + Number *V_val, + const typename Vector::size_type N) + { + const typename Vector::size_type idx_base = threadIdx.x + + blockIdx.x * + (blockDim.x*CHUNK_SIZE); + for (unsigned int i=0; i::size_type idx = idx_base + + i*BLOCK_SIZE; + if (idx + __global__ void add_aVbW(Number *val, + const Number a, + Number *V_val, + const Number b, + Number *W_val, + const typename Vector::size_type N) + { + const typename Vector::size_type idx_base = threadIdx.x + + blockIdx.x * + (blockDim.x*CHUNK_SIZE); + for (unsigned int i=0; i::size_type idx = idx_base + + i*BLOCK_SIZE; + if (idx + __global__ void sadd(const Number s, + Number *val, + const Number a, + const Number *V_val, + const typename Vector::size_type N) + { + const typename Vector::size_type idx_base = threadIdx.x + + blockIdx.x * + (blockDim.x*CHUNK_SIZE); + for (unsigned int i=0; i::size_type idx = idx_base + + i*BLOCK_SIZE; + if (idx + __global__ void scale(Number *val, + const Number *V_val, + const typename Vector::size_type N) + { + const typename Vector::size_type idx_base = threadIdx.x + + blockIdx.x * + (blockDim.x*CHUNK_SIZE); + for (unsigned int i=0; i::size_type idx = idx_base + + i*BLOCK_SIZE; + if (idx + __global__ void equ(Number *val, + const Number a, + const Number *V_val, + const typename Vector::size_type N) + { + const typename Vector::size_type idx_base = threadIdx.x + + blockIdx.x * + (blockDim.x*CHUNK_SIZE); + for (unsigned int i=0; i::size_type idx = idx_base + + i*BLOCK_SIZE; + if (idx + __global__ void add_and_dot(Number *res, + Number *v1, + const Number *v2, + const Number *v3, + const Number a, + const typename Vector::size_type N) + { + __shared__ Number res_buf[BLOCK_SIZE]; + + const unsigned int global_idx = threadIdx.x + blockIdx.x * + (blockDim.x*CHUNK_SIZE); + const unsigned int local_idx = threadIdx.x; + if (global_idx < N) + { + v1[global_idx] += a*v2[global_idx]; + res_buf[local_idx] = v1[global_idx]*v3[global_idx]; + } + else + res_buf[local_idx] = 0.; + + for (unsigned int i=1; i + Vector::Vector() + : + 
+      val(nullptr),
+      n_elements(0)
+    {}
+
+
+
+    template <typename Number>
+    Vector<Number>::Vector(const Vector<Number> &V)
+      :
+      n_elements(V.n_elements)
+    {
+      // Allocate the memory
+      cudaError_t error_code = cudaMalloc(&val, n_elements*sizeof(Number));
+      CudaAssert(error_code);
+      // Copy the values.
+      error_code = cudaMemcpy(val, V.val, n_elements*sizeof(Number),
+                              cudaMemcpyDeviceToDevice);
+      CudaAssert(error_code);
+    }
+
+
+
+    template <typename Number>
+    Vector<Number>::Vector(const size_type n)
+      :
+      n_elements(n)
+    {
+      // Allocate the memory
+      cudaError_t error_code = cudaMalloc(&val, n_elements*sizeof(Number));
+      CudaAssert(error_code);
+    }
+
+
+
+    template <typename Number>
+    Vector<Number>::~Vector()
+    {
+      if (val != nullptr)
+        {
+          cudaError_t error_code = cudaFree(val);
+          CudaAssert(error_code);
+          val = nullptr;
+          n_elements = 0;
+        }
+    }
+
+
+
+    template <typename Number>
+    void Vector<Number>::reinit(const size_type n,
+                                const bool omit_zeroing_entries)
+    {
+      // Resize the underlying array if necessary
+      if (n == 0)
+        {
+          if (val != nullptr)
+            {
+              cudaError_t error_code = cudaFree(val);
+              CudaAssert(error_code);
+              val = nullptr;
+            }
+        }
+      else
+        {
+          if (n_elements != n)
+            {
+              cudaError_t error_code = cudaFree(val);
+              CudaAssert(error_code);
+            }
+
+          cudaError_t error_code = cudaMalloc(&val, n*sizeof(Number));
+          CudaAssert(error_code);
+
+          // If necessary set the elements to zero. Use the new size n here:
+          // n_elements still holds the old size at this point.
+          if (omit_zeroing_entries == false)
+            {
+              cudaError_t error_code = cudaMemset(val, 0,
+                                                  n*sizeof(Number));
+              CudaAssert(error_code);
+            }
+        }
+      n_elements = n;
+    }
+
+
+
+    template <typename Number>
+    void Vector<Number>::import(const ReadWriteVector<Number> &V,
+                                VectorOperation::values operation,
+                                std_cxx11::shared_ptr<const CommunicationPatternBase> )
+    {
+      if (operation == VectorOperation::insert)
+        {
+          cudaError_t error_code = cudaMemcpy(val, V.begin(),
+                                              n_elements*sizeof(Number),
+                                              cudaMemcpyHostToDevice);
+          CudaAssert(error_code);
+        }
+      else
+        {
+          // Create a temporary vector on the device
+          Number *tmp;
+          cudaError_t error_code = cudaMalloc(&tmp, n_elements*sizeof(Number));
+          CudaAssert(error_code);
+
+          // Copy the vector from the host to the temporary vector on the device
+          error_code = cudaMemcpy(&tmp[0], V.begin(), n_elements*sizeof(Number),
+                                  cudaMemcpyHostToDevice);
+          CudaAssert(error_code);
+
+          // Add the two vectors
+          const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+
+          internal::vector_bin_op<Number,internal::Binop_Addition>
+            <<<n_blocks,BLOCK_SIZE>>>(val, tmp, n_elements);
+          // Check that the kernel was launched correctly
+          CudaAssert(cudaGetLastError());
+          // Check that there was no problem during the execution of the kernel
+          CudaAssert(cudaDeviceSynchronize());
+
+          // Delete the temporary vector
+          error_code = cudaFree(tmp);
+          CudaAssert(error_code);
+        }
+    }
+
+
+
+    template <typename Number>
+    Vector<Number> &Vector<Number>::operator*= (const Number factor)
+    {
+      AssertIsFinite(factor);
+      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+      internal::vec_scale<Number> <<<n_blocks,BLOCK_SIZE>>>(val,
+                                                            factor, n_elements);
+
+      // Check that the kernel was launched correctly
+      CudaAssert(cudaGetLastError());
+      // Check that there was no problem during the execution of the kernel
+      CudaAssert(cudaDeviceSynchronize());
+
+      return *this;
+    }
+
+
+
+    template <typename Number>
+    Vector<Number> &Vector<Number>::operator/= (const Number factor)
+    {
+      AssertIsFinite(factor);
+      Assert(factor!=Number(0.), ExcZero());
+      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+      internal::vec_scale<Number> <<<n_blocks,BLOCK_SIZE>>>(val,
+                                                            1./factor, n_elements);
+
+      // Check that the kernel was launched correctly
+      CudaAssert(cudaGetLastError());
+      // Check that there was no problem during the execution of the kernel
+      CudaAssert(cudaDeviceSynchronize());
+
+      return *this;
+    }
+
+
+
+    template <typename Number>
+    Vector<Number>
&Vector::operator+= (const VectorSpaceVector &V) + { + // Check that casting will work + Assert(dynamic_cast*>(&V)!=nullptr, + ExcVectorTypeNotCompatible()); + + // Downcast V. If it fails, it throw an exception. + const Vector &down_V = dynamic_cast&>(V); + Assert(down_V.size()==this->size(), + ExcMessage("Cannot add two vectors with different numbers of elements")); + + const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE); + + internal::vector_bin_op + <<>>(val, down_V.val, n_elements); + + // Check that the kernel was launched correctly + CudaAssert(cudaGetLastError()); + // Check that there was no problem during the execution of the kernel + CudaAssert(cudaDeviceSynchronize()); + + return *this; + } + + + + template + Vector &Vector::operator-= (const VectorSpaceVector &V) + { + // Check that casting will work + Assert(dynamic_cast*>(&V)!=nullptr, + ExcVectorTypeNotCompatible()); + + // Downcast V. If fails, throws an exception. + const Vector &down_V = dynamic_cast&>(V); + Assert(down_V.size()==this->size(), + ExcMessage("Cannot add two vectors with different numbers of elements.")); + + const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE); + + internal::vector_bin_op + <<>>(val, down_V.val, n_elements); + + // Check that the kernel was launched correctly + CudaAssert(cudaGetLastError()); + // Check that there was no problem during the execution of the kernel + CudaAssert(cudaDeviceSynchronize()); + + return *this; + } + + + + template + Number Vector::operator* (const VectorSpaceVector &V) const + { + // Check that casting will work + Assert(dynamic_cast*>(&V)!=nullptr, + ExcVectorTypeNotCompatible()); + + // Downcast V. If fails, throws an exception. + const Vector &down_V = dynamic_cast&>(V); + Assert(down_V.size()==this->size(), + ExcMessage("Cannot add two vectors with different numbers of elements")); + + Number *result_device; + cudaError_t error_code = cudaMalloc(&result_device, n_elements*sizeof(Number)); + CudaAssert(error_code); + error_code = cudaMemset(result_device, Number(), sizeof(Number)); + + const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE); + internal::double_vector_reduction> + <<>> (result_device, val, + down_V.val, + static_cast(n_elements)); + + // Copy the result back to the host + Number result; + error_code = cudaMemcpy(&result, result_device, sizeof(Number), + cudaMemcpyDeviceToHost); + CudaAssert(error_code); + // Free the memory on the device + error_code = cudaFree(result_device); + CudaAssert(error_code); + + return result; + } + + + + template + void Vector::add(const Number a) + { + AssertIsFinite(a); + const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE); + internal::vec_add <<>>(val, a, + n_elements); + + // Check that the kernel was launched correctly + CudaAssert(cudaGetLastError()); + // Check that there was no problem during the execution of the kernel + CudaAssert(cudaDeviceSynchronize()); + } + + + + template + void Vector::add(const Number a, const VectorSpaceVector &V) + { + AssertIsFinite(a); + + // Check that casting will work. + Assert(dynamic_cast*>(&V) != nullptr, + ExcVectorTypeNotCompatible()); + + // Downcast V. If fails, throw an exception. 
+ const Vector &down_V = dynamic_cast&>(V); + Assert(down_V.size() == this->size(), + ExcMessage("Cannot add two vectors with different numbers of elements.")); + + const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE); + internal::add_aV <<>> (val, + a, down_V.val, n_elements); + + // Check that the kernel was launched correctly + CudaAssert(cudaGetLastError()); + // Check that there was no problem during the execution of the kernel + CudaAssert(cudaDeviceSynchronize()); + } + + + + template + void Vector::add(const Number a, const VectorSpaceVector &V, + const Number b, const VectorSpaceVector &W) + { + AssertIsFinite(a); + AssertIsFinite(b); + + // Check that casting will work. + Assert(dynamic_cast*>(&V) != nullptr, + ExcVectorTypeNotCompatible()); + + // Downcast V. If fails, throw an exception. + const Vector &down_V = dynamic_cast&>(V); + Assert(down_V.size() == this->size(), + ExcMessage("Cannot add two vectors with different numbers of elements.")); + + // Check that casting will work. + Assert(dynamic_cast*>(&W) != nullptr, + ExcVectorTypeNotCompatible()); + + // Downcast V. If fails, throw an exception. + const Vector &down_W = dynamic_cast&>(W); + Assert(down_W.size() == this->size(), + ExcMessage("Cannot add two vectors with different numbers of elements.")); + + const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE); + internal::add_aVbW <<>> (val, + a, down_V.val, b, down_W.val, n_elements); + + // Check that the kernel was launched correctly + CudaAssert(cudaGetLastError()); + // Check that there was no problem during the execution of the kernel + CudaAssert(cudaDeviceSynchronize()); + } + + + + template + void Vector::sadd(const Number s, const Number a, + const VectorSpaceVector &V) + { + AssertIsFinite(s); + AssertIsFinite(a); + + // Check that casting will work. + Assert(dynamic_cast*>(&V) != nullptr, + ExcVectorTypeNotCompatible()); + + // Downcast V. If fails, throw an exception. + const Vector &down_V = dynamic_cast&>(V); + Assert(down_V.size() == this->size(), + ExcMessage("Cannot add two vectors with different numbers of elements.")); + + const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE); + internal::sadd <<>> (s, val, + a, down_V.val, n_elements); + + // Check that the kernel was launched correctly + CudaAssert(cudaGetLastError()); + // Check that there was no problem during the execution of the kernel + CudaAssert(cudaDeviceSynchronize()); + } + + + + template + void Vector::scale(const VectorSpaceVector &scaling_factors) + { + // Check that casting will work. + Assert(dynamic_cast*>(&scaling_factors) != nullptr, + ExcVectorTypeNotCompatible()); + + // Downcast V. If fails, throw an exception. + const Vector &down_scaling_factors = + dynamic_cast&>(scaling_factors); + Assert(down_scaling_factors.size() == this->size(), + ExcMessage("Cannot scale two vectors with different numbers of elements.")); + + const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE); + internal::scale <<>> (val, + down_scaling_factors.val, n_elements); + + // Check that the kernel was launched correctly + CudaAssert(cudaGetLastError()); + // Check that there was no problem during the execution of the kernel + CudaAssert(cudaDeviceSynchronize()); + } + + + + template + void Vector::equ(const Number a, const VectorSpaceVector &V) + { + AssertIsFinite(a); + + // Check that casting will work. + Assert(dynamic_cast*>(&V) != nullptr, + ExcVectorTypeNotCompatible()); + + // Downcast V. If fails, throw an exception. 
+ const Vector &down_V = dynamic_cast&>(V); + Assert(down_V.size() == this->size(), + ExcMessage("Cannot assign two vectors with different numbers of elements.")); + + const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE); + internal::equ <<>> (val, a, + down_V.val, n_elements); + + // Check that the kernel was launched correctly + CudaAssert(cudaGetLastError()); + // Check that there was no problem during the execution of the kernel + CudaAssert(cudaDeviceSynchronize()); + } + + + + template + typename Vector::real_type Vector::l1_norm() const + { + Number *result_device; + cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number)); + CudaAssert(error_code); + error_code = cudaMemset(result_device, Number(), sizeof(Number)); + + const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE); + internal::reduction> + <<>> ( + result_device, val, + n_elements); + + // Copy the result back to the host + Number result; + error_code = cudaMemcpy(&result, result_device, sizeof(Number), + cudaMemcpyDeviceToHost); + CudaAssert(error_code); + // Free the memory on the device + error_code = cudaFree(result_device); + CudaAssert(error_code); + + return result; + } + + + + template + typename Vector::real_type Vector::l2_norm() const + { + return std::sqrt((*this)*(*this)); + } + + + + template + typename Vector::real_type Vector::linfty_norm() const + { + Number *result_device; + cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number)); + CudaAssert(error_code); + error_code = cudaMemset(result_device, Number(), sizeof(Number)); + + const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE); + internal::reduction> + <<>> ( + result_device, val, + n_elements); + + // Copy the result back to the host + Number result; + error_code = cudaMemcpy(&result, result_device, sizeof(Number), + cudaMemcpyDeviceToHost); + CudaAssert(error_code); + // Free the memory on the device + error_code = cudaFree(result_device); + CudaAssert(error_code); + + return result; + } + + + + template + Number Vector::add_and_dot(const Number a, + const VectorSpaceVector &V, + const VectorSpaceVector &W) + { + AssertIsFinite(a); + + // Check that casting will work + Assert(dynamic_cast*>(&V)!=nullptr, + ExcVectorTypeNotCompatible()); + Assert(dynamic_cast*>(&W)!=nullptr, + ExcVectorTypeNotCompatible()); + + // Downcast V and W. If it fails, throw an exceptiion. 
+      const Vector<Number> &down_V = dynamic_cast<const Vector<Number>&>(V);
+      Assert(down_V.size() == this->size(),
+             ExcMessage("Vector V has the wrong size."));
+      const Vector<Number> &down_W = dynamic_cast<const Vector<Number>&>(W);
+      Assert(down_W.size() == this->size(),
+             ExcMessage("Vector W has the wrong size."));
+
+      Number *res_d;
+      cudaError_t error_code = cudaMalloc(&res_d, sizeof(Number));
+      CudaAssert(error_code);
+      error_code = cudaMemset(res_d, 0., sizeof(Number));
+      CudaAssert(error_code);
+
+      const int n_blocks = 1 + (n_elements-1)/(CHUNK_SIZE*BLOCK_SIZE);
+      internal::add_and_dot<Number> <<<n_blocks,BLOCK_SIZE>>>(
+        res_d, val, down_V.val, down_W.val, a, n_elements);
+
+      Number res;
+      error_code = cudaMemcpy(&res, res_d, sizeof(Number), cudaMemcpyDeviceToHost);
+      CudaAssert(error_code);
+      error_code = cudaFree(res_d);
+
+      return res;
+    }
+
+
+
+    template <typename Number>
+    void Vector<Number>::print(std::ostream &out,
+                               const unsigned int precision,
+                               const bool scientific,
+                               const bool across) const
+    {
+      AssertThrow(out, ExcIO());
+      std::ios::fmtflags old_flags = out.flags();
+      unsigned int old_precision = out.precision (precision);
+
+      out.precision (precision);
+      if (scientific)
+        out.setf (std::ios::scientific, std::ios::floatfield);
+      else
+        out.setf (std::ios::fixed, std::ios::floatfield);
+
+      out << "IndexSet: ";
+      complete_index_set(n_elements).print(out);
+      out << std::endl;
+
+      // Copy the vector to the host
+      Number *cpu_val = new Number[n_elements];
+      cudaError_t error_code = cudaMemcpy(cpu_val, val,
+                                          n_elements*sizeof(Number),
+                                          cudaMemcpyDeviceToHost);
+      CudaAssert(error_code);
+      for (unsigned int i=0; i<n_elements; ++i)
+        out << cpu_val[i] << std::endl;
+      out << std::flush;
+      delete [] cpu_val;
+
+      AssertThrow (out, ExcIO());
+      // reset output format
+      out.flags (old_flags);
+      out.precision(old_precision);
+    }
+
+
+
+    template <typename Number>
+    std::size_t Vector<Number>::memory_consumption() const
+    {
+      std::size_t memory = sizeof(*this);
+      memory += sizeof (Number) * static_cast<std::size_t>(n_elements);
+
+      return memory;
+    }
+
+
+
+    // Explicit instantiations
+    template class Vector<float>;
+    template class Vector<double>;
+  }
+}
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
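
As a closing illustration of the add_and_dot() contract documented in cuda_vector.h above: the fused call is expected to return the same value as a separate add() followed by an inner product, only with fewer passes over memory (hypothetical driver, not part of this commit):

  // add_and_dot_check.cc -- hypothetical driver, only for illustration.
  #include <deal.II/lac/cuda_vector.h>
  #include <deal.II/lac/read_write_vector.h>

  #include <cmath>
  #include <iostream>

  using namespace dealii;

  int main()
  {
    const unsigned int n = 1000;
    LinearAlgebra::ReadWriteVector<double> host(n);
    for (unsigned int i=0; i<n; ++i)
      host[i] = 1. + 0.5*i;

    // x, v, w take part in the fused call; y is an identical copy of x
    // used for the two separate calls.
    LinearAlgebra::CUDAWrappers::Vector<double> x(n), v(n), w(n), y(n);
    x.import(host, VectorOperation::insert);
    v.import(host, VectorOperation::insert);
    w.import(host, VectorOperation::insert);
    y.import(host, VectorOperation::insert);

    const double a = 0.25;

    // Fused operation: x += a*v, then return x*w.
    const double fused = x.add_and_dot(a, v, w);

    // The same computation with two separate calls.
    y.add(a, v);
    const double separate = y*w;

    std::cout << "difference: " << std::abs(fused-separate) << std::endl;
    return 0;
  }
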