--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_cuda_sparse_matrix_h
+#define dealii_cuda_sparse_matrix_h
+
+#include <deal.II/base/config.h>
+#include <deal.II/base/subscriptor.h>
+
+#ifdef DEAL_II_WITH_CUDA
+#include <deal.II/lac/sparse_matrix.h>
+#include <deal.II/lac/cuda_vector.h>
+#include <cusparse.h>
+
+#include <tuple>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace CUDAWrappers
+{
+ /**
+   * This class is a wrapper around a cuSPARSE CSR sparse matrix. Unlike
+   * deal.II's own SparseMatrix, all elements within each row are stored in
+   * increasing column index order.
+ *
+ * @note Instantiations for this template are provided for <tt>@<float@> and
+ * @<double@></tt>.
+ *
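+   * <h3>Usage</h3>
+   *
+   * A minimal usage sketch (the creation of the cuSPARSE handle via
+   * cusparseCreate() and the assembled host matrix @p matrix_host are
+   * placeholders, not part of this class):
+   * @code
+   * cusparseHandle_t cusparse_handle;
+   * cusparseCreate(&cusparse_handle);
+   *
+   * // Copy the host matrix to the device; each row is reordered in
+   * // increasing column index order.
+   * CUDAWrappers::SparseMatrix<double> matrix_dev(cusparse_handle,
+   *                                               matrix_host);
+   *
+   * // Matrix-vector product on the device.
+   * LinearAlgebra::CUDAWrappers::Vector<double> src(matrix_dev.n());
+   * LinearAlgebra::CUDAWrappers::Vector<double> dst(matrix_dev.m());
+   * matrix_dev.vmult(dst, src);
+   * @endcode
+   *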
+ * @ingroup Matrix1
+ * @author Bruno Turcksin
+ * @date 2018
+ */
+ template <typename Number>
+ class SparseMatrix: public virtual Subscriptor
+ {
+ public:
+ /**
+ * Declare type for container size.
+ */
+ typedef unsigned int size_type;
+
+ /**
+ * Type of the matrix entries.
+ */
+ typedef Number value_type;
+
+ /**
+ * Declare a type that holds real-valued numbers with the same precision
+ * as the template argument to this class.
+ */
+ typedef Number real_type;
+
+ /**
+ * @name Constructors and initialization
+ */
+ //@{
+ /**
+ * Constructor. Initialize the matrix to be empty, without any structure,
+ * i.e., the matrix is not usable at all. This constructor is therefore
+ * only useful for matrices which are members of a class.
+ *
+     * You have to initialize the matrix before usage with reinit().
+ */
+ SparseMatrix();
+
+ /**
+ * Constructor. Takes a cuSPARSE handle and a sparse matrix on the host.
+     * The sparse matrix on the host is copied to the device and the elements
+ * are reordered according to the format supported by cuSPARSE.
+ */
+ SparseMatrix(cusparseHandle_t handle,
+ const ::dealii::SparseMatrix<Number> &sparse_matrix_host);
+
+ /**
+ * Move constructor. Create a new SparseMatrix by stealing the internal
+ * data.
+ */
+ SparseMatrix(CUDAWrappers::SparseMatrix<Number> &&);
+
+ /**
+ * Copy constructor is deleted.
+ */
+ SparseMatrix(const CUDAWrappers::SparseMatrix<Number> &) = delete;
+
+ /**
+ * Destructor. Free all memory.
+ */
+ ~SparseMatrix();
+
+ /**
+ * Reinitialize the sparse matrix. The sparse matrix on the host is copied
+     * to the device and the elements are reordered according to the format
+ * supported by cuSPARSE.
+ */
+ void reinit(cusparseHandle_t handle,
+ const ::dealii::SparseMatrix<Number> &sparse_matrix_host);
+ //@}
+
+ /**
+ * @name Information on the matrix
+ */
+ //@{
+ /**
+ * Return the dimension of the codomain (or range) space. Note that the
+ * matrix is of dimension $m \times n$.
+ */
+ size_type m() const;
+
+ /**
+ * Return the dimension of the domain space. Note that the matrix is of
+ * dimension $m \times n$.
+ */
+ size_type n() const;
+
+ /**
+ * Return the number of nonzero elements of this matrix. Actually, it
+ * returns the number of entries in the sparsity pattern; if any of the
+ * entries should happen to be zero, it is counted anyway.
+ */
+ std::size_t n_nonzero_elements() const;
+ //@}
+
+ /**
+ * @name Modifying entries
+ */
+ //@{
+ /**
+ * Multiply the entire matrix by a fixed factor.
+ */
+ SparseMatrix &operator*= (const Number factor);
+
+ /**
+     * Divide the entire matrix by a fixed factor.
+ */
+ SparseMatrix &operator/= (const Number factor);
+ //@}
+
+ /**
+ * @name Multiplications
+ */
+ //@{
+ /**
+ * Matrix-vector multiplication: let $dst = M \cdot src$ with $M$
+ * being this matrix.
+ */
+ void vmult(LinearAlgebra::CUDAWrappers::Vector<Number> &dst,
+ const LinearAlgebra::CUDAWrappers::Vector<Number> &src) const;
+
+ /**
+ * Matrix-vector multiplication: let $dst = M^T \cdot src$ with
+ * $M$ being this matrix. This function does the same as vmult() but
+     * takes the transposed matrix.
+ */
+ void Tvmult(LinearAlgebra::CUDAWrappers::Vector<Number> &dst,
+ const LinearAlgebra::CUDAWrappers::Vector<Number> &src) const;
+
+ /**
+     * Adding matrix-vector multiplication. Add $M \cdot src$ to $dst$
+ * with $M$ being this matrix.
+ */
+ void vmult_add(LinearAlgebra::CUDAWrappers::Vector<Number> &dst,
+ const LinearAlgebra::CUDAWrappers::Vector<Number> &src) const;
+
+ /**
+ * Adding matrix-vector multiplication. Add $M^T \cdot src$ to
+     * $dst$ with $M$ being this matrix. This function does the same
+ * as vmult_add() but takes the transposed matrix.
+ */
+ void Tvmult_add(LinearAlgebra::CUDAWrappers::Vector<Number> &dst,
+ const LinearAlgebra::CUDAWrappers::Vector<Number> &src) const;
+
+ /**
+     * Return the square of the norm of the vector $v$ with respect to the
+     * norm induced by this matrix, i.e., $\left(v,Mv\right)$. This is
+     * useful, e.g., in the finite element context, where the $L_2$ norm of
+     * a function equals the matrix norm with respect to the mass matrix of
+     * the vector representing the nodal values of the finite element
+     * function.
+ *
+ * Obviously, the matrix needs to be quadratic for this operation.
+ */
+ Number matrix_norm_square(const LinearAlgebra::CUDAWrappers::Vector<Number> &v) const;
+
+ /**
+ * Compute the matrix scalar product $\left(u,Mv\right)$.
+ */
+ Number matrix_scalar_product(const LinearAlgebra::CUDAWrappers::Vector<Number> &u,
+ const LinearAlgebra::CUDAWrappers::Vector<Number> &v) const;
+
+ /**
+ * Compute the residual of an equation $M \cdot x=b$, where the residual is
+ * defined to be $r=b-M \cdot x$. Write the residual into $dst$. The
+ * $l_2$ norm of the residual vector is returned.
+ *
+ * Source $x$ and destination $dst$ must not be the same vector.
+ */
+ Number residual(LinearAlgebra::CUDAWrappers::Vector<Number> &dst,
+ const LinearAlgebra::CUDAWrappers::Vector<Number> &x,
+ const LinearAlgebra::CUDAWrappers::Vector<Number> &b) const;
+ //@}
+
+ /**
+ * @name Matrix norms
+ */
+ //@{
+ /**
+ * Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
+ * columns\ }j}\sum_{\mathrm{all\ rows\ }i} |M_{ij}|$, (max. sum of
+ * columns). This is the natural matrix norm that is compatible to the
+ * $l_1$-norm for vectors, i.e., $|Mv|_1\leq |M|_1 |v|_1$.
+ */
+ Number l1_norm() const;
+
+ /**
+ * Return the $l_\infty$-norm of the matrix, that is
+ * $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
+ * |M_{ij}|$, (max. sum of rows). This is the natural norm that is
+ * compatible to the $l_\infty$-norm of vectors, i.e., $|Mv|_\infty \leq
+ * |M|_\infty |v|_\infty$.
+ */
+ Number linfty_norm() const;
+
+ /**
+     * Return the Frobenius norm of the matrix, i.e., the square root of the
+ * sum of squares of all entries in the matrix.
+ */
+ Number frobenius_norm() const;
+ //@}
+
+ /**
+     * @name Access to underlying CUDA data
+ */
+ //@{
+ /**
+     * Return a tuple containing the pointer to the values of the matrix,
+     * the pointer to the column indices, the pointer to the row pointers,
+     * and the cuSPARSE matrix description.
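+     *
+     * A possible way to unpack the tuple (a sketch; the variable names are
+     * illustrative, assuming a <tt>double</tt> instantiation):
+     * @code
+     * double *           values;
+     * int *              column_index;
+     * int *              row_ptr;
+     * cusparseMatDescr_t descr;
+     * std::tie(values, column_index, row_ptr, descr) =
+     *   matrix.get_cusparse_matrix();
+     * @endcode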
+ */
+ std::tuple<Number *, int *, int *, cusparseMatDescr_t>
+ get_cusparse_matrix();
+    //@}
+
+ private:
+ /**
+     * cuSPARSE handle used to call cuSPARSE functions. The handle needs to
+     * be mutable so that it can be used in const member functions.
+ */
+ mutable cusparseHandle_t cusparse_handle;
+
+ /**
+ * Number of non-zero elements in the sparse matrix.
+ */
+ int nnz;
+
+ /**
+ * Number of rows of the sparse matrix.
+ */
+ int n_rows;
+
+ /**
+ * Number of columns of the sparse matrix.
+ */
+ int n_cols;
+
+ /**
+ * Pointer to the values (on the device) of the sparse matrix.
+ */
+ Number *val_dev;
+
+ /**
+ * Pointer to the column indices (on the device) of the sparse matrix.
+ */
+ int *column_index_dev;
+
+ /**
+ * Pointer to the row pointer (on the device) of the sparse matrix.
+ */
+ int *row_ptr_dev;
+
+ /**
+ * cuSPARSE description of the sparse matrix.
+ */
+ cusparseMatDescr_t descr;
+ };
+
+
+
+ template <typename Number>
+ inline
+ unsigned int SparseMatrix<Number>::m() const
+ {
+ return n_rows;
+ }
+
+
+
+ template <typename Number>
+ inline
+ unsigned int SparseMatrix<Number>::n() const
+ {
+ return n_cols;
+ }
+
+
+
+ template <typename Number>
+ inline
+ std::size_t SparseMatrix<Number>::n_nonzero_elements() const
+ {
+ return nnz;
+ }
+}
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
+#endif
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#include <deal.II/lac/cuda_sparse_matrix.h>
+#include <deal.II/base/cuda_size.h>
+#include <deal.II/base/exceptions.h>
+#include <deal.II/lac/cuda_atomic.h>
+
+#ifdef DEAL_II_WITH_CUDA
+
+#include <cusparse.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace CUDAWrappers
+{
+ using ::dealii::CUDAWrappers::block_size;
+
+ namespace internal
+ {
+ template <typename Number>
+ __global__ void scale(Number *val,
+ const Number a,
+ const typename SparseMatrix<Number>::size_type N)
+ {
+ const typename SparseMatrix<Number>::size_type idx = threadIdx.x +
+ blockIdx.x * blockDim.x;
+ if (idx<N)
+ val[idx] *= a;
+ }
+
+
+
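+    // The two csrmv() overloads below wrap cusparseScsrmv() and
+    // cusparseDcsrmv() for float and double, respectively. The 'transpose'
+    // flag selects op(A) and the 'add' flag selects whether the result
+    // overwrites y (beta = 0) or is added to it (beta = 1).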
+ void csrmv(cusparseHandle_t handle, bool transpose, int m, int n, int nnz,
+ const cusparseMatDescr_t descr, const float *A_val_dev,
+ const int *A_row_ptr_dev, const int *A_column_index_dev,
+ const float *x, bool add, float *y)
+ {
+ float alpha = 1.;
+ float beta = add ? 1. : 0.;
+ cusparseOperation_t cusparse_operation = transpose ?
+ CUSPARSE_OPERATION_TRANSPOSE :
+ CUSPARSE_OPERATION_NON_TRANSPOSE;
+
+ cusparseStatus_t error_code;
+ // This function performs y = alpha*op(A)*x + beta*y
+ error_code = cusparseScsrmv(handle, cusparse_operation, m, n, nnz,
+ &alpha, descr, A_val_dev, A_row_ptr_dev,
+ A_column_index_dev, x, &beta, y);
+ AssertCusparse(error_code);
+ }
+
+
+
+ void csrmv(cusparseHandle_t handle, bool transpose, int m, int n, int nnz,
+ const cusparseMatDescr_t descr, const double *A_val_dev,
+ const int *A_row_ptr_dev, const int *A_column_index_dev,
+ const double *x, bool add, double *y)
+ {
+ double alpha = 1.;
+ double beta = add ? 1. : 0.;
+ cusparseOperation_t cusparse_operation = transpose ?
+ CUSPARSE_OPERATION_TRANSPOSE :
+ CUSPARSE_OPERATION_NON_TRANSPOSE;
+
+ cusparseStatus_t error_code;
+ // This function performs y = alpha*op(A)*x + beta*y
+ error_code = cusparseDcsrmv(handle, cusparse_operation, m, n, nnz,
+ &alpha, descr, A_val_dev, A_row_ptr_dev,
+ A_column_index_dev, x, &beta, y);
+ AssertCusparse(error_code);
+ }
+
+
+
+ template <typename Number>
+ __global__ void l1_norm(const typename SparseMatrix<Number>::size_type n_rows,
+ const Number *val_dev, const int *column_index_dev,
+ const int *row_ptr_dev, Number *sums)
+ {
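+      // Each thread handles one row: the absolute values of the row's
+      // entries are atomically accumulated into the per-column sums, so
+      // the caller must pass a zero-initialized 'sums' array.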
+ const typename SparseMatrix<Number>::size_type row = threadIdx.x +
+ blockIdx.x * blockDim.x;
+
+ if (row<n_rows)
+ {
+ for (int j=row_ptr_dev[row]; j<row_ptr_dev[row+1] ; ++j)
+ dealii::LinearAlgebra::CUDAWrappers::atomicAdd_wrapper(
+ &sums[column_index_dev[j]], abs(val_dev[j]));
+ }
+ }
+
+
+
+ template <typename Number>
+ __global__ void linfty_norm(const typename SparseMatrix<Number>::size_type n_rows,
+ const Number *val_dev, const int *column_index_dev,
+ const int *row_ptr_dev, Number *sums)
+ {
+ const typename SparseMatrix<Number>::size_type row = threadIdx.x +
+ blockIdx.x * blockDim.x;
+
+ if (row<n_rows)
+ {
+ sums[row] = (Number) 0.;
+ for (int j=row_ptr_dev[row]; j<row_ptr_dev[row+1] ; ++j)
+ sums[row] += abs(val_dev[j]);
+ }
+ }
+ }
+
+
+
+ template <typename Number>
+ SparseMatrix<Number>::SparseMatrix()
+    :
+    cusparse_handle(nullptr),
+    nnz(0),
+    n_rows(0),
+    n_cols(0),
+    val_dev(nullptr),
+    column_index_dev(nullptr),
+    row_ptr_dev(nullptr),
+    descr(nullptr)
+ {}
+
+
+
+ template <typename Number>
+ SparseMatrix<Number>::SparseMatrix(cusparseHandle_t handle,
+ const ::dealii::SparseMatrix<Number> &sparse_matrix_host)
+ :
+ val_dev(nullptr),
+ column_index_dev(nullptr),
+ row_ptr_dev(nullptr),
+ descr(nullptr)
+ {
+ reinit(handle, sparse_matrix_host);
+ }
+
+
+
+ template <typename Number>
+ SparseMatrix<Number>::SparseMatrix(CUDAWrappers::SparseMatrix<Number> &&other)
+ {
+ cusparse_handle = other.cusparse_handle;
+ nnz = other.nnz;
+ n_rows = other.n_rows;
+ n_cols = other.n_cols;
+ val_dev = other.val_dev;
+ column_index_dev = other.column_index_dev;
+ row_ptr_dev = other.row_ptr_dev;
+ descr = other.descr;
+
+ other.nnz = 0;
+ other.n_rows = 0;
+ other.n_cols = 0;
+ other.val_dev = nullptr;
+ other.column_index_dev = nullptr;
+ other.row_ptr_dev = nullptr;
+ other.descr = nullptr;
+ }
+
+
+
+ template <typename Number>
+  SparseMatrix<Number>::~SparseMatrix()
+ {
+ if (val_dev != nullptr)
+ {
+ cudaError_t error_code = cudaFree(val_dev);
+ AssertCuda(error_code);
+ val_dev = nullptr;
+ }
+
+ if (column_index_dev != nullptr)
+ {
+ cudaError_t error_code = cudaFree(column_index_dev);
+ AssertCuda(error_code);
+ column_index_dev = nullptr;
+ }
+
+ if (row_ptr_dev != nullptr)
+ {
+ cudaError_t error_code = cudaFree(row_ptr_dev);
+ AssertCuda(error_code);
+ row_ptr_dev = nullptr;
+ }
+
+ if (descr != nullptr)
+ {
+ cusparseStatus_t cusparse_error_code = cusparseDestroyMatDescr(descr);
+ AssertCusparse(cusparse_error_code);
+ descr = nullptr;
+ }
+
+    nnz = 0;
+    n_rows = 0;
+    n_cols = 0;
+ }
+
+
+
+ template <typename Number>
+ void SparseMatrix<Number>::reinit(cusparseHandle_t handle,
+ const ::dealii::SparseMatrix<Number> &sparse_matrix_host)
+ {
+ cusparse_handle = handle;
+ nnz = sparse_matrix_host.n_nonzero_elements();
+ n_rows = sparse_matrix_host.m();
+ n_cols = sparse_matrix_host.n();
+ unsigned int const row_ptr_size = n_rows + 1;
+ std::vector<Number> val;
+ val.reserve(nnz);
+ std::vector<int> column_index;
+ column_index.reserve(nnz);
+ std::vector<int> row_ptr(row_ptr_size, 0);
+
+ // dealii::SparseMatrix stores the diagonal first in each row so we need to do some
+ // reordering
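+      // For example, a row stored as (diag, c_0, c_1) with column indices
+      // c_0 < row < c_1 is rewritten as (c_0, diag, c_1).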
+ for (int row = 0; row < n_rows; ++row)
+ {
+ auto p_end = sparse_matrix_host.end(row);
+ unsigned int counter = 0;
+ for (auto p = sparse_matrix_host.begin(row); p != p_end; ++p)
+ {
+ val.emplace_back(p->value());
+ column_index.emplace_back(p->column());
+ ++counter;
+ }
+ row_ptr[row + 1] = row_ptr[row] + counter;
+
+ // Sort the elements in the row
+ unsigned int const offset = row_ptr[row];
+ int const diag_index = column_index[offset];
+ Number diag_elem = sparse_matrix_host.diag_element(row);
+ unsigned int pos = 1;
+        while ((pos < counter) && (column_index[offset + pos] < row))
+ {
+ val[offset + pos - 1] = val[offset + pos];
+ column_index[offset + pos - 1] = column_index[offset + pos];
+ ++pos;
+ }
+ val[offset + pos - 1] = diag_elem;
+ column_index[offset + pos - 1] = diag_index;
+ }
+
+ // Copy the elements to the gpu
+ cudaError_t error_code = cudaMalloc(&val_dev, nnz * sizeof(Number));
+ AssertCuda(error_code);
+ error_code = cudaMemcpy(val_dev, &val[0], nnz * sizeof(Number),
+ cudaMemcpyHostToDevice);
+ AssertCuda(error_code);
+
+ // Copy the column indices to the gpu
+ error_code = cudaMalloc(&column_index_dev, nnz * sizeof(int));
+ AssertCuda(error_code);
+ error_code = cudaMemcpy(column_index_dev, &column_index[0], nnz * sizeof(int),
+ cudaMemcpyHostToDevice);
+ AssertCuda(error_code);
+
+ // Copy the row pointer to the gpu
+ error_code = cudaMalloc(&row_ptr_dev, row_ptr_size * sizeof(int));
+ AssertCuda(error_code);
+ error_code = cudaMemcpy(row_ptr_dev, &row_ptr[0], row_ptr_size * sizeof(int),
+ cudaMemcpyHostToDevice);
+ AssertCuda(error_code);
+
+ // Create the matrix descriptor
+ cusparseStatus_t cusparse_error_code = cusparseCreateMatDescr(&descr);
+ AssertCusparse(cusparse_error_code);
+ cusparse_error_code = cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
+ AssertCusparse(cusparse_error_code);
+ cusparse_error_code = cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
+ AssertCusparse(cusparse_error_code);
+ }
+
+
+
+ template <typename Number>
+ SparseMatrix<Number> &SparseMatrix<Number>::operator*= (const Number factor)
+ {
+ AssertIsFinite(factor);
+ const int n_blocks = 1 + (nnz-1)/block_size;
+ internal::scale<Number> <<<n_blocks,block_size>>>(val_dev, factor, nnz);
+
+ // Check that the kernel was launched correctly
+ AssertCuda(cudaGetLastError());
+ // Check that there was no problem during the execution of the kernel
+ AssertCuda(cudaDeviceSynchronize());
+
+ return *this;
+ }
+
+
+
+ template <typename Number>
+ SparseMatrix<Number> &SparseMatrix<Number>::operator/= (const Number factor)
+ {
+ AssertIsFinite(factor);
+ Assert(factor!=Number(0.), ExcZero());
+ const int n_blocks = 1 + (nnz-1)/block_size;
+ internal::scale<Number> <<<n_blocks,block_size>>>(val_dev, 1./factor, nnz);
+
+ // Check that the kernel was launched correctly
+ AssertCuda(cudaGetLastError());
+ // Check that there was no problem during the execution of the kernel
+ AssertCuda(cudaDeviceSynchronize());
+
+ return *this;
+ }
+
+
+
+ template <typename Number>
+  void SparseMatrix<Number>::vmult(LinearAlgebra::CUDAWrappers::Vector<Number> &dst,
+                                   const LinearAlgebra::CUDAWrappers::Vector<Number> &src) const
+ {
+ internal::csrmv(cusparse_handle, false, n_rows, n_cols, nnz, descr, val_dev,
+ row_ptr_dev, column_index_dev, src.get_values(), false,
+ dst.get_values());
+ }
+
+
+
+ template <typename Number>
+ void SparseMatrix<Number>::Tvmult(LinearAlgebra::CUDAWrappers::Vector<Number> &dst,
+ const LinearAlgebra::CUDAWrappers::Vector<Number> &src) const
+ {
+ internal::csrmv(cusparse_handle, true, n_rows, n_cols, nnz, descr, val_dev,
+ row_ptr_dev, column_index_dev, src.get_values(), false,
+ dst.get_values());
+ }
+
+
+
+ template <typename Number>
+ void SparseMatrix<Number>::vmult_add(LinearAlgebra::CUDAWrappers::Vector<Number> &dst,
+ const LinearAlgebra::CUDAWrappers::Vector<Number> &src) const
+ {
+ internal::csrmv(cusparse_handle, false, n_rows, n_cols, nnz, descr, val_dev,
+ row_ptr_dev, column_index_dev, src.get_values(), true,
+ dst.get_values());
+ }
+
+
+
+ template <typename Number>
+ void SparseMatrix<Number>::Tvmult_add(LinearAlgebra::CUDAWrappers::Vector<Number> &dst,
+ const LinearAlgebra::CUDAWrappers::Vector<Number> &src) const
+ {
+ internal::csrmv(cusparse_handle, true, n_rows, n_cols, nnz, descr, val_dev,
+ row_ptr_dev, column_index_dev, src.get_values(), true,
+ dst.get_values());
+ }
+
+
+
+ template <typename Number>
+ Number SparseMatrix<Number>::matrix_norm_square(const LinearAlgebra::CUDAWrappers::Vector<Number> &v) const
+ {
+ LinearAlgebra::CUDAWrappers::Vector<Number> tmp = v;
+ vmult(tmp, v);
+
+ return v*tmp;
+ }
+
+
+
+ template <typename Number>
+ Number SparseMatrix<Number>::matrix_scalar_product(const LinearAlgebra::CUDAWrappers::Vector<Number> &u,
+ const LinearAlgebra::CUDAWrappers::Vector<Number> &v) const
+ {
+ LinearAlgebra::CUDAWrappers::Vector<Number> tmp = v;
+ vmult(tmp, v);
+
+ return u*tmp;
+ }
+
+
+
+ template <typename Number>
+ Number SparseMatrix<Number>::residual(LinearAlgebra::CUDAWrappers::Vector<Number> &dst,
+ const LinearAlgebra::CUDAWrappers::Vector<Number> &x,
+ const LinearAlgebra::CUDAWrappers::Vector<Number> &b) const
+ {
+ vmult(dst, x);
+ dst.sadd(-1., 1., b);
+
+ return dst.l2_norm();
+ }
+
+
+
+ template <typename Number>
+ Number SparseMatrix<Number>::l1_norm() const
+ {
+ LinearAlgebra::CUDAWrappers::Vector<real_type> column_sums(n_cols);
+    const int n_blocks = 1 + (n_rows - 1) / block_size;
+ internal::l1_norm<Number> <<<n_blocks,block_size>>>(n_rows, val_dev,
+ column_index_dev,
+ row_ptr_dev,
+ column_sums.get_values());
+ // Check that the kernel was launched correctly
+ AssertCuda(cudaGetLastError());
+ // Check that there was no problem during the execution of the kernel
+ AssertCuda(cudaDeviceSynchronize());
+
+ return column_sums.linfty_norm();
+ }
+
+
+
+ template <typename Number>
+ Number SparseMatrix<Number>::linfty_norm() const
+ {
+ LinearAlgebra::CUDAWrappers::Vector<real_type> row_sums(n_rows);
+    const int n_blocks = 1 + (n_rows - 1) / block_size;
+ internal::linfty_norm<Number> <<<n_blocks,block_size>>>(n_rows, val_dev,
+ column_index_dev,
+ row_ptr_dev,
+ row_sums.get_values());
+ // Check that the kernel was launched correctly
+ AssertCuda(cudaGetLastError());
+ // Check that there was no problem during the execution of the kernel
+ AssertCuda(cudaDeviceSynchronize());
+
+ return row_sums.linfty_norm();
+ }
+
+
+
+ template <typename Number>
+ Number SparseMatrix<Number>::frobenius_norm() const
+ {
+ LinearAlgebra::CUDAWrappers::Vector<real_type> matrix_values(nnz);
+    cudaError_t cuda_error = cudaMemcpy(matrix_values.get_values(), val_dev,
+                                        nnz*sizeof(Number),
+                                        cudaMemcpyDeviceToDevice);
+    AssertCuda(cuda_error);
+
+ return matrix_values.l2_norm();
+ }
+
+
+
+ template <typename Number>
+ std::tuple<Number *, int *, int *, cusparseMatDescr_t>
+ SparseMatrix<Number>::get_cusparse_matrix()
+ {
+ return std::make_tuple(val_dev, column_index_dev, row_ptr_dev, descr);
+ }
+
+
+
+ template class SparseMatrix<float>;
+ template class SparseMatrix<double>;
+}
+DEAL_II_NAMESPACE_CLOSE
+
+#endif