private:
/**
- * cuSPARSE handle used to call cuSPARSE functions. The cuSPARSE handle
- * needs to be mutable to be called in a const function.
+ * cuSPARSE handle used to call cuSPARSE functions.
*/
- mutable cusparseHandle_t cusparse_handle;
+ cusparseHandle_t cusparse_handle;
/**
* Number of non-zero elements in the sparse matrix.
*/
int nnz;
/**
* Pointer to the values (on the device) of the sparse matrix.
*/
- Number *val_dev;
+ std::unique_ptr<Number[], void (*)(Number *)> val_dev;
/**
* Pointer to the column indices (on the device) of the sparse matrix.
*/
- int *column_index_dev;
+ std::unique_ptr<int[], void (*)(int *)> column_index_dev;
/**
* Pointer to the row pointer (on the device) of the sparse matrix.
*/
- int *row_ptr_dev;
+ std::unique_ptr<int[], void (*)(int *)> row_ptr_dev;
/**
* cuSPARSE description of the sparse matrix.
*/
cusparseMatDescr_t descr;
std::vector<int> rows(n_rows + 1);
std::vector<int> cols(nnz);
std::vector<Number> val(nnz);
- Utilities::CUDA::copy_to_host(row_ptr_dev, rows);
- Utilities::CUDA::copy_to_host(column_index_dev, cols);
- Utilities::CUDA::copy_to_host(val_dev, val);
+ Utilities::CUDA::copy_to_host(row_ptr_dev.get(), rows);
+ Utilities::CUDA::copy_to_host(column_index_dev.get(), cols);
+ Utilities::CUDA::copy_to_host(val_dev.get(), val);
bool has_diagonal = false;
Number diagonal = Number();
std::vector<int> rows(n_rows + 1);
std::vector<int> cols(nnz);
std::vector<Number> val(nnz);
- Utilities::CUDA::copy_to_host(row_ptr_dev, rows);
- Utilities::CUDA::copy_to_host(column_index_dev, cols);
- Utilities::CUDA::copy_to_host(val_dev, val);
+ Utilities::CUDA::copy_to_host(row_ptr_dev.get(), rows);
+ Utilities::CUDA::copy_to_host(column_index_dev.get(), cols);
+ Utilities::CUDA::copy_to_host(val_dev.get(), val);
unsigned int width = width_;
namespace CUDAWrappers
{
+ namespace
+ {
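+ /**
+ * Free device memory allocated by allocate_device_vector(). This
+ * function serves as the deleter of the unique_ptr members and must
+ * therefore not throw; a failing cudaFree() only triggers a nothrow
+ * assertion.
+ */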
+ template <typename Number>
+ void
+ delete_device_vector(Number *device_ptr) noexcept
+ {
+ const cudaError_t error_code = cudaFree(device_ptr);
+ (void)error_code;
+ AssertNothrow(error_code == cudaSuccess,
+ dealii::ExcCudaError(cudaGetErrorString(error_code)));
+ }
+
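+ /**
+ * Allocate @p size elements on the device and return the raw pointer,
+ * ready to be handed to a unique_ptr that uses delete_device_vector()
+ * as its deleter.
+ */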
+ template <typename Number>
+ Number *
+ allocate_device_vector(const std::size_t size)
+ {
+ Number *device_ptr;
+ Utilities::CUDA::malloc(device_ptr, size);
+ return device_ptr;
+ }
+ } // namespace
+
namespace internal
{
template <typename Number>
SparseMatrix<Number>::SparseMatrix()
: nnz(0)
, n_rows(0)
- , val_dev(nullptr)
- , column_index_dev(nullptr)
- , row_ptr_dev(nullptr)
+ , val_dev(nullptr, delete_device_vector<Number>)
+ , column_index_dev(nullptr, delete_device_vector<int>)
+ , row_ptr_dev(nullptr, delete_device_vector<int>)
, descr(nullptr)
{}
SparseMatrix<Number>::SparseMatrix(
Utilities::CUDA::Handle & handle,
const ::dealii::SparseMatrix<Number> &sparse_matrix_host)
- : val_dev(nullptr)
- , column_index_dev(nullptr)
- , row_ptr_dev(nullptr)
+ : val_dev(nullptr, delete_device_vector<Number>)
+ , column_index_dev(nullptr, delete_device_vector<int>)
+ , row_ptr_dev(nullptr, delete_device_vector<int>)
, descr(nullptr)
{
reinit(handle, sparse_matrix_host);
template <typename Number>
SparseMatrix<Number>::SparseMatrix(CUDAWrappers::SparseMatrix<Number> &&other)
+ : cusparse_handle(other.cusparse_handle)
+ , nnz(other.nnz)
+ , n_rows(other.n_rows)
+ , n_cols(other.n_cols)
+ , val_dev(std::move(other.val_dev))
+ , column_index_dev(std::move(other.column_index_dev))
+ , row_ptr_dev(std::move(other.row_ptr_dev))
+ , descr(other.descr)
{
- cusparse_handle = other.cusparse_handle;
- nnz = other.nnz;
- n_rows = other.n_rows;
- n_cols = other.n_cols;
- val_dev = other.val_dev;
- column_index_dev = other.column_index_dev;
- row_ptr_dev = other.row_ptr_dev;
- descr = other.descr;
-
- other.nnz = 0;
- other.n_rows = 0;
- other.n_cols = 0;
- other.val_dev = nullptr;
- other.column_index_dev = nullptr;
- other.row_ptr_dev = nullptr;
- other.descr = nullptr;
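+ // The unique_ptr members have already transferred ownership of the
+ // device memory; only the scalar members and the cuSPARSE descriptor
+ // of the moved-from object still need to be reset by hand.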
+ other.nnz = 0;
+ other.n_rows = 0;
+ other.n_cols = 0;
+ other.descr = nullptr;
}
template <typename Number>
SparseMatrix<Number>::~SparseMatrix<Number>()
{
- if (val_dev != nullptr)
- {
- const cudaError_t error_code = cudaFree(val_dev);
- AssertNothrowCuda(error_code);
- val_dev = nullptr;
- }
-
- if (column_index_dev != nullptr)
- {
- const cudaError_t error_code = cudaFree(column_index_dev);
- AssertNothrowCuda(error_code);
- column_index_dev = nullptr;
- }
-
- if (row_ptr_dev != nullptr)
- {
- const cudaError_t error_code = cudaFree(row_ptr_dev);
- AssertNothrowCuda(error_code);
- row_ptr_dev = nullptr;
- }
-
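+ // The device memory held by val_dev, column_index_dev, and row_ptr_dev
+ // is now released automatically through delete_device_vector(); only
+ // the cuSPARSE descriptor still requires manual cleanup.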
if (descr != nullptr)
{
const cusparseStatus_t cusparse_error_code =
nnz = other.nnz;
n_rows = other.n_rows;
n_cols = other.n_cols;
- val_dev = other.val_dev;
- column_index_dev = other.column_index_dev;
- row_ptr_dev = other.row_ptr_dev;
+ val_dev = std::move(other.val_dev);
+ column_index_dev = std::move(other.column_index_dev);
+ row_ptr_dev = std::move(other.row_ptr_dev);
descr = other.descr;
- other.nnz = 0;
- other.n_rows = 0;
- other.n_cols = 0;
- other.val_dev = nullptr;
- other.column_index_dev = nullptr;
- other.row_ptr_dev = nullptr;
- other.descr = nullptr;
+ other.nnz = 0;
+ other.n_rows = 0;
+ other.n_cols = 0;
+ other.descr = nullptr;
return *this;
}
}
// Copy the elements to the gpu
- cudaError_t error_code = cudaMalloc(&val_dev, nnz * sizeof(Number));
- AssertCuda(error_code);
- error_code = cudaMemcpy(val_dev,
- &val[0],
- nnz * sizeof(Number),
- cudaMemcpyHostToDevice);
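+ // allocate_device_vector() checks the result of cudaMalloc() itself,
+ // so only the subsequent copies need an explicit error check.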
+ val_dev.reset(allocate_device_vector<Number>(nnz));
+ cudaError_t error_code = cudaMemcpy(val_dev.get(),
+ &val[0],
+ nnz * sizeof(Number),
+ cudaMemcpyHostToDevice);
AssertCuda(error_code);
// Copy the column indices to the gpu
- error_code = cudaMalloc(&column_index_dev, nnz * sizeof(int));
+ column_index_dev.reset(allocate_device_vector<int>(nnz));
- AssertCuda(error_code);
- error_code = cudaMemcpy(column_index_dev,
+ error_code = cudaMemcpy(column_index_dev.get(),
&column_index[0],
nnz * sizeof(int),
cudaMemcpyHostToDevice);
AssertCuda(error_code);
// Copy the row pointer to the gpu
- error_code = cudaMalloc(&row_ptr_dev, row_ptr_size * sizeof(int));
+ row_ptr_dev.reset(allocate_device_vector<int>(row_ptr_size));
- AssertCuda(error_code);
- error_code = cudaMemcpy(row_ptr_dev,
+ error_code = cudaMemcpy(row_ptr_dev.get(),
&row_ptr[0],
row_ptr_size * sizeof(int),
cudaMemcpyHostToDevice);
{
AssertIsFinite(factor);
const int n_blocks = 1 + (nnz - 1) / block_size;
- internal::scale<Number><<<n_blocks, block_size>>>(val_dev, factor, nnz);
+ internal::scale<Number>
+ <<<n_blocks, block_size>>>(val_dev.get(), factor, nnz);
// Check that the kernel was launched correctly
AssertCuda(cudaGetLastError());
Assert(factor != Number(0.), ExcZero());
const int n_blocks = 1 + (nnz - 1) / block_size;
internal::scale<Number>
- <<<n_blocks, block_size>>>(val_dev, 1. / factor, nnz);
+ <<<n_blocks, block_size>>>(val_dev.get(), 1. / factor, nnz);
// Check that the kernel was launched correctly
AssertCuda(cudaGetLastError());
n_cols,
nnz,
descr,
- val_dev,
- row_ptr_dev,
- column_index_dev,
+ val_dev.get(),
+ row_ptr_dev.get(),
+ column_index_dev.get(),
src.get_values(),
false,
dst.get_values());
n_cols,
nnz,
descr,
- val_dev,
- row_ptr_dev,
- column_index_dev,
+ val_dev.get(),
+ row_ptr_dev.get(),
+ column_index_dev.get(),
src.get_values(),
false,
dst.get_values());
n_cols,
nnz,
descr,
- val_dev,
- row_ptr_dev,
- column_index_dev,
+ val_dev.get(),
+ row_ptr_dev.get(),
+ column_index_dev.get(),
src.get_values(),
true,
dst.get_values());
n_cols,
nnz,
descr,
- val_dev,
- row_ptr_dev,
- column_index_dev,
+ val_dev.get(),
+ row_ptr_dev.get(),
+ column_index_dev.get(),
src.get_values(),
true,
dst.get_values());
{
LinearAlgebra::CUDAWrappers::Vector<real_type> column_sums(n_cols);
const int n_blocks = 1 + (nnz - 1) / block_size;
- internal::l1_norm<Number><<<n_blocks, block_size>>>(
- n_rows, val_dev, column_index_dev, row_ptr_dev, column_sums.get_values());
+ internal::l1_norm<Number>
+ <<<n_blocks, block_size>>>(n_rows,
+ val_dev.get(),
+ column_index_dev.get(),
+ row_ptr_dev.get(),
+ column_sums.get_values());
// Check that the kernel was launched correctly
AssertCuda(cudaGetLastError());
// Check that there was no problem during the execution of the kernel
{
LinearAlgebra::CUDAWrappers::Vector<real_type> row_sums(n_rows);
const int n_blocks = 1 + (nnz - 1) / block_size;
- internal::linfty_norm<Number><<<n_blocks, block_size>>>(
- n_rows, val_dev, column_index_dev, row_ptr_dev, row_sums.get_values());
+ internal::linfty_norm<Number>
+ <<<n_blocks, block_size>>>(n_rows,
+ val_dev.get(),
+ column_index_dev.get(),
+ row_ptr_dev.get(),
+ row_sums.get_values());
// Check that the kernel was launched correctly
AssertCuda(cudaGetLastError());
// Check that there was no problem during the execution of the kernel
{
LinearAlgebra::CUDAWrappers::Vector<real_type> matrix_values(nnz);
cudaError_t cuda_error = cudaMemcpy(matrix_values.get_values(),
- val_dev,
+ val_dev.get(),
nnz * sizeof(Number),
cudaMemcpyDeviceToDevice);
std::tuple<Number *, int *, int *, cusparseMatDescr_t>
SparseMatrix<Number>::get_cusparse_matrix() const
{
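+ // Hand out the raw device pointers for direct use with the cuSPARSE
+ // API. Ownership stays with this object, so the caller must not free
+ // them.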
- return std::make_tuple(val_dev, column_index_dev, row_ptr_dev, descr);
+ return std::make_tuple(val_dev.get(),
+ column_index_dev.get(),
+ row_ptr_dev.get(),
+ descr);
}