From d0c97f3920e07dd4e80d6d407384e53191159cea Mon Sep 17 00:00:00 2001
From: Daniel Arndt
Date: Tue, 21 Aug 2018 09:26:43 +0200
Subject: [PATCH] Use unique_ptr in CUDAWrappers::SparseMatrix

---
 include/deal.II/lac/cuda_sparse_matrix.h |  23 ++-
 source/lac/cuda_sparse_matrix.cu         | 171 ++++++++++++-----------
 2 files changed, 99 insertions(+), 95 deletions(-)

diff --git a/include/deal.II/lac/cuda_sparse_matrix.h b/include/deal.II/lac/cuda_sparse_matrix.h
index 4599622de6..d80210ec81 100644
--- a/include/deal.II/lac/cuda_sparse_matrix.h
+++ b/include/deal.II/lac/cuda_sparse_matrix.h
@@ -331,10 +331,9 @@ namespace CUDAWrappers
   private:
     /**
-     * cuSPARSE handle used to call cuSPARSE functions. The cuSPARSE handle
-     * needs to be mutable to be called in a const function.
+     * cuSPARSE handle used to call cuSPARSE functions.
      */
-    mutable cusparseHandle_t cusparse_handle;
+    cusparseHandle_t cusparse_handle;

     /**
      * Number of non-zero elements in the sparse matrix.
      */
@@ -354,17 +353,17 @@ namespace CUDAWrappers
     /**
      * Pointer to the values (on the device) of the sparse matrix.
      */
-    Number *val_dev;
+    std::unique_ptr<Number[], void (*)(Number *)> val_dev;

     /**
      * Pointer to the column indices (on the device) of the sparse matrix.
      */
-    int *column_index_dev;
+    std::unique_ptr<int[], void (*)(int *)> column_index_dev;

     /**
      * Pointer to the row pointer (on the device) of the sparse matrix.
      */
-    int *row_ptr_dev;
+    std::unique_ptr<int[], void (*)(int *)> row_ptr_dev;

     /**
      * cuSPARSE description of the sparse matrix.
@@ -415,9 +414,9 @@ namespace CUDAWrappers
     std::vector<int>    rows(n_rows + 1);
     std::vector<int>    cols(nnz);
     std::vector<Number> val(nnz);
-    Utilities::CUDA::copy_to_host(row_ptr_dev, rows);
-    Utilities::CUDA::copy_to_host(column_index_dev, cols);
-    Utilities::CUDA::copy_to_host(val_dev, val);
+    Utilities::CUDA::copy_to_host(row_ptr_dev.get(), rows);
+    Utilities::CUDA::copy_to_host(column_index_dev.get(), cols);
+    Utilities::CUDA::copy_to_host(val_dev.get(), val);

     bool   has_diagonal = false;
     Number diagonal     = Number();
@@ -474,9 +473,9 @@ namespace CUDAWrappers
     std::vector<int>    rows(n_rows + 1);
     std::vector<int>    cols(nnz);
     std::vector<Number> val(nnz);
-    Utilities::CUDA::copy_to_host(row_ptr_dev, rows);
-    Utilities::CUDA::copy_to_host(column_index_dev, cols);
-    Utilities::CUDA::copy_to_host(val_dev, val);
+    Utilities::CUDA::copy_to_host(row_ptr_dev.get(), rows);
+    Utilities::CUDA::copy_to_host(column_index_dev.get(), cols);
+    Utilities::CUDA::copy_to_host(val_dev.get(), val);

     unsigned int width = width_;

diff --git a/source/lac/cuda_sparse_matrix.cu b/source/lac/cuda_sparse_matrix.cu
index 0b8700c5f5..d4411b8319 100644
--- a/source/lac/cuda_sparse_matrix.cu
+++ b/source/lac/cuda_sparse_matrix.cu
@@ -27,6 +27,28 @@ DEAL_II_NAMESPACE_OPEN

 namespace CUDAWrappers
 {
+  namespace
+  {
+    template <typename Number>
+    void
+    delete_device_vector(Number *device_ptr) noexcept
+    {
+      const cudaError_t error_code = cudaFree(device_ptr);
+      (void)error_code;
+      AssertNothrow(error_code == cudaSuccess,
+                    dealii::ExcCudaError(cudaGetErrorString(error_code)));
+    }
+
+    template <typename Number>
+    Number *
+    allocate_device_vector(const std::size_t size)
+    {
+      Number *device_ptr;
+      Utilities::CUDA::malloc(device_ptr, size);
+      return device_ptr;
+    }
+  } // namespace
+
   namespace internal
   {
     template <typename Number>
@@ -168,9 +190,9 @@ namespace CUDAWrappers
   SparseMatrix<Number>::SparseMatrix()
     : nnz(0)
     , n_rows(0)
-    , val_dev(nullptr)
-    , column_index_dev(nullptr)
-    , row_ptr_dev(nullptr)
+    , val_dev(nullptr, delete_device_vector<Number>)
+    , column_index_dev(nullptr, delete_device_vector<int>)
+    , row_ptr_dev(nullptr, delete_device_vector<int>)
     , descr(nullptr)
   {}

@@ -180,9 +202,9 @@ namespace CUDAWrappers
   SparseMatrix<Number>::SparseMatrix(
     Utilities::CUDA::Handle &             handle,
     const ::dealii::SparseMatrix<Number> &sparse_matrix_host)
-    : val_dev(nullptr)
-    , column_index_dev(nullptr)
-    , row_ptr_dev(nullptr)
+    : val_dev(nullptr, delete_device_vector<Number>)
+    , column_index_dev(nullptr, delete_device_vector<int>)
+    , row_ptr_dev(nullptr, delete_device_vector<int>)
     , descr(nullptr)
   {
     reinit(handle, sparse_matrix_host);
@@ -192,23 +214,19 @@ namespace CUDAWrappers
   template <typename Number>
   SparseMatrix<Number>::SparseMatrix(CUDAWrappers::SparseMatrix<Number> &&other)
+    : cusparse_handle(other.cusparse_handle)
+    , nnz(other.nnz)
+    , n_rows(other.n_rows)
+    , n_cols(other.n_cols)
+    , val_dev(std::move(other.val_dev))
+    , column_index_dev(std::move(other.column_index_dev))
+    , row_ptr_dev(std::move(other.row_ptr_dev))
+    , descr(other.descr)
   {
-    cusparse_handle  = other.cusparse_handle;
-    nnz              = other.nnz;
-    n_rows           = other.n_rows;
-    n_cols           = other.n_cols;
-    val_dev          = other.val_dev;
-    column_index_dev = other.column_index_dev;
-    row_ptr_dev      = other.row_ptr_dev;
-    descr            = other.descr;
-
-    other.nnz              = 0;
-    other.n_rows           = 0;
-    other.n_cols           = 0;
-    other.val_dev          = nullptr;
-    other.column_index_dev = nullptr;
-    other.row_ptr_dev      = nullptr;
-    other.descr            = nullptr;
+    other.nnz    = 0;
+    other.n_rows = 0;
+    other.n_cols = 0;
+    other.descr  = nullptr;
   }


@@ -216,27 +234,6 @@ namespace CUDAWrappers
   template <typename Number>
   SparseMatrix<Number>::~SparseMatrix()
   {
-    if (val_dev != nullptr)
-      {
-        const cudaError_t error_code = cudaFree(val_dev);
-        AssertNothrowCuda(error_code);
-        val_dev = nullptr;
-      }
-
-    if (column_index_dev != nullptr)
-      {
-        const cudaError_t error_code = cudaFree(column_index_dev);
-        AssertNothrowCuda(error_code);
-        column_index_dev = nullptr;
-      }
-
-    if (row_ptr_dev != nullptr)
-      {
-        const cudaError_t error_code = cudaFree(row_ptr_dev);
-        AssertNothrowCuda(error_code);
-        row_ptr_dev = nullptr;
-      }
-
     if (descr != nullptr)
       {
         const cusparseStatus_t cusparse_error_code =
@@ -259,18 +256,15 @@ namespace CUDAWrappers
     nnz    = other.nnz;
     n_rows = other.n_rows;
     n_cols = other.n_cols;
-    val_dev          = other.val_dev;
-    column_index_dev = other.column_index_dev;
-    row_ptr_dev      = other.row_ptr_dev;
+    val_dev          = std::move(other.val_dev);
+    column_index_dev = std::move(other.column_index_dev);
+    row_ptr_dev      = std::move(other.row_ptr_dev);
     descr = other.descr;

-    other.nnz              = 0;
-    other.n_rows           = 0;
-    other.n_cols           = 0;
-    other.val_dev          = nullptr;
-    other.column_index_dev = nullptr;
-    other.row_ptr_dev      = nullptr;
-    other.descr            = nullptr;
+    other.nnz    = 0;
+    other.n_rows = 0;
+    other.n_cols = 0;
+    other.descr  = nullptr;

     return *this;
   }
@@ -324,27 +318,26 @@ namespace CUDAWrappers
       }

     // Copy the elements to the gpu
-    cudaError_t error_code = cudaMalloc(&val_dev, nnz * sizeof(Number));
-    AssertCuda(error_code);
-    error_code = cudaMemcpy(val_dev,
-                            &val[0],
-                            nnz * sizeof(Number),
-                            cudaMemcpyHostToDevice);
+    val_dev.reset(allocate_device_vector<Number>(nnz));
+    cudaError_t error_code = cudaMemcpy(val_dev.get(),
+                                        &val[0],
+                                        nnz * sizeof(Number),
+                                        cudaMemcpyHostToDevice);
     AssertCuda(error_code);

     // Copy the column indices to the gpu
-    error_code = cudaMalloc(&column_index_dev, nnz * sizeof(int));
+    column_index_dev.reset(allocate_device_vector<int>(nnz));
     AssertCuda(error_code);
-    error_code = cudaMemcpy(column_index_dev,
+    error_code = cudaMemcpy(column_index_dev.get(),
                             &column_index[0],
                             nnz * sizeof(int),
                             cudaMemcpyHostToDevice);
     AssertCuda(error_code);

     // Copy the row pointer to the gpu
-    error_code = cudaMalloc(&row_ptr_dev, row_ptr_size * sizeof(int));
+    row_ptr_dev.reset(allocate_device_vector<int>(row_ptr_size));
     AssertCuda(error_code);
-    error_code = cudaMemcpy(row_ptr_dev,
+    error_code = cudaMemcpy(row_ptr_dev.get(),
                             &row_ptr[0],
                             row_ptr_size * sizeof(int),
                             cudaMemcpyHostToDevice);
@@ -369,7 +362,8 @@ namespace CUDAWrappers
   {
     AssertIsFinite(factor);
     const int n_blocks = 1 + (nnz - 1) / block_size;
-    internal::scale<Number><<<n_blocks, block_size>>>(val_dev, factor, nnz);
+    internal::scale<Number>
+      <<<n_blocks, block_size>>>(val_dev.get(), factor, nnz);

     // Check that the kernel was launched correctly
     AssertCuda(cudaGetLastError());
@@ -389,7 +383,7 @@ namespace CUDAWrappers
     Assert(factor != Number(0.), ExcZero());
     const int n_blocks = 1 + (nnz - 1) / block_size;
     internal::scale<Number>
-      <<<n_blocks, block_size>>>(val_dev, 1. / factor, nnz);
+      <<<n_blocks, block_size>>>(val_dev.get(), 1. / factor, nnz);

     // Check that the kernel was launched correctly
     AssertCuda(cudaGetLastError());
@@ -413,9 +407,9 @@ namespace CUDAWrappers
                     n_cols,
                     nnz,
                     descr,
-                    val_dev,
-                    row_ptr_dev,
-                    column_index_dev,
+                    val_dev.get(),
+                    row_ptr_dev.get(),
+                    column_index_dev.get(),
                     src.get_values(),
                     false,
                     dst.get_values());
@@ -435,9 +429,9 @@ namespace CUDAWrappers
                     n_cols,
                     nnz,
                     descr,
-                    val_dev,
-                    row_ptr_dev,
-                    column_index_dev,
+                    val_dev.get(),
+                    row_ptr_dev.get(),
+                    column_index_dev.get(),
                     src.get_values(),
                     false,
                     dst.get_values());
@@ -457,9 +451,9 @@ namespace CUDAWrappers
                     n_cols,
                     nnz,
                     descr,
-                    val_dev,
-                    row_ptr_dev,
-                    column_index_dev,
+                    val_dev.get(),
+                    row_ptr_dev.get(),
+                    column_index_dev.get(),
                     src.get_values(),
                     true,
                     dst.get_values());
@@ -479,9 +473,9 @@ namespace CUDAWrappers
                     n_cols,
                     nnz,
                     descr,
-                    val_dev,
-                    row_ptr_dev,
-                    column_index_dev,
+                    val_dev.get(),
+                    row_ptr_dev.get(),
+                    column_index_dev.get(),
                     src.get_values(),
                     true,
                     dst.get_values());
@@ -537,8 +531,12 @@ namespace CUDAWrappers
   {
     LinearAlgebra::CUDAWrappers::Vector<Number> column_sums(n_cols);
     const int n_blocks = 1 + (nnz - 1) / block_size;
-    internal::l1_norm<Number><<<n_blocks, block_size>>>(
-      n_rows, val_dev, column_index_dev, row_ptr_dev, column_sums.get_values());
+    internal::l1_norm<Number>
+      <<<n_blocks, block_size>>>(n_rows,
+                                 val_dev.get(),
+                                 column_index_dev.get(),
+                                 row_ptr_dev.get(),
+                                 column_sums.get_values());
     // Check that the kernel was launched correctly
     AssertCuda(cudaGetLastError());
     // Check that there was no problem during the execution of the kernel
@@ -555,8 +553,12 @@ namespace CUDAWrappers
   {
     LinearAlgebra::CUDAWrappers::Vector<Number> row_sums(n_rows);
     const int n_blocks = 1 + (nnz - 1) / block_size;
-    internal::linfty_norm<Number><<<n_blocks, block_size>>>(
-      n_rows, val_dev, column_index_dev, row_ptr_dev, row_sums.get_values());
+    internal::linfty_norm<Number>
+      <<<n_blocks, block_size>>>(n_rows,
+                                 val_dev.get(),
+                                 column_index_dev.get(),
+                                 row_ptr_dev.get(),
+                                 row_sums.get_values());
     // Check that the kernel was launched correctly
     AssertCuda(cudaGetLastError());
     // Check that there was no problem during the execution of the kernel
@@ -573,7 +575,7 @@ namespace CUDAWrappers
   {
     LinearAlgebra::CUDAWrappers::Vector<Number> matrix_values(nnz);
     cudaError_t cuda_error = cudaMemcpy(matrix_values.get_values(),
-                                        val_dev,
+                                        val_dev.get(),
                                         nnz * sizeof(Number),
                                         cudaMemcpyDeviceToDevice);

@@ -586,7 +588,10 @@ namespace CUDAWrappers
   std::tuple<Number *, int *, int *, cusparseMatDescr_t>
   SparseMatrix<Number>::get_cusparse_matrix() const
   {
-    return std::make_tuple(val_dev, column_index_dev, row_ptr_dev, descr);
+    return std::make_tuple(val_dev.get(),
+                           column_index_dev.get(),
+                           row_ptr_dev.get(),
+                           descr);
   }

-- 
2.39.5
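
The pattern the patch adopts, a std::unique_ptr whose custom deleter returns device memory with cudaFree, is not specific to deal.II. The following standalone sketch shows the same idea in isolation; the helper names allocate_device_array and delete_device_array are illustrative only (they are not deal.II or CUDA API), and error checking is omitted for brevity.

// Standalone sketch: RAII ownership of CUDA device memory with
// std::unique_ptr, mirroring the approach introduced in the patch above.
// Helper names are illustrative, not deal.II or CUDA API.
#include <cstddef>
#include <cstdio>
#include <memory>
#include <utility>

#include <cuda_runtime.h>

namespace
{
  // Deleter: hands the allocation back to the CUDA runtime. It must not
  // throw, because unique_ptr may invoke it during stack unwinding.
  template <typename T>
  void
  delete_device_array(T *device_ptr) noexcept
  {
    cudaFree(device_ptr);
  }

  // Allocator: returns a raw device pointer suitable for unique_ptr.
  // Error checking omitted for brevity.
  template <typename T>
  T *
  allocate_device_array(const std::size_t n_elements)
  {
    T *device_ptr = nullptr;
    cudaMalloc(&device_ptr, n_elements * sizeof(T));
    return device_ptr;
  }
} // namespace

int
main()
{
  // The unique_ptr owns the device array; cudaFree runs automatically when
  // the pointer is reset or goes out of scope, which replaces the
  // hand-written cleanup the patch removes from the destructor.
  std::unique_ptr<double[], void (*)(double *)> values(
    allocate_device_array<double>(128), delete_device_array<double>);

  // .get() exposes the raw device pointer for kernel launches or library
  // calls (cuSPARSE, cuBLAS, ...), just as the patch does via val_dev.get().
  std::printf("device array at %p\n", static_cast<void *>(values.get()));

  // Moving transfers ownership without copying or freeing device memory,
  // which is what keeps the move constructor and move assignment trivial.
  std::unique_ptr<double[], void (*)(double *)> other = std::move(values);

  return 0;
}

The sketch builds with nvcc or with any host compiler linked against the CUDA runtime library, assuming a CUDA toolkit is installed.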