From ef8dddcf299cc6c433dbef1b1d225e76075d1360 Mon Sep 17 00:00:00 2001
From: Daniel Arndt
Date: Mon, 27 Aug 2018 18:27:37 +0200
Subject: [PATCH] Fix documentation

---
 include/deal.II/lac/cuda_precondition.h | 25 +++++++++++++------------
 source/lac/cuda_precondition.cu         | 14 ++++++++------
 2 files changed, 21 insertions(+), 18 deletions(-)

diff --git a/include/deal.II/lac/cuda_precondition.h b/include/deal.II/lac/cuda_precondition.h
index 8da6e50348..ea3fc7c88f 100644
--- a/include/deal.II/lac/cuda_precondition.h
+++ b/include/deal.II/lac/cuda_precondition.h
@@ -75,7 +75,7 @@ namespace CUDAWrappers
     {
       /**
        * Constructor. cuSPARSE allows to compute and use level information.
-       * According to the documentation it is this might improve performance.
+       * According to the documentation this might improve performance.
        * It is suggested to try both options.
        */
       AdditionalData(bool use_level_analysis = true);
@@ -140,7 +140,7 @@ namespace CUDAWrappers
           const LinearAlgebra::CUDAWrappers::Vector &src) const;
 
     /**
-     * Return the dimension of the codomain (or range) space. Note that the 
+     * Return the dimension of the codomain (or range) space. Note that the
      * matrix is square and has dimension $m \times m$.
      *
      * @note This function should only be called if the preconditioner has been
@@ -150,8 +150,8 @@ namespace CUDAWrappers
     m() const;
 
     /**
-     * Return the dimension of the codomain (or range) space. Note that the 
-     * matrix is square and has dimension $m \times m$.
+     * Return the dimension of the codomain (or range) space. Note that the
+     * matrix is square and has dimension $n \times n$.
      *
      * @note This function should only be called if the preconditioner has been
      * initialized.
@@ -220,7 +220,8 @@ namespace CUDAWrappers
     std::unique_ptr tmp_dev;
 
     /**
-     *
+     * Pointer to an internal buffer (on the device) that is used for
+     * computing the decomposition.
      */
     std::unique_ptr buffer_dev;
 
@@ -259,7 +260,7 @@ namespace CUDAWrappers
 
   /**
    * This class implements an incomplete LU factorization preconditioner for
-   * @em symmetric CUDAWrappers::SparseMatrix matrices.
+   * CUDAWrappers::SparseMatrix matrices.
    *
    * The implementation closely follows the one documented in the cuSPARSE
    * documentation
@@ -346,15 +347,14 @@ namespace CUDAWrappers
           const LinearAlgebra::CUDAWrappers::Vector &src) const;
 
     /**
-     * Apply the preconditioner. Since the preconditioner is symmetric, this
-     * is the same as vmult().
+     * Apply the transposed preconditioner. Not yet implemented.
      */
     void
     Tvmult(LinearAlgebra::CUDAWrappers::Vector &      dst,
            const LinearAlgebra::CUDAWrappers::Vector &src) const;
 
     /**
-     * Return the dimension of the codomain (or range) space. Note that the 
+     * Return the dimension of the codomain (or range) space. Note that the
      * matrix is square and has dimension $m \times m$.
      *
      * @note This function should only be called if the preconditioner has been
@@ -364,8 +364,8 @@ namespace CUDAWrappers
     m() const;
 
     /**
-     * Return the dimension of the codomain (or range) space. Note that the 
-     * matrix is square and has dimension $m \times m$.
+     * Return the dimension of the codomain (or range) space. Note that the
+     * matrix is square and has dimension $n \times n$.
      *
      * @note This function should only be called if the preconditioner has been
      * initialized.
@@ -439,7 +439,8 @@ namespace CUDAWrappers
     std::unique_ptr tmp_dev;
 
     /**
-     *
+     * Pointer to an internal buffer (on the device) that is used for
+     * computing the decomposition.
      */
     std::unique_ptr buffer_dev;
 
diff --git a/source/lac/cuda_precondition.cu b/source/lac/cuda_precondition.cu
index 0030d5ca05..0e8c7db8d3 100644
--- a/source/lac/cuda_precondition.cu
+++ b/source/lac/cuda_precondition.cu
@@ -1315,17 +1315,18 @@ namespace CUDAWrappers
     const auto cusparse_matrix = A.get_cusparse_matrix();
     const Number *const A_val_dev = std::get<0>(cusparse_matrix);
 
-    // create a copy of the matrix entries
+    // create a copy of the matrix entries since the algorithm works in-place.
     P_val_dev.reset(allocate_device_vector(n_nonzero_elements));
-    cudaError_t cuda_status = cudaMemcpy(P_val_dev.get(), 
+    cudaError_t cuda_status = cudaMemcpy(P_val_dev.get(),
                                          A_val_dev,
                                          n_nonzero_elements * sizeof(Number),
                                          cudaMemcpyDeviceToDevice);
+
     P_column_index_dev = std::get<1>(cusparse_matrix);
     P_row_ptr_dev = std::get<2>(cusparse_matrix);
     const cusparseMatDescr_t mat_descr = std::get<3>(cusparse_matrix);
 
-    // initializa an internal buffer we need later on
+    // initialize an internal buffer we need later on
     tmp_dev.reset(allocate_device_vector(n_rows));
 
     // step 3: query how much memory used in csric02 and csrsv2, and allocate
@@ -1635,17 +1636,18 @@ namespace CUDAWrappers
     const auto cusparse_matrix = A.get_cusparse_matrix();
     const Number *const A_val_dev = std::get<0>(cusparse_matrix);
 
-    // create a copy of the matrix entries
+    // create a copy of the matrix entries since the algorithm works in-place.
     P_val_dev.reset(allocate_device_vector(n_nonzero_elements));
-    cudaError_t cuda_status = cudaMemcpy(P_val_dev.get(), 
+    cudaError_t cuda_status = cudaMemcpy(P_val_dev.get(),
                                          A_val_dev,
                                          n_nonzero_elements * sizeof(Number),
                                          cudaMemcpyDeviceToDevice);
+
     P_column_index_dev = std::get<1>(cusparse_matrix);
     P_row_ptr_dev = std::get<2>(cusparse_matrix);
     const cusparseMatDescr_t mat_descr = std::get<3>(cusparse_matrix);
 
-    // initializa an internal buffer we need later on
+    // initialize an internal buffer we need later on
     tmp_dev.reset(allocate_device_vector(n_rows));
 
     // step 3: query how much memory used in csrilu02 and csrsv2, and allocate
-- 
2.39.5
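
The classes whose documentation this patch touches are normally used together with CUDAWrappers::SparseMatrix and the usual deal.II Krylov solvers. The snippet below is a minimal usage sketch for PreconditionIC, not part of the patch; the constructor taking a Utilities::CUDA::Handle, the initialize() call accepting an AdditionalData object, and the solve-driving code are assumptions based on the deal.II API of that era rather than anything this commit changes.

#include <deal.II/base/cuda.h>

#include <deal.II/lac/cuda_precondition.h>
#include <deal.II/lac/cuda_sparse_matrix.h>
#include <deal.II/lac/cuda_vector.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/solver_control.h>

using namespace dealii;

// Sketch (assumed API): solve A x = b on the device with CG, preconditioned
// by the incomplete Cholesky factorization computed by cuSPARSE inside
// CUDAWrappers::PreconditionIC.
void solve_with_ic(const Utilities::CUDA::Handle &cuda_handle,
                   const CUDAWrappers::SparseMatrix<double> &A_dev,
                   LinearAlgebra::CUDAWrappers::Vector<double> &x_dev,
                   const LinearAlgebra::CUDAWrappers::Vector<double> &b_dev)
{
  // Level analysis is enabled by default; as the (corrected) documentation
  // notes, it might improve performance, so it is worth trying both settings.
  CUDAWrappers::PreconditionIC<double>::AdditionalData additional_data(
    /*use_level_analysis=*/true);

  // Construct the preconditioner and compute the incomplete factorization
  // of the device matrix.
  CUDAWrappers::PreconditionIC<double> preconditioner(cuda_handle);
  preconditioner.initialize(A_dev, additional_data);

  // Use it inside a conjugate gradient solver operating on device vectors.
  SolverControl solver_control(1000, 1e-12 * b_dev.l2_norm());
  SolverCG<LinearAlgebra::CUDAWrappers::Vector<double>> solver(solver_control);
  solver.solve(A_dev, x_dev, b_dev, preconditioner);
}

PreconditionILU would be used the same way; the only behavioral difference relevant to this patch is that the LU variant does not require a symmetric matrix and that its Tvmult() is documented as not yet implemented.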