From 7340c8c78fbd8a5aa933a910d3f028af9f25e60d Mon Sep 17 00:00:00 2001
From: Daniel Arndt
Date: Sun, 19 Aug 2018 14:31:41 +0200
Subject: [PATCH] Add NumberType specializations

---
 include/deal.II/base/numbers.h          |   26 +
 include/deal.II/lac/cuda_precondition.h |  234 ++++
 source/lac/cuda_precondition.cu         | 1397 +++++++++++++++++++++++
 3 files changed, 1657 insertions(+)
 create mode 100644 include/deal.II/lac/cuda_precondition.h
 create mode 100644 source/lac/cuda_precondition.cu

diff --git a/include/deal.II/base/numbers.h b/include/deal.II/base/numbers.h
index 90fc1034ae..f856d7923e 100644
--- a/include/deal.II/base/numbers.h
+++ b/include/deal.II/base/numbers.h
@@ -21,6 +21,10 @@
 #include
 
+#ifdef DEAL_II_COMPILER_CUDA_AWARE
+#  include <cuComplex.h>
+#endif
+
 #include
 #include
 #include
@@ -663,6 +667,28 @@ namespace internal
             NumberType::value(t.imag()));
     }
   };
+
+#ifdef DEAL_II_COMPILER_CUDA_AWARE
+  template <>
+  struct NumberType<cuComplex>
+  {
+    static cuComplex
+    value(const float t)
+    {
+      return make_cuComplex(t, 0.f);
+    }
+  };
+
+  template <>
+  struct NumberType<cuDoubleComplex>
+  {
+    static cuDoubleComplex
+    value(const double t)
+    {
+      return make_cuDoubleComplex(t, 0.);
+    }
+  };
+#endif
 } // namespace internal
 
 namespace numbers
diff --git a/include/deal.II/lac/cuda_precondition.h b/include/deal.II/lac/cuda_precondition.h
new file mode 100644
index 0000000000..724c5f44b5
--- /dev/null
+++ b/include/deal.II/lac/cuda_precondition.h
@@ -0,0 +1,234 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#include
+
+#include
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace CUDAWrappers
+{
+  /**
+   * This class implements an incomplete Cholesky factorization (IC)
+   * preconditioner for @em symmetric CUDAWrappers::SparseMatrix matrices.
+   *
+   * The implementation closely follows the one documented in the cuSPARSE
+   * documentation
+   * (https://docs.nvidia.com/cuda/cusparse/index.html#cusparse-lt-t-gt-csric02).
+   *
+   * @note Instantiations for this template are provided for @p float and
+   * @p double.
+   *
+   * @ingroup Preconditioners CUDAWrappers
+   * @author Daniel Arndt
+   * @date 2018
+   */
+  template <typename Number>
+  class PreconditionIC
+  {
+  public:
+    /**
+     * Declare the type for container size.
+     */
+    using size_type = int;
+
+    /**
+     * Standardized data struct to pipe additional flags to the
+     * preconditioner.
+     */
+    struct AdditionalData
+    {
+      /**
+       * Constructor. cuSPARSE allows one to compute and use level
+       * information. According to the documentation, this might improve
+       * performance. It is suggested to try both options.
+       */
+      AdditionalData(bool use_level_analysis = true);
+
+      /**
+       * Flag that determines if level information is used when creating and
+       * applying the preconditioner. See the documentation for
+       * cusparseSolvePolicy_t at
+       * https://docs.nvidia.com/cuda/cusparse/index.html#cusparsesolvepolicy_t
+       * for more information.
+       */
+      bool use_level_analysis;
+    };
+
+    /**
+     * Constructor.
+     */
+    PreconditionIC(const Utilities::CUDA::Handle &handle);
+
+    /**
+     * The copy constructor is deleted.
+     */
+    PreconditionIC(const PreconditionIC &) = delete;
+
+    /**
+     * The copy assignment operator is deleted.
+     */
+    PreconditionIC &
+    operator=(const PreconditionIC &) = delete;
+
+    /**
+     * Destructor. Free all resources that were initialized in this class.
+     */
+    ~PreconditionIC();
+
+    /**
+     * Initialize this object. In particular, the given matrix is copied to
+     * be modified in-place. For the underlying sparsity pattern, pointers
+     * are stored. Specifically, this means that the current object can only
+     * be used reliably as long as @p matrix is valid and has not been
+     * changed since calling this function.
+     *
+     * The @p additional_data determines if level information is used.
+     */
+    void
+    initialize(const SparseMatrix<Number> &matrix,
+               const AdditionalData &additional_data = AdditionalData());
+
+    /**
+     * Apply the preconditioner.
+     */
+    void
+    vmult(LinearAlgebra::CUDAWrappers::Vector<Number> &      dst,
+          const LinearAlgebra::CUDAWrappers::Vector<Number> &src) const;
+
+    /**
+     * Apply the preconditioner. Since the preconditioner is symmetric, this
+     * is the same as vmult().
+     */
+    void
+    Tvmult(LinearAlgebra::CUDAWrappers::Vector<Number> &      dst,
+           const LinearAlgebra::CUDAWrappers::Vector<Number> &src) const;
+
+    /**
+     * Return the dimension of the codomain (or range) space. Note that the
+     * matrix is square and has dimension $m \times m$.
+     *
+     * @note This function should only be called if the preconditioner has
+     * been initialized.
+     */
+    size_type
+    m() const;
+
+    /**
+     * Return the dimension of the domain space. Note that the matrix is
+     * square and has dimension $m \times m$.
+     *
+     * @note This function should only be called if the preconditioner has
+     * been initialized.
+     */
+    size_type
+    n() const;
+
+  private:
+    /**
+     * cuSPARSE handle used to call cuSPARSE functions.
+     */
+    cusparseHandle_t cusparse_handle;
+
+    /**
+     * cuSPARSE description of the sparse matrix $M=LL^T$.
+     */
+    cusparseMatDescr_t descr_M;
+
+    /**
+     * cuSPARSE description of the lower triangular matrix $L$.
+     */
+    cusparseMatDescr_t descr_L;
+
+    /**
+     * Solve and analysis structure for $M=LL^T$.
+     */
+    csric02Info_t info_M;
+
+    /**
+     * Solve and analysis structure for the lower triangular matrix $L$.
+     */
+    csrsv2Info_t info_L;
+
+    /**
+     * Solve and analysis structure for the upper triangular matrix $L^T$.
+     */
+    csrsv2Info_t info_Lt;
+
+    /**
+     * Pointer to the values (on the device) of the computed preconditioning
+     * matrix.
+     */
+    std::unique_ptr<Number[], void (*)(Number *)> P_val_dev;
+
+    /**
+     * Pointer to the row pointer (on the device) of the sparse matrix this
+     * object was initialized with.
+     */
+    const int *P_row_ptr_dev;
+
+    /**
+     * Pointer to the column indices (on the device) of the sparse matrix
+     * this object was initialized with.
+     */
+    const int *P_column_index_dev;
+
+    /**
+     * Pointer to the values (on the device) of a temporary (helper) vector
+     * used in vmult().
+     */
+    std::unique_ptr<Number[], void (*)(Number *)> tmp_dev;
+
+    /**
+     * Pointer to an internal buffer (on the device) that is used by the
+     * cuSPARSE analysis and solve routines.
+     */
+    std::unique_ptr<void, void (*)(void *)> buffer_dev;
+
+    /**
+     * Determine if level information should be generated for the lower
+     * triangular matrix $L$. This value can be modified through an
+     * AdditionalData object.
+     */
+    cusparseSolvePolicy_t policy_L;
+
+    /**
+     * Determine if level information should be generated for the upper
+     * triangular matrix $L^T$. This value can be modified through an
+     * AdditionalData object.
+     */
+    cusparseSolvePolicy_t policy_Lt;
+
+    /**
+     * Determine if level information should be generated for $M=LL^T$.
This + * value can be modified through an AdditionalData object. + */ + cusparseSolvePolicy_t policy_M; + + /** + * The number of rows is the same as for the matrix this object has been + * initialized with. + */ + int n_rows; + + /** + * The number of non-zero elements is the same as for the matrix this + * object has been initialized with. + */ + int n_nonzero_elements; + }; + } + +DEAL_II_NAMESPACE_CLOSE diff --git a/source/lac/cuda_precondition.cu b/source/lac/cuda_precondition.cu new file mode 100644 index 0000000000..ab67f7bce8 --- /dev/null +++ b/source/lac/cuda_precondition.cu @@ -0,0 +1,1397 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2018 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + +#include + +DEAL_II_NAMESPACE_OPEN + +namespace +{ + /** + * Template wrapper for cusparsecsric02 + * (https://docs.nvidia.com/cuda/cusparse/index.html#cusparse-lt-t-gt-csric02). + * This function performs the solve phase of the computing the + * incomplete-Cholesky factorization with 0 fill-in and no pivoting. + */ + template + cusparseStatus_t + cusparseXcsric02(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + Number * csrValA_valM, + const int * csrRowPtrA, + const int * csrColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void * pBuffer) + { + AssertThrow(false, ExcNotImplemented()); + return CUSPARSE_STATUS_INVALID_VALUE; + } + + template <> + cusparseStatus_t + cusparseXcsric02(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + float * csrValA_valM, + const int * csrRowPtrA, + const int * csrColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void * pBuffer) + { + return cusparseScsric02(handle, + m, + nnz, + descrA, + csrValA_valM, + csrRowPtrA, + csrColIndA, + info, + policy, + pBuffer); + } + + template <> + cusparseStatus_t + cusparseXcsric02(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + double * csrValA_valM, + const int * csrRowPtrA, + const int * csrColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void * pBuffer) + { + return cusparseDcsric02(handle, + m, + nnz, + descrA, + csrValA_valM, + csrRowPtrA, + csrColIndA, + info, + policy, + pBuffer); + } + + template <> + cusparseStatus_t + cusparseXcsric02(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuComplex * csrValA_valM, + const int * csrRowPtrA, + const int * csrColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void * pBuffer) + { + return cusparseCcsric02(handle, + m, + nnz, + descrA, + csrValA_valM, + csrRowPtrA, + csrColIndA, + info, + policy, + pBuffer); + } + + template <> + cusparseStatus_t + cusparseXcsric02(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuDoubleComplex * csrValA_valM, + const int * csrRowPtrA, + const int * csrColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void * pBuffer) + { + return cusparseZcsric02(handle, + m, + 
nnz, + descrA, + csrValA_valM, + csrRowPtrA, + csrColIndA, + info, + policy, + pBuffer); + } + + + /** + * Template wrapper for cusparsecsrsv2_solve + *(https://docs.nvidia.com/cuda/cusparse/index.html#cusparse-lt-t-gt-csrsv2_solve). + * This function performs the solve phase of csrsv2, a new sparse triangular + *linear system op(A)*y = alpha*x. + */ + template + cusparseStatus_t + cusparseXcsrsv2_solve(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int nnz, + const Number * alpha, + const cusparseMatDescr_t descra, + const Number * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrsv2Info_t info, + const Number * x, + Number * y, + cusparseSolvePolicy_t policy, + void * pBuffer) + { + AssertThrow(false, ExcNotImplemented()); + return CUSPARSE_STATUS_INVALID_VALUE; + } + + template <> + cusparseStatus_t + cusparseXcsrsv2_solve(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int nnz, + const float * alpha, + const cusparseMatDescr_t descra, + const float * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrsv2Info_t info, + const float * x, + float * y, + cusparseSolvePolicy_t policy, + void * pBuffer) + { + return cusparseScsrsv2_solve(handle, + transA, + m, + nnz, + alpha, + descra, + csrValA, + csrRowPtrA, + csrColIndA, + info, + x, + y, + policy, + pBuffer); + } + + template <> + cusparseStatus_t + cusparseXcsrsv2_solve(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int nnz, + const double * alpha, + const cusparseMatDescr_t descra, + const double * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrsv2Info_t info, + const double * x, + double * y, + cusparseSolvePolicy_t policy, + void * pBuffer) + { + return cusparseDcsrsv2_solve(handle, + transA, + m, + nnz, + alpha, + descra, + csrValA, + csrRowPtrA, + csrColIndA, + info, + x, + y, + policy, + pBuffer); + } + + template <> + cusparseStatus_t + cusparseXcsrsv2_solve(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int nnz, + const cuComplex * alpha, + const cusparseMatDescr_t descra, + const cuComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrsv2Info_t info, + const cuComplex * x, + cuComplex * y, + cusparseSolvePolicy_t policy, + void * pBuffer) + { + return cusparseCcsrsv2_solve(handle, + transA, + m, + nnz, + alpha, + descra, + csrValA, + csrRowPtrA, + csrColIndA, + info, + x, + y, + policy, + pBuffer); + } + + template <> + cusparseStatus_t + cusparseXcsrsv2_solve(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int nnz, + const cuDoubleComplex * alpha, + const cusparseMatDescr_t descra, + const cuDoubleComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrsv2Info_t info, + const cuDoubleComplex * x, + cuDoubleComplex * y, + cusparseSolvePolicy_t policy, + void * pBuffer) + { + return cusparseZcsrsv2_solve(handle, + transA, + m, + nnz, + alpha, + descra, + csrValA, + csrRowPtrA, + csrColIndA, + info, + x, + y, + policy, + pBuffer); + } + + + /** + * Template wrapper for cusparsecsrsv2_analysis + * (https://docs.nvidia.com/cuda/cusparse/index.html#cusparse-lt-t-gt-csrsv2_analysis). + * This function performs the analysis phase of csrsv2, a new sparse + * triangular linear system op(A)*y = alpha*x. 
+ */ + template + cusparseStatus_t + cusparseXcsrsv2_analysis(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const Number * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrsv2Info_t info, + cusparseSolvePolicy_t policy, + void * pBuffer) + { + AssertThrow(false, ExcNotImplemented()); + return CUSPARSE_STATUS_INVALID_VALUE; + } + + template <> + cusparseStatus_t + cusparseXcsrsv2_analysis(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const float * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrsv2Info_t info, + cusparseSolvePolicy_t policy, + void * pBuffer) + { + return cusparseScsrsv2_analysis(handle, + transA, + m, + nnz, + descrA, + csrValA, + csrRowPtrA, + csrColIndA, + info, + policy, + pBuffer); + } + + template <> + cusparseStatus_t + cusparseXcsrsv2_analysis(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const double * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrsv2Info_t info, + cusparseSolvePolicy_t policy, + void * pBuffer) + { + return cusparseDcsrsv2_analysis(handle, + transA, + m, + nnz, + descrA, + csrValA, + csrRowPtrA, + csrColIndA, + info, + policy, + pBuffer); + } + + template <> + cusparseStatus_t + cusparseXcsrsv2_analysis(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrsv2Info_t info, + cusparseSolvePolicy_t policy, + void * pBuffer) + { + return cusparseCcsrsv2_analysis(handle, + transA, + m, + nnz, + descrA, + csrValA, + csrRowPtrA, + csrColIndA, + info, + policy, + pBuffer); + } + + template <> + cusparseStatus_t + cusparseXcsrsv2_analysis(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuDoubleComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrsv2Info_t info, + cusparseSolvePolicy_t policy, + void * pBuffer) + { + return cusparseZcsrsv2_analysis(handle, + transA, + m, + nnz, + descrA, + csrValA, + csrRowPtrA, + csrColIndA, + info, + policy, + pBuffer); + } + + + + /** + * Template wrapper for cusparsecsric02_analysis + * (https://docs.nvidia.com/cuda/cusparse/index.html#cusparse-lt-t-gt-csric02_analysis). + * This function performs the analysis phase of the incomplete-Cholesky + * factorization with 0 fill-in and no pivoting. 
+ */ + template + cusparseStatus_t + cusparseXcsric02_analysis(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const Number * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void * pBuffer) + { + AssertThrow(false, ExcNotImplemented()); + return CUSPARSE_STATUS_INVALID_VALUE; + } + + template <> + cusparseStatus_t + cusparseXcsric02_analysis(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const float * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void * pBuffer) + { + return cusparseScsric02_analysis(handle, + m, + nnz, + descrA, + csrValA, + csrRowPtrA, + csrColIndA, + info, + policy, + pBuffer); + } + + template <> + cusparseStatus_t + cusparseXcsric02_analysis(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const double * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void * pBuffer) + { + return cusparseDcsric02_analysis(handle, + m, + nnz, + descrA, + csrValA, + csrRowPtrA, + csrColIndA, + info, + policy, + pBuffer); + } + + template <> + cusparseStatus_t + cusparseXcsric02_analysis(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void * pBuffer) + { + return cusparseCcsric02_analysis(handle, + m, + nnz, + descrA, + csrValA, + csrRowPtrA, + csrColIndA, + info, + policy, + pBuffer); + } + + template <> + cusparseStatus_t + cusparseXcsric02_analysis(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + const cuDoubleComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csric02Info_t info, + cusparseSolvePolicy_t policy, + void * pBuffer) + { + return cusparseZcsric02_analysis(handle, + m, + nnz, + descrA, + csrValA, + csrRowPtrA, + csrColIndA, + info, + policy, + pBuffer); + } + + + /** + * Template wrapper for cusparsecsrsv2_bufferSize + * (https://docs.nvidia.com/cuda/cusparse/index.html#cusparse-lt-t-gt-csrsv2_bufferSize). + * This function returns the size of the buffer used in csrsv2, a new sparse + * triangular linear system op(A)*y = alpha*x. 
+ */ + template + cusparseStatus_t + cusparseXcsrsv2_bufferSize(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int nnz, + const cusparseMatDescr_t descrA, + Number * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrsv2Info_t info, + int * pBufferSizeInBytes) + { + AssertThrow(false, ExcNotImplemented()); + return CUSPARSE_STATUS_INVALID_VALUE; + } + + template <> + cusparseStatus_t + cusparseXcsrsv2_bufferSize(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int nnz, + const cusparseMatDescr_t descrA, + float * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrsv2Info_t info, + int * pBufferSizeInBytes) + { + return cusparseScsrsv2_bufferSize(handle, + transA, + m, + nnz, + descrA, + csrValA, + csrRowPtrA, + csrColIndA, + info, + pBufferSizeInBytes); + } + + template <> + cusparseStatus_t + cusparseXcsrsv2_bufferSize(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int nnz, + const cusparseMatDescr_t descrA, + double * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrsv2Info_t info, + int *pBufferSizeInBytes) + { + return cusparseDcsrsv2_bufferSize(handle, + transA, + m, + nnz, + descrA, + csrValA, + csrRowPtrA, + csrColIndA, + info, + pBufferSizeInBytes); + } + + template <> + cusparseStatus_t + cusparseXcsrsv2_bufferSize(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrsv2Info_t info, + int *pBufferSizeInBytes) + { + return cusparseCcsrsv2_bufferSize(handle, + transA, + m, + nnz, + descrA, + csrValA, + csrRowPtrA, + csrColIndA, + info, + pBufferSizeInBytes); + } + + template <> + cusparseStatus_t + cusparseXcsrsv2_bufferSize(cusparseHandle_t handle, + cusparseOperation_t transA, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuDoubleComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csrsv2Info_t info, + int * pBufferSizeInBytes) + { + return cusparseZcsrsv2_bufferSize(handle, + transA, + m, + nnz, + descrA, + csrValA, + csrRowPtrA, + csrColIndA, + info, + pBufferSizeInBytes); + } + + + + /** + * Template wrapper for cusparsecsric02_bufferSize + * (https://docs.nvidia.com/cuda/cusparse/index.html#cusparse-lt-t-gt-csric02_bufferSize). + *This function returns size of buffer used in computing the + *incomplete-Cholesky factorization with 0 fill-in and no pivoting. 
+ */ + template + cusparseStatus_t + cusparseXcsric02_bufferSize(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + Number * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csric02Info_t info, + int * pBufferSizeInBytes) + { + AssertThrow(false, ExcNotImplemented()); + return CUSPARSE_STATUS_INVALID_VALUE; + } + + template <> + cusparseStatus_t + cusparseXcsric02_bufferSize(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + float * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csric02Info_t info, + int *pBufferSizeInBytes) + { + return cusparseScsric02_bufferSize(handle, + m, + nnz, + descrA, + csrValA, + csrRowPtrA, + csrColIndA, + info, + pBufferSizeInBytes); + } + + template <> + cusparseStatus_t + cusparseXcsric02_bufferSize(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + double * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csric02Info_t info, + int *pBufferSizeInBytes) + { + return cusparseDcsric02_bufferSize(handle, + m, + nnz, + descrA, + csrValA, + csrRowPtrA, + csrColIndA, + info, + pBufferSizeInBytes); + } + + template <> + cusparseStatus_t + cusparseXcsric02_bufferSize(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csric02Info_t info, + int *pBufferSizeInBytes) + { + return cusparseCcsric02_bufferSize(handle, + m, + nnz, + descrA, + csrValA, + csrRowPtrA, + csrColIndA, + info, + pBufferSizeInBytes); + } + + template <> + cusparseStatus_t + cusparseXcsric02_bufferSize(cusparseHandle_t handle, + int m, + int nnz, + const cusparseMatDescr_t descrA, + cuDoubleComplex * csrValA, + const int * csrRowPtrA, + const int * csrColIndA, + csric02Info_t info, + int * pBufferSizeInBytes) + { + return cusparseZcsric02_bufferSize(handle, + m, + nnz, + descrA, + csrValA, + csrRowPtrA, + csrColIndA, + info, + pBufferSizeInBytes); + } + + template + void + delete_device_vector(Number *device_ptr) noexcept + { + const cudaError_t error_code = cudaFree(device_ptr); + (void)error_code; + AssertNothrow(error_code == cudaSuccess, + dealii::ExcCudaError(cudaGetErrorString(error_code))); + } + + template + Number * + allocate_device_vector(const std::size_t size) + { + Number *device_ptr; + Utilities::CUDA::malloc(device_ptr, size); + return device_ptr; + } +} // namespace + + namespace CUDAWrappers + { + template + PreconditionIC::AdditionalData::AdditionalData( + bool use_level_analysis_) + : use_level_analysis(use_level_analysis_) + {} + + + + template + PreconditionIC::PreconditionIC( + const Utilities::CUDA::Handle &handle) + : cusparse_handle(handle.cusparse_handle) + , P_val_dev(nullptr, delete_device_vector) + , P_row_ptr_dev(nullptr) + , P_column_index_dev(nullptr) + , tmp_dev(nullptr, delete_device_vector) + , buffer_dev(nullptr, delete_device_vector) + , policy_L(CUSPARSE_SOLVE_POLICY_USE_LEVEL) + , policy_Lt(CUSPARSE_SOLVE_POLICY_USE_LEVEL) + , policy_M(CUSPARSE_SOLVE_POLICY_USE_LEVEL) + , n_rows(0) + , n_nonzero_elements(0) + { + cusparseStatus_t status; + // step 1: create a descriptor which contains + // - matrix M is base-0 + // - matrix L is base-0 + // - matrix L is lower triangular + // - matrix L has non-unit diagonal + status = cusparseCreateMatDescr(&descr_M); + AssertCusparse(status); + status = cusparseSetMatIndexBase(descr_M, CUSPARSE_INDEX_BASE_ZERO); + AssertCusparse(status); + status = cusparseSetMatType(descr_M, 
CUSPARSE_MATRIX_TYPE_GENERAL); + AssertCusparse(status); + + status = cusparseCreateMatDescr(&descr_L); + AssertCusparse(status); + status = cusparseSetMatIndexBase(descr_L, CUSPARSE_INDEX_BASE_ZERO); + AssertCusparse(status); + status = cusparseSetMatType(descr_L, CUSPARSE_MATRIX_TYPE_GENERAL); + AssertCusparse(status); + status = cusparseSetMatFillMode(descr_L, CUSPARSE_FILL_MODE_LOWER); + AssertCusparse(status); + status = cusparseSetMatDiagType(descr_L, CUSPARSE_DIAG_TYPE_NON_UNIT); + AssertCusparse(status); + + // step 2: create a empty info structure + // we need one info for csric02 and two info's for csrsv2 + status = cusparseCreateCsric02Info(&info_M); + AssertCusparse(status); + status = cusparseCreateCsrsv2Info(&info_L); + AssertCusparse(status); + status = cusparseCreateCsrsv2Info(&info_Lt); + AssertCusparse(status); + } + + + + template + PreconditionIC::~PreconditionIC() + { + // step 8: free resources + cusparseStatus_t status = cusparseDestroyMatDescr(descr_M); + AssertNothrowCusparse(status); + + status = cusparseDestroyMatDescr(descr_L); + AssertNothrowCusparse(status); + + status = cusparseDestroyCsric02Info(info_M); + AssertNothrowCusparse(status); + + status = cusparseDestroyCsrsv2Info(info_L); + AssertNothrowCusparse(status); + + status = cusparseDestroyCsrsv2Info(info_Lt); + AssertNothrowCusparse(status); + } + + + + template + void + PreconditionIC::initialize(const SparseMatrix &A, + const AdditionalData &additional_data) + { + if (additional_data.use_level_analysis) + { + policy_L = CUSPARSE_SOLVE_POLICY_USE_LEVEL; + policy_Lt = CUSPARSE_SOLVE_POLICY_USE_LEVEL; + policy_M = CUSPARSE_SOLVE_POLICY_USE_LEVEL; + } + else + { + policy_L = CUSPARSE_SOLVE_POLICY_NO_LEVEL; + policy_Lt = CUSPARSE_SOLVE_POLICY_NO_LEVEL; + policy_M = CUSPARSE_SOLVE_POLICY_NO_LEVEL; + } + + n_rows = A.m(); + n_nonzero_elements = A.n_nonzero_elements(); + AssertDimension(A.m(), A.n()); + + const auto cusparse_matrix = A.get_cusparse_matrix(); + const Number *const A_val_dev = std::get<0>(cusparse_matrix); + + // create a copy of the matrix entries + P_val_dev.reset(allocate_device_vector(n_nonzero_elements)); + cudaError_t cuda_status = cudaMemcpy(P_val_dev.get(), + A_val_dev, + n_nonzero_elements * sizeof(Number), + cudaMemcpyDeviceToDevice); + P_column_index_dev = std::get<1>(cusparse_matrix); + P_row_ptr_dev = std::get<2>(cusparse_matrix); + const cusparseMatDescr_t mat_descr = std::get<3>(cusparse_matrix); + + // initializa an internal buffer we need later on + tmp_dev.reset(allocate_device_vector(n_rows)); + + // step 3: query how much memory used in csric02 and csrsv2, and allocate + // the buffer + int BufferSize_M; + cusparseStatus_t status = cusparseXcsric02_bufferSize(cusparse_handle, + n_rows, + n_nonzero_elements, + descr_M, + P_val_dev.get(), + P_row_ptr_dev, + P_column_index_dev, + info_M, + &BufferSize_M); + AssertCusparse(status); + + int BufferSize_L; + status = cusparseXcsrsv2_bufferSize(cusparse_handle, + CUSPARSE_OPERATION_NON_TRANSPOSE, + n_rows, + n_nonzero_elements, + descr_L, + P_val_dev.get(), + P_row_ptr_dev, + P_column_index_dev, + info_L, + &BufferSize_L); + AssertCusparse(status); + + int BufferSize_Lt; + status = cusparseXcsrsv2_bufferSize(cusparse_handle, + CUSPARSE_OPERATION_TRANSPOSE, + n_rows, + n_nonzero_elements, + descr_L, + P_val_dev.get(), + P_row_ptr_dev, + P_column_index_dev, + info_Lt, + &BufferSize_Lt); + AssertCusparse(status); + + const int BufferSize = + std::max(BufferSize_M, std::max(BufferSize_L, BufferSize_Lt)); + // workaround: since 
allocate_device_vector needs a type, we pass char + // which is required to have size 1. + buffer_dev.reset(static_cast( + allocate_device_vector(BufferSize / sizeof(char)))); + + // step 4: perform analysis of incomplete Cholesky on M + // perform analysis of triangular solve on L + // perform analysis of triangular solve on L' + // The lower triangular part of M has the same sparsity pattern as L, so + // we can do analysis of csric02 and csrsv2 simultaneously. + + status = cusparseXcsric02_analysis(cusparse_handle, + n_rows, + n_nonzero_elements, + descr_M, + P_val_dev.get(), + P_row_ptr_dev, + P_column_index_dev, + info_M, + policy_M, + buffer_dev.get()); + AssertCusparse(status); + + int structural_zero; + status = + cusparseXcsric02_zeroPivot(cusparse_handle, info_M, &structural_zero); + AssertCusparse(status); + + status = cusparseXcsrsv2_analysis(cusparse_handle, + CUSPARSE_OPERATION_TRANSPOSE, + n_rows, + n_nonzero_elements, + descr_L, + P_val_dev.get(), + P_row_ptr_dev, + P_column_index_dev, + info_Lt, + policy_Lt, + buffer_dev.get()); + AssertCusparse(status); + + status = cusparseXcsrsv2_analysis(cusparse_handle, + CUSPARSE_OPERATION_NON_TRANSPOSE, + n_rows, + n_nonzero_elements, + descr_L, + P_val_dev.get(), + P_row_ptr_dev, + P_column_index_dev, + info_L, + policy_L, + buffer_dev.get()); + AssertCusparse(status); + + // step 5: M = L * L' + status = cusparseXcsric02(cusparse_handle, + n_rows, + n_nonzero_elements, + descr_M, + P_val_dev.get(), + P_row_ptr_dev, + P_column_index_dev, + info_M, + policy_M, + buffer_dev.get()); + AssertCusparse(status); + + int numerical_zero; + status = + cusparseXcsric02_zeroPivot(cusparse_handle, info_M, &numerical_zero); + AssertCusparse(status); + } + + + + template + void + PreconditionIC::vmult( + LinearAlgebra::CUDAWrappers::Vector & dst, + const LinearAlgebra::CUDAWrappers::Vector &src) const + { + Assert(P_val_dev != nullptr, ExcNotInitialized()); + Assert(P_row_ptr_dev != nullptr, ExcNotInitialized()); + Assert(P_column_index_dev != nullptr, ExcNotInitialized()); + AssertDimension(dst.size(), static_cast(n_rows)); + AssertDimension(src.size(), static_cast(n_rows)); + Assert(tmp_dev != nullptr, ExcInternalError()); + + const Number *const src_dev = src.get_values(); + Number *const dst_dev = dst.get_values(); + // step 6: solve L*z = alpha*x + const double alpha = 1.; + cusparseStatus_t status = + cusparseXcsrsv2_solve(cusparse_handle, + CUSPARSE_OPERATION_NON_TRANSPOSE, + n_rows, + n_nonzero_elements, + &alpha, + descr_L, + P_val_dev.get(), + P_row_ptr_dev, + P_column_index_dev, + info_L, + src_dev, + tmp_dev.get(), + policy_L, + buffer_dev.get()); + AssertCusparse(status); + + // step 7: solve L'*y = alpha*z + status = cusparseXcsrsv2_solve(cusparse_handle, + CUSPARSE_OPERATION_TRANSPOSE, + n_rows, + n_nonzero_elements, + &alpha, + descr_L, + P_val_dev.get(), + P_row_ptr_dev, + P_column_index_dev, + info_Lt, + tmp_dev.get(), + dst_dev, + policy_Lt, + buffer_dev.get()); + AssertCusparse(status); + } + + + + template + void + PreconditionIC::Tvmult( + LinearAlgebra::CUDAWrappers::Vector & dst, + const LinearAlgebra::CUDAWrappers::Vector &src) const + { + // the constructed preconditioner is symmetric + vmult(dst, src); + } + + + + template + PreconditionIC::size_type + PreconditionIC::m() const + { + return n_rows; + } + + + + template + PreconditionIC::size_type + PreconditionIC::n() const + { + return n_rows; + } + + + + template + void + apply_preconditioner(const SparseMatrix &A, + const cusparseHandle_t cusparse_handle, + 
LinearAlgebra::CUDAWrappers::Vector & dst, + const LinearAlgebra::CUDAWrappers::Vector &src) + { + const Number *const src_dev = src.get_values(); + Number * dst_dev = dst.get_values(); + const cusparseHandle_t handle = cusparse_handle; + + const auto cusparse_matrix = A.get_cusparse_matrix(); + Number * A_val_dev = std::get<0>(cusparse_matrix); + const int *const A_row_ptr_dev = std::get<2>(cusparse_matrix); + const int *const A_column_index_dev = std::get<1>(cusparse_matrix); + const cusparseMatDescr_t mat_descr = std::get<3>(cusparse_matrix); + + const unsigned int n_rows = A.m(); + const unsigned int n_nonzero_elements = A.n_nonzero_elements(); + + AssertDimension(dst.size(), src.size()); + AssertDimension(A.m(), src.size()); + AssertDimension(A.n(), src.size()); + + std::unique_ptr tmp_dev( + allocate_device_vector(dst.size()), + delete_device_vector); + + // Suppose that A is a m x m sparse matrix represented by CSR format, + // Assumption: + // - handle is already created by cusparseCreate(), + // - (A_row_ptr_dev, A_column_index_dev, A_val_dev) is CSR of A on device + // memory, + // - src_dev is right hand side vector on device memory, + // - dst_dev is solution vector on device memory. + // - tmp_dev is intermediate result on device memory. + + cusparseMatDescr_t descr_M = mat_descr; + cusparseMatDescr_t descr_L = mat_descr; + csric02Info_t info_M = 0; + csrsv2Info_t info_L = 0; + csrsv2Info_t info_Lt = 0; + int BufferSize_M; + int BufferSize_L; + int BufferSize_Lt; + int BufferSize; + void * buffer_dev = 0; + int structural_zero; + int numerical_zero; + const double alpha = 1.; + const cusparseSolvePolicy_t policy_M = CUSPARSE_SOLVE_POLICY_NO_LEVEL; + const cusparseSolvePolicy_t policy_L = CUSPARSE_SOLVE_POLICY_NO_LEVEL; + const cusparseSolvePolicy_t policy_Lt = CUSPARSE_SOLVE_POLICY_USE_LEVEL; + + cusparseStatus_t status; + // step 1: create a descriptor which contains + // - matrix M is base-0 + // - matrix L is base-0 + // - matrix L is lower triangular + // - matrix L has non-unit diagonal + status = cusparseCreateMatDescr(&descr_M); + AssertCusparse(status); + status = cusparseSetMatIndexBase(descr_M, CUSPARSE_INDEX_BASE_ZERO); + AssertCusparse(status); + status = cusparseSetMatType(descr_M, CUSPARSE_MATRIX_TYPE_GENERAL); + AssertCusparse(status); + + status = cusparseCreateMatDescr(&descr_L); + AssertCusparse(status); + status = cusparseSetMatIndexBase(descr_L, CUSPARSE_INDEX_BASE_ZERO); + AssertCusparse(status); + status = cusparseSetMatType(descr_L, CUSPARSE_MATRIX_TYPE_GENERAL); + AssertCusparse(status); + status = cusparseSetMatFillMode(descr_L, CUSPARSE_FILL_MODE_LOWER); + AssertCusparse(status); + status = cusparseSetMatDiagType(descr_L, CUSPARSE_DIAG_TYPE_NON_UNIT); + AssertCusparse(status); + + // step 2: create a empty info structure + // we need one info for csric02 and two info's for csrsv2 + status = cusparseCreateCsric02Info(&info_M); + AssertCusparse(status); + status = cusparseCreateCsrsv2Info(&info_L); + AssertCusparse(status); + status = cusparseCreateCsrsv2Info(&info_Lt); + AssertCusparse(status); + + // step 3: query how much memory used in csric02 and csrsv2, and allocate + // the buffer + status = cusparseXcsric02_bufferSize(handle, + n_rows, + n_nonzero_elements, + descr_M, + A_val_dev, + A_row_ptr_dev, + A_column_index_dev, + info_M, + &BufferSize_M); + AssertCusparse(status); + status = cusparseXcsrsv2_bufferSize(handle, + CUSPARSE_OPERATION_NON_TRANSPOSE, + n_rows, + n_nonzero_elements, + descr_L, + A_val_dev, + A_row_ptr_dev, + A_column_index_dev, 
+ info_L, + &BufferSize_L); + AssertCusparse(status); + status = cusparseXcsrsv2_bufferSize(handle, + CUSPARSE_OPERATION_TRANSPOSE, + n_rows, + n_nonzero_elements, + descr_L, + A_val_dev, + A_row_ptr_dev, + A_column_index_dev, + info_Lt, + &BufferSize_Lt); + AssertCusparse(status); + + BufferSize = max(BufferSize_M, max(BufferSize_L, BufferSize_Lt)); + + // buffer_dev returned by cudaMalloc is automatically aligned to 128 + // bytes. + cudaError_t status_cuda = cudaMalloc((void **)&buffer_dev, BufferSize); + Assert(cudaSuccess == status_cuda, ExcInternalError()); + + // step 4: perform analysis of incomplete Cholesky on M + // perform analysis of triangular solve on L + // perform analysis of triangular solve on L' + // The lower triangular part of M has the same sparsity pattern as L, so + // we can do analysis of csric02 and csrsv2 simultaneously. + + status = cusparseXcsric02_analysis(handle, + n_rows, + n_nonzero_elements, + descr_M, + A_val_dev, + A_row_ptr_dev, + A_column_index_dev, + info_M, + policy_M, + buffer_dev); + AssertCusparse(status); + status = cusparseXcsric02_zeroPivot(handle, info_M, &structural_zero); + if (CUSPARSE_STATUS_ZERO_PIVOT == status) + { + printf("A(%d,%d) is missing\n", structural_zero, structural_zero); + } + + status = cusparseXcsrsv2_analysis(handle, + CUSPARSE_OPERATION_TRANSPOSE, + n_rows, + n_nonzero_elements, + descr_L, + A_val_dev, + A_row_ptr_dev, + A_column_index_dev, + info_Lt, + policy_Lt, + buffer_dev); + AssertCusparse(status); + + status = cusparseXcsrsv2_analysis(handle, + CUSPARSE_OPERATION_NON_TRANSPOSE, + n_rows, + n_nonzero_elements, + descr_L, + A_val_dev, + A_row_ptr_dev, + A_column_index_dev, + info_L, + policy_L, + buffer_dev); + AssertCusparse(status); + + // step 5: M = L * L' + status = cusparseXcsric02(handle, + n_rows, + n_nonzero_elements, + descr_M, + A_val_dev, + A_row_ptr_dev, + A_column_index_dev, + info_M, + policy_M, + buffer_dev); + AssertCusparse(status); + status = cusparseXcsric02_zeroPivot(handle, info_M, &numerical_zero); + if (CUSPARSE_STATUS_ZERO_PIVOT == status) + { + printf("L(%d,%d) is zero\n", numerical_zero, numerical_zero); + } + + // step 6: solve L*z = x + status = cusparseXcsrsv2_solve(handle, + CUSPARSE_OPERATION_NON_TRANSPOSE, + n_rows, + n_nonzero_elements, + &alpha, + descr_L, + A_val_dev, + A_row_ptr_dev, + A_column_index_dev, + info_L, + src_dev, + tmp_dev.get(), + policy_L, + buffer_dev); + AssertCusparse(status); + + // step 7: solve L'*y = z + status = cusparseXcsrsv2_solve(handle, + CUSPARSE_OPERATION_TRANSPOSE, + n_rows, + n_nonzero_elements, + &alpha, + descr_L, + A_val_dev, + A_row_ptr_dev, + A_column_index_dev, + info_Lt, + tmp_dev.get(), + dst_dev, + policy_Lt, + buffer_dev); + AssertCusparse(status); + + // step 8: free resources + status_cuda = cudaFree(buffer_dev); + AssertCuda(status_cuda); + status = cusparseDestroyMatDescr(descr_M); + AssertCusparse(status); + status = cusparseDestroyMatDescr(descr_L); + AssertCusparse(status); + status = cusparseDestroyCsric02Info(info_M); + AssertCusparse(status); + status = cusparseDestroyCsrsv2Info(info_L); + AssertCusparse(status); + status = cusparseDestroyCsrsv2Info(info_Lt); + AssertCusparse(status); + } + + + + // explicit instantiations + template class PreconditionIC; + template class PreconditionIC; + } // namespace CUDAWrappers + +DEAL_II_NAMESPACE_CLOSE -- 2.39.5
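
Usage note (not part of the patch): the sketch below shows how the new
PreconditionIC class could be combined with the existing CUDA wrappers and
deal.II's conjugate gradient solver, assuming the usual deal.II solver
interfaces. The function name solve_on_device, the objects A_host, x_dev and
b_dev, and the tolerance are hypothetical placeholders chosen only for
illustration.

    // Minimal usage sketch: solve A x = b on the device with CG,
    // preconditioned by the incomplete Cholesky factorization added here.
    #include <deal.II/base/cuda.h>
    #include <deal.II/lac/cuda_precondition.h>
    #include <deal.II/lac/cuda_sparse_matrix.h>
    #include <deal.II/lac/cuda_vector.h>
    #include <deal.II/lac/solver_cg.h>
    #include <deal.II/lac/solver_control.h>
    #include <deal.II/lac/sparse_matrix.h>

    void
    solve_on_device(
      const dealii::SparseMatrix<double> &                        A_host,
      dealii::LinearAlgebra::CUDAWrappers::Vector<double> &       x_dev,
      const dealii::LinearAlgebra::CUDAWrappers::Vector<double> & b_dev)
    {
      // One handle per program run; it owns the cuSPARSE/cuSOLVER contexts.
      dealii::Utilities::CUDA::Handle cuda_handle;

      // Copy the host matrix into the CUDA sparse matrix wrapper.
      dealii::CUDAWrappers::SparseMatrix<double> A_dev(cuda_handle, A_host);

      // Set up the IC(0) preconditioner introduced by this patch
      // (default AdditionalData, i.e. level analysis enabled).
      dealii::CUDAWrappers::PreconditionIC<double> preconditioner(cuda_handle);
      preconditioner.initialize(A_dev);

      // Preconditioned conjugate gradient iteration on the device vectors.
      dealii::SolverControl control(1000, 1e-12 * b_dev.l2_norm());
      dealii::SolverCG<dealii::LinearAlgebra::CUDAWrappers::Vector<double>>
        cg(control);
      cg.solve(A_dev, x_dev, b_dev, preconditioner);
    }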