From 406ec759079f99fd69f4dc8b8a8ee6dd7f88c6bd Mon Sep 17 00:00:00 2001
From: Bruno Turcksin
Date: Mon, 23 Apr 2018 18:37:33 -0400
Subject: [PATCH] Add direct solver on the device

---
 include/deal.II/lac/cuda_solver_direct.h | 114 ++++++++
 include/deal.II/lac/cuda_sparse_matrix.h |   4 +-
 source/lac/CMakeLists.txt                |   3 +-
 source/lac/cuda_solver_direct.cu         | 352 +++++++++++++++++++++++
 source/lac/cuda_sparse_matrix.cu         |  18 +-
 5 files changed, 479 insertions(+), 12 deletions(-)
 create mode 100644 include/deal.II/lac/cuda_solver_direct.h
 create mode 100644 source/lac/cuda_solver_direct.cu

diff --git a/include/deal.II/lac/cuda_solver_direct.h b/include/deal.II/lac/cuda_solver_direct.h
new file mode 100644
index 0000000000..6411fdbbcc
--- /dev/null
+++ b/include/deal.II/lac/cuda_solver_direct.h
@@ -0,0 +1,114 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_cuda_solver_direct_h
+#define dealii_cuda_solver_direct_h
+
+#include <deal.II/base/config.h>
+
+#ifdef DEAL_II_WITH_CUDA
+#include <deal.II/base/cuda.h>
+#include <deal.II/lac/cuda_sparse_matrix.h>
+#include <deal.II/lac/cuda_vector.h>
+#include <deal.II/lac/solver_control.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace CUDAWrappers
+{
+  /**
+   * Direct solvers. These solvers call cuSOLVER underneath.
+   *
+   * @note Instantiations for this template are provided for
+   * <tt>@<float></tt> and <tt>@<double></tt>.
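+   *
+   * A minimal usage sketch (the matrix @p A and the vectors @p x and @p b
+   * are assumed to already exist on the device; all names are
+   * illustrative):
+   * @code
+   * Utilities::CUDA::Handle cuda_handle;
+   * SolverControl solver_control;
+   * SolverDirect<double>::AdditionalData data("LU_dense");
+   * SolverDirect<double> solver(cuda_handle, solver_control, data);
+   * solver.solve(A, x, b);
+   * @endcode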
+   *
+   * @ingroup CUDAWrappers
+   * @author Bruno Turcksin
+   * @date 2018
+   */
+  template <typename Number>
+  class SolverDirect
+  {
+  public:
+    struct AdditionalData
+    {
+      /**
+       * Set the additional data field to the desired solver.
+       */
+      explicit
+      AdditionalData (const std::string &solver_type = "LU_dense");
+
+      /**
+       * Set the solver type. Possibilities are:
+       * <ul>
+       * <li> "Cholesky" which performs a Cholesky decomposition on the
+       * device </li>
+       * <li> "LU_dense" which converts the sparse matrix to a dense
+       * matrix and uses LU factorization </li>
+       * <li> "LU_host" which uses LU factorization on the host </li>
+       * </ul>
+       */
+      std::string solver_type;
+    };
+
+    /**
+     * Constructor. Takes the solver control object and creates the solver.
+     */
+    SolverDirect(const Utilities::CUDA::Handle &handle,
+                 SolverControl &cn,
+                 const AdditionalData &data = AdditionalData());
+
+    /**
+     * Destructor.
+     */
+    virtual ~SolverDirect() = default;
+
+    /**
+     * Solve the linear system <tt>Ax=b</tt>.
+     */
+    void solve(const SparseMatrix<Number> &A,
+               LinearAlgebra::CUDAWrappers::Vector<Number> &x,
+               const LinearAlgebra::CUDAWrappers::Vector<Number> &b);
+
+    /**
+     * Access to the object that controls convergence.
+     */
+    SolverControl &control() const;
+
+  private:
+    /**
+     * CUDA handle used by the cuSOLVER and cuSPARSE calls.
+     */
+    const Utilities::CUDA::Handle &cuda_handle;
+
+    /**
+     * Reference to the object that controls convergence of the solver.
+     * A direct solver does not iterate, but we set this object to report
+     * convergence once the solution process has finished.
+     */
+    SolverControl &solver_control;
+
+    /**
+     * Store a copy of the flags for this particular solver.
+     */
+    const AdditionalData additional_data;
+  };
+}
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
+
+#endif
diff --git a/include/deal.II/lac/cuda_sparse_matrix.h b/include/deal.II/lac/cuda_sparse_matrix.h
index 991a419359..1e1eef4a58 100644
--- a/include/deal.II/lac/cuda_sparse_matrix.h
+++ b/include/deal.II/lac/cuda_sparse_matrix.h
@@ -247,8 +247,8 @@ namespace CUDAWrappers
      * the cuSPARSE matrix description.
      */
     std::tuple<Number *, int *, int *, cusparseMatDescr_t>
-    get_cusparse_matrix();
+    get_cusparse_matrix() const;
     //@}
 
   private:
     /**
diff --git a/source/lac/CMakeLists.txt b/source/lac/CMakeLists.txt
index dea7cf02f8..4f2803f6e3 100644
--- a/source/lac/CMakeLists.txt
+++ b/source/lac/CMakeLists.txt
@@ -146,8 +146,9 @@ ENDIF()
 IF(DEAL_II_WITH_CUDA)
   SET(_separate_src
     ${_separate_src}
-    cuda_vector.cu
+    cuda_solver_direct.cu
     cuda_sparse_matrix.cu
+    cuda_vector.cu
   )
 ENDIF()
 
diff --git a/source/lac/cuda_solver_direct.cu b/source/lac/cuda_solver_direct.cu
new file mode 100644
index 0000000000..f8cafbe776
--- /dev/null
+++ b/source/lac/cuda_solver_direct.cu
@@ -0,0 +1,352 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#include <deal.II/lac/cuda_solver_direct.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace CUDAWrappers
+{
+  namespace internal
+  {
+    void cusparsecsr2dense(cusparseHandle_t cusparse_handle,
+                           const SparseMatrix<float> &matrix,
+                           float *dense_matrix_dev)
+    {
+      auto cusparse_matrix = matrix.get_cusparse_matrix();
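+      // Tuple layout, as defined by SparseMatrix::get_cusparse_matrix()
+      // further down in this patch:
+      //   std::get<0>: values, std::get<1>: column indices,
+      //   std::get<2>: row pointers, std::get<3>: matrix descriptor.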
+
+      cusparseStatus_t cusparse_error_code =
+        cusparseScsr2dense(
+          cusparse_handle, matrix.m(), matrix.n(),
+          std::get<3>(cusparse_matrix), std::get<0>(cusparse_matrix),
+          std::get<2>(cusparse_matrix), std::get<1>(cusparse_matrix),
+          dense_matrix_dev, matrix.m());
+      AssertCusparse(cusparse_error_code);
+    }
+
+
+
+    void cusparsecsr2dense(cusparseHandle_t cusparse_handle,
+                           const SparseMatrix<double> &matrix,
+                           double *dense_matrix_dev)
+    {
+      auto cusparse_matrix = matrix.get_cusparse_matrix();
+
+      cusparseStatus_t cusparse_error_code =
+        cusparseDcsr2dense(
+          cusparse_handle, matrix.m(), matrix.n(),
+          std::get<3>(cusparse_matrix), std::get<0>(cusparse_matrix),
+          std::get<2>(cusparse_matrix), std::get<1>(cusparse_matrix),
+          dense_matrix_dev, matrix.m());
+      AssertCusparse(cusparse_error_code);
+    }
+
+
+
+    void cusolverDngetrf_buffer_size(cusolverDnHandle_t cusolver_dn_handle,
+                                     int m, int n, float *dense_matrix_dev,
+                                     int &workspace_size)
+    {
+      cusolverStatus_t cusolver_error_code = cusolverDnSgetrf_bufferSize(
+        cusolver_dn_handle, m, n, dense_matrix_dev, m, &workspace_size);
+      AssertCusolver(cusolver_error_code);
+    }
+
+
+
+    void cusolverDngetrf_buffer_size(cusolverDnHandle_t cusolver_dn_handle,
+                                     int m, int n, double *dense_matrix_dev,
+                                     int &workspace_size)
+    {
+      cusolverStatus_t cusolver_error_code = cusolverDnDgetrf_bufferSize(
+        cusolver_dn_handle, m, n, dense_matrix_dev, m, &workspace_size);
+      AssertCusolver(cusolver_error_code);
+    }
+
+
+
+    void cusolverDngetrf(cusolverDnHandle_t cusolver_dn_handle, int m, int n,
+                         float *dense_matrix_dev, float *workspace_dev,
+                         int *pivot_dev, int *info_dev)
+    {
+      cusolverStatus_t cusolver_error_code =
+        cusolverDnSgetrf(cusolver_dn_handle, m, n, dense_matrix_dev, m,
+                         workspace_dev, pivot_dev, info_dev);
+      AssertCusolver(cusolver_error_code);
+    }
+
+
+
+    void cusolverDngetrf(cusolverDnHandle_t cusolver_dn_handle, int m, int n,
+                         double *dense_matrix_dev, double *workspace_dev,
+                         int *pivot_dev, int *info_dev)
+    {
+      cusolverStatus_t cusolver_error_code =
+        cusolverDnDgetrf(cusolver_dn_handle, m, n, dense_matrix_dev, m,
+                         workspace_dev, pivot_dev, info_dev);
+      AssertCusolver(cusolver_error_code);
+    }
+
+
+
+    void cusolverDngetrs(cusolverDnHandle_t cusolver_dn_handle, int m,
+                         float *dense_matrix_dev, int *pivot_dev, float *b,
+                         int *info_dev)
+    {
+      const int n_rhs = 1;
+      cusolverStatus_t cusolver_error_code =
+        cusolverDnSgetrs(cusolver_dn_handle, CUBLAS_OP_N, m, n_rhs,
+                         dense_matrix_dev, m, pivot_dev, b, m, info_dev);
+      AssertCusolver(cusolver_error_code);
+    }
+
+
+
+    void cusolverDngetrs(cusolverDnHandle_t cusolver_dn_handle, int m,
+                         double *dense_matrix_dev, int *pivot_dev, double *b,
+                         int *info_dev)
+    {
+      const int n_rhs = 1;
+      cusolverStatus_t cusolver_error_code =
+        cusolverDnDgetrs(cusolver_dn_handle, CUBLAS_OP_N, m, n_rhs,
+                         dense_matrix_dev, m, pivot_dev, b, m, info_dev);
+      AssertCusolver(cusolver_error_code);
+    }
+
+
+
+    void cusolverSpcsrlsvluHost(cusolverSpHandle_t cusolver_sp_handle,
+                                const unsigned int n_rows,
+                                const unsigned int nnz,
+                                cusparseMatDescr_t descr,
+                                const float *val_host,
+                                const int *row_ptr_host,
+                                const int *column_index_host,
+                                const float *b_host, float *x_host)
+    {
+      int singularity = 0;
+      cusolverStatus_t cusolver_error_code = cusolverSpScsrlsvluHost(
+        cusolver_sp_handle, n_rows, nnz, descr, val_host, row_ptr_host,
+        column_index_host, b_host, 0., 1, x_host, &singularity);
+      AssertCusolver(cusolver_error_code);
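+      // Per the cuSOLVER documentation, csrlsvluHost sets singularity to -1
+      // when the matrix is invertible (with respect to the given tolerance);
+      // otherwise it is the index of the first (near-)zero pivot.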
+      Assert(singularity == -1, ExcMessage("The matrix is singular"));
+    }
+
+
+
+    void cusolverSpcsrlsvluHost(cusolverSpHandle_t cusolver_sp_handle,
+                                const unsigned int n_rows,
+                                const unsigned int nnz,
+                                cusparseMatDescr_t descr,
+                                const double *val_host,
+                                const int *row_ptr_host,
+                                const int *column_index_host,
+                                const double *b_host, double *x_host)
+    {
+      int singularity = 0;
+      cusolverStatus_t cusolver_error_code = cusolverSpDcsrlsvluHost(
+        cusolver_sp_handle, n_rows, nnz, descr, val_host, row_ptr_host,
+        column_index_host, b_host, 0., 1, x_host, &singularity);
+      AssertCusolver(cusolver_error_code);
+      Assert(singularity == -1, ExcMessage("The matrix is singular"));
+    }
+
+
+
+    void cholesky_factorization(cusolverSpHandle_t cusolver_sp_handle,
+                                const SparseMatrix<float> &matrix,
+                                const float *b, float *x)
+    {
+      auto cusparse_matrix = matrix.get_cusparse_matrix();
+      int singularity = 0;
+
+      cusolverStatus_t cusolver_error_code = cusolverSpScsrlsvchol(
+        cusolver_sp_handle, matrix.m(), matrix.n_nonzero_elements(),
+        std::get<3>(cusparse_matrix), std::get<0>(cusparse_matrix),
+        std::get<2>(cusparse_matrix), std::get<1>(cusparse_matrix), b, 0., 0,
+        x, &singularity);
+      AssertCusolver(cusolver_error_code);
+      Assert(singularity == -1, ExcMessage("The matrix is not SPD"));
+    }
+
+
+
+    void cholesky_factorization(cusolverSpHandle_t cusolver_sp_handle,
+                                const SparseMatrix<double> &matrix,
+                                const double *b, double *x)
+    {
+      auto cusparse_matrix = matrix.get_cusparse_matrix();
+      int singularity = 0;
+
+      cusolverStatus_t cusolver_error_code = cusolverSpDcsrlsvchol(
+        cusolver_sp_handle, matrix.m(), matrix.n_nonzero_elements(),
+        std::get<3>(cusparse_matrix), std::get<0>(cusparse_matrix),
+        std::get<2>(cusparse_matrix), std::get<1>(cusparse_matrix), b, 0., 0,
+        x, &singularity);
+      AssertCusolver(cusolver_error_code);
+      Assert(singularity == -1, ExcMessage("The matrix is not SPD"));
+    }
+
+
+
+    template <typename Number>
+    void lu_factorization(cusparseHandle_t cusparse_handle,
+                          cusolverDnHandle_t cusolver_dn_handle,
+                          const SparseMatrix<Number> &matrix,
+                          const Number *b_dev, Number *x_dev)
+    {
+      // Change the format of the matrix from sparse to dense
+      const unsigned int m = matrix.m();
+      const unsigned int n = matrix.n();
+      Assert(m == n, ExcMessage("The matrix is not square"));
+      Number *dense_matrix_dev;
+      Utilities::CUDA::malloc(dense_matrix_dev, m * n);
+      internal::cusparsecsr2dense(cusparse_handle, matrix, dense_matrix_dev);
+
+      // Create the working space
+      int workspace_size = 0;
+      internal::cusolverDngetrf_buffer_size(cusolver_dn_handle, m, n,
+                                            dense_matrix_dev, workspace_size);
+      Assert(workspace_size > 0, ExcMessage("No workspace was allocated"));
+      Number *workspace_dev;
+      Utilities::CUDA::malloc(workspace_dev, workspace_size);
+
+      // LU factorization
+      int *pivot_dev;
+      Utilities::CUDA::malloc(pivot_dev, m);
+      int *info_dev;
+      Utilities::CUDA::malloc(info_dev, 1);
+
+      internal::cusolverDngetrf(cusolver_dn_handle, m, n, dense_matrix_dev,
+                                workspace_dev, pivot_dev, info_dev);
+
+#ifdef DEBUG
+      int info = 0;
+      cudaError_t cuda_error_code_debug =
+        cudaMemcpy(&info, info_dev, sizeof(int), cudaMemcpyDeviceToHost);
+      AssertCuda(cuda_error_code_debug);
+      Assert(info == 0,
+             ExcMessage("There was a problem during the LU factorization"));
+#endif
+
+      // Solve Ax = b
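+      // getrs solves in place: the right-hand side is copied into x_dev and
+      // then overwritten with the solution.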
+      cudaError_t cuda_error_code = cudaMemcpy(x_dev, b_dev,
+                                               m * sizeof(Number),
+                                               cudaMemcpyDeviceToDevice);
+      AssertCuda(cuda_error_code);
+      internal::cusolverDngetrs(cusolver_dn_handle, m, dense_matrix_dev,
+                                pivot_dev, x_dev, info_dev);
+#ifdef DEBUG
+      cuda_error_code =
+        cudaMemcpy(&info, info_dev, sizeof(int), cudaMemcpyDeviceToHost);
+      AssertCuda(cuda_error_code);
+      Assert(info == 0, ExcMessage("There was a problem during the LU solve"));
+#endif
+
+      // Free the memory allocated
+      Utilities::CUDA::free(dense_matrix_dev);
+      Utilities::CUDA::free(workspace_dev);
+      Utilities::CUDA::free(pivot_dev);
+      Utilities::CUDA::free(info_dev);
+    }
+
+
+
+    template <typename Number>
+    void lu_factorization(cusolverSpHandle_t cusolver_sp_handle,
+                          const SparseMatrix<Number> &matrix,
+                          const Number *b_dev, Number *x_dev)
+    {
+      // cuSOLVER does not support LU factorization of a sparse matrix on the
+      // device, so we need to move everything to the host first and then
+      // bring the solution back to the device.
+      const unsigned int nnz = matrix.n_nonzero_elements();
+      const unsigned int n_rows = matrix.m();
+      std::vector<Number> val_host(nnz);
+      std::vector<int> column_index_host(nnz);
+      std::vector<int> row_ptr_host(n_rows + 1);
+      auto cusparse_matrix = matrix.get_cusparse_matrix();
+      Utilities::CUDA::copy_to_host(std::get<0>(cusparse_matrix), val_host);
+      Utilities::CUDA::copy_to_host(std::get<1>(cusparse_matrix),
+                                    column_index_host);
+      Utilities::CUDA::copy_to_host(std::get<2>(cusparse_matrix),
+                                    row_ptr_host);
+      std::vector<Number> b_host(n_rows);
+      Utilities::CUDA::copy_to_host(b_dev, b_host);
+      std::vector<Number> x_host(n_rows);
+      Utilities::CUDA::copy_to_host(x_dev, x_host);
+
+      internal::cusolverSpcsrlsvluHost(
+        cusolver_sp_handle, n_rows, nnz, std::get<3>(cusparse_matrix),
+        val_host.data(), row_ptr_host.data(), column_index_host.data(),
+        b_host.data(), x_host.data());
+
+      // Move the solution back to the device
+      Utilities::CUDA::copy_to_dev(x_host, x_dev);
+    }
+  }
+
+
+
+  template <typename Number>
+  SolverDirect<Number>::AdditionalData::
+  AdditionalData(const std::string &solver_type)
+    :
+    solver_type(solver_type)
+  {}
+
+
+
+  template <typename Number>
+  SolverDirect<Number>::SolverDirect(const Utilities::CUDA::Handle &handle,
+                                     SolverControl &cn,
+                                     const AdditionalData &data)
+    :
+    cuda_handle(handle),
+    solver_control(cn),
+    additional_data(data.solver_type)
+  {}
+
+
+
+  template <typename Number>
+  SolverControl &SolverDirect<Number>::control() const
+  {
+    return solver_control;
+  }
+
+
+
+  template <typename Number>
+  void SolverDirect<Number>::solve(
+    const SparseMatrix<Number> &A,
+    LinearAlgebra::CUDAWrappers::Vector<Number> &x,
+    const LinearAlgebra::CUDAWrappers::Vector<Number> &b)
+  {
+    if (additional_data.solver_type == "Cholesky")
+      internal::cholesky_factorization(cuda_handle.cusolver_sp_handle, A,
+                                       b.get_values(), x.get_values());
+    else if (additional_data.solver_type == "LU_dense")
+      internal::lu_factorization(cuda_handle.cusparse_handle,
+                                 cuda_handle.cusolver_dn_handle, A,
+                                 b.get_values(), x.get_values());
+    else if (additional_data.solver_type == "LU_host")
+      internal::lu_factorization(cuda_handle.cusolver_sp_handle, A,
+                                 b.get_values(), x.get_values());
+    else
+      AssertThrow(false, ExcMessage("The provided solver name " +
+                                    additional_data.solver_type +
+                                    " is invalid."));
+
+    // Force the SolverControl object to report convergence
+    solver_control.check(0, 0);
+  }
+
+
+  // Explicit instantiations
+  template class SolverDirect<float>;
+  template class SolverDirect<double>;
+}
+
+DEAL_II_NAMESPACE_CLOSE
diff --git a/source/lac/cuda_sparse_matrix.cu b/source/lac/cuda_sparse_matrix.cu
index 453330f76e..16e54246de 100644
--- a/source/lac/cuda_sparse_matrix.cu
+++ b/source/lac/cuda_sparse_matrix.cu
@@ -52,11 +52,11 @@ namespace CUDAWrappers
       CUSPARSE_OPERATION_TRANSPOSE :
       CUSPARSE_OPERATION_NON_TRANSPOSE;
 
-    cusparseStatus_t error_code;
     // This function performs y = alpha*op(A)*x + beta*y
-    error_code = cusparseScsrmv(handle, cusparse_operation, m, n, nnz,
-                                &alpha, descr, A_val_dev, A_row_ptr_dev,
-                                A_column_index_dev, x, &beta, y);
+    cusparseStatus_t error_code = cusparseScsrmv(handle, cusparse_operation,
+                                                 m, n, nnz, &alpha, descr,
+                                                 A_val_dev, A_row_ptr_dev,
+                                                 A_column_index_dev, x, &beta, y);
     AssertCusparse(error_code);
   }
 
@@ -73,11 +73,11 @@ namespace CUDAWrappers
       CUSPARSE_OPERATION_TRANSPOSE :
       CUSPARSE_OPERATION_NON_TRANSPOSE;
 
-    cusparseStatus_t error_code;
     // This function performs y = alpha*op(A)*x + beta*y
-    error_code = cusparseDcsrmv(handle, cusparse_operation, m, n, nnz,
-                                &alpha, descr, A_val_dev, A_row_ptr_dev,
-                                A_column_index_dev, x, &beta, y);
+    cusparseStatus_t error_code = cusparseDcsrmv(handle, cusparse_operation,
+                                                 m, n, nnz, &alpha, descr,
+                                                 A_val_dev, A_row_ptr_dev,
+                                                 A_column_index_dev, x, &beta, y);
     AssertCusparse(error_code);
   }
 
@@ -452,7 +452,7 @@ namespace CUDAWrappers
 
   template <typename Number>
   std::tuple<Number *, int *, int *, cusparseMatDescr_t>
-  SparseMatrix<Number>::get_cusparse_matrix()
+  SparseMatrix<Number>::get_cusparse_matrix() const
   {
     return std::make_tuple(val_dev, column_index_dev, row_ptr_dev, descr);
   }
-- 
2.39.5