pBufferSizeInBytes);
}
*/
-
-
-
- template <typename Number>
- void
- delete_device_vector(Number *device_ptr) noexcept
- {
- const cudaError_t error_code = cudaFree(device_ptr);
- (void)error_code;
- AssertNothrow(error_code == cudaSuccess,
- dealii::ExcCudaError(cudaGetErrorString(error_code)));
- }
-
- template <typename Number>
- Number *
- allocate_device_vector(const std::size_t size)
- {
- Number *device_ptr;
- Utilities::CUDA::malloc(device_ptr, size);
- return device_ptr;
- }
} // namespace
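For context on the calls introduced below: the shared replacements are presumably the helpers declared in <deal.II/base/cuda.h>, and their bodies can be assumed to mirror the local functions deleted above. A minimal sketch under that assumption:

namespace Utilities
{
  namespace CUDA
  {
    // Free device memory. Marked noexcept so it can serve as a
    // std::unique_ptr deleter; the error code is only checked in debug mode.
    template <typename Number>
    void
    delete_device_data(Number *device_ptr) noexcept
    {
      const cudaError_t error_code = cudaFree(device_ptr);
      (void)error_code;
      AssertNothrow(error_code == cudaSuccess,
                    dealii::ExcCudaError(cudaGetErrorString(error_code)));
    }

    // Allocate @p size elements of type Number on the device and return the
    // raw pointer, to be handed to a smart pointer by the caller.
    template <typename Number>
    Number *
    allocate_device_data(const std::size_t size)
    {
      Number *device_ptr;
      Utilities::CUDA::malloc(device_ptr, size);
      return device_ptr;
    }
  } // namespace CUDA
} // namespace Utilities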
namespace CUDAWrappers
template <typename Number>
PreconditionIC<Number>::PreconditionIC(const Utilities::CUDA::Handle &handle)
: cusparse_handle(handle.cusparse_handle)
- , P_val_dev(nullptr, delete_device_vector<Number>)
+ , P_val_dev(nullptr, Utilities::CUDA::delete_device_data<Number>)
, P_row_ptr_dev(nullptr)
, P_column_index_dev(nullptr)
- , tmp_dev(nullptr, delete_device_vector<Number>)
- , buffer_dev(nullptr, delete_device_vector<void>)
+ , tmp_dev(nullptr, Utilities::CUDA::delete_device_data<Number>)
+ , buffer_dev(nullptr, Utilities::CUDA::delete_device_data<void>)
, policy_L(CUSPARSE_SOLVE_POLICY_USE_LEVEL)
, policy_Lt(CUSPARSE_SOLVE_POLICY_USE_LEVEL)
, policy_M(CUSPARSE_SOLVE_POLICY_USE_LEVEL)
const Number *const A_val_dev = std::get<0>(cusparse_matrix);
// create a copy of the matrix entries since the algorithm works in-place.
- P_val_dev.reset(allocate_device_vector<Number>(n_nonzero_elements));
+ P_val_dev.reset(
+ Utilities::CUDA::allocate_device_data<Number>(n_nonzero_elements));
cudaError_t cuda_status = cudaMemcpy(P_val_dev.get(),
A_val_dev,
n_nonzero_elements * sizeof(Number),
const cusparseMatDescr_t mat_descr = std::get<3>(cusparse_matrix);
// initialize an internal buffer we need later on
- tmp_dev.reset(allocate_device_vector<Number>(n_rows));
+ tmp_dev.reset(Utilities::CUDA::allocate_device_data<Number>(n_rows));
// step 3: query how much memory is used in csric02 and csrsv2, and allocate
// the buffer
const int BufferSize =
std::max(BufferSize_M, std::max(BufferSize_L, BufferSize_Lt));
- // workaround: since allocate_device_vector needs a type, we pass char
+ // workaround: since allocate_device_data needs a type, we pass char
// which is required to have size 1.
buffer_dev.reset(static_cast<void *>(
- allocate_device_vector<char>(BufferSize / sizeof(char))));
+ Utilities::CUDA::allocate_device_data<char>(BufferSize / sizeof(char))));
// step 4: perform analysis of incomplete Cholesky on M
// perform analysis of triangular solve on L
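For the deleter arguments in the member-initializer list above to bind, the smart-pointer members are presumably declared with function-pointer deleter types. The class header is not part of this diff, so the following declarations are an assumption:

// Hypothetical member declarations in PreconditionIC<Number>
// (and analogously in PreconditionILU<Number>):
std::unique_ptr<Number[], void (*)(Number *)> P_val_dev;
std::unique_ptr<Number[], void (*)(Number *)> tmp_dev;
std::unique_ptr<void, void (*)(void *)>       buffer_dev;

Naming Utilities::CUDA::delete_device_data<Number> in the constructor then decays to exactly such a function pointer.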
PreconditionILU<Number>::PreconditionILU(
const Utilities::CUDA::Handle &handle)
: cusparse_handle(handle.cusparse_handle)
- , P_val_dev(nullptr, delete_device_vector<Number>)
+ , P_val_dev(nullptr, Utilities::CUDA::delete_device_data<Number>)
, P_row_ptr_dev(nullptr)
, P_column_index_dev(nullptr)
- , tmp_dev(nullptr, delete_device_vector<Number>)
- , buffer_dev(nullptr, delete_device_vector<void>)
+ , tmp_dev(nullptr, Utilities::CUDA::delete_device_data<Number>)
+ , buffer_dev(nullptr, Utilities::CUDA::delete_device_data<void>)
, policy_L(CUSPARSE_SOLVE_POLICY_USE_LEVEL)
, policy_U(CUSPARSE_SOLVE_POLICY_USE_LEVEL)
, policy_M(CUSPARSE_SOLVE_POLICY_USE_LEVEL)
const Number *const A_val_dev = std::get<0>(cusparse_matrix);
// create a copy of the matrix entries since the algorithm works in-place.
- P_val_dev.reset(allocate_device_vector<Number>(n_nonzero_elements));
+ P_val_dev.reset(
+ Utilities::CUDA::allocate_device_data<Number>(n_nonzero_elements));
cudaError_t cuda_status = cudaMemcpy(P_val_dev.get(),
A_val_dev,
n_nonzero_elements * sizeof(Number),
const cusparseMatDescr_t mat_descr = std::get<3>(cusparse_matrix);
// initialize an internal buffer we need later on
- tmp_dev.reset(allocate_device_vector<Number>(n_rows));
+ tmp_dev.reset(Utilities::CUDA::allocate_device_data<Number>(n_rows));
// step 3: query how much memory is used in csrilu02 and csrsv2, and allocate
// the buffer
const int BufferSize =
std::max(BufferSize_M, std::max(BufferSize_L, BufferSize_U));
- // workaround: since allocate_device_vector needs a type, we pass char
+ // workaround: since allocate_device_data needs a type, we pass char
// which is required to have size 1.
buffer_dev.reset(static_cast<void *>(
- allocate_device_vector<char>(BufferSize / sizeof(char))));
+ Utilities::CUDA::allocate_device_data<char>(BufferSize / sizeof(char))));
// step 4: perform analysis of incomplete LU factorization on M
// perform analysis of triangular solve on L
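The char-based allocation plus static_cast<void *> above works because sizeof(char) is 1 by definition, so exactly BufferSize bytes are requested, and cudaFree does not care about the element type. A standalone sketch of the same idea using raw CUDA runtime calls; make_device_buffer and free_device are hypothetical names, not deal.II or cuSPARSE API:

#include <cuda_runtime.h>

#include <cstddef>
#include <memory>

// Deleter that hands the untyped pointer back to the CUDA runtime.
inline void
free_device(void *p)
{
  cudaFree(p);
}

// Allocate an untyped scratch buffer of n_bytes on the device and tie its
// lifetime to a std::unique_ptr, mirroring buffer_dev above.
inline std::unique_ptr<void, void (*)(void *)>
make_device_buffer(const std::size_t n_bytes)
{
  void *ptr = nullptr;
  cudaMalloc(&ptr, n_bytes); // error handling omitted in this sketch
  return std::unique_ptr<void, void (*)(void *)>(ptr, &free_device);
}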
namespace CUDAWrappers
{
- namespace
- {
- template <typename Number>
- void
- delete_device_data(Number *device_ptr) noexcept
- {
- const cudaError_t error_code = cudaFree(device_ptr);
- (void)error_code;
- AssertNothrow(error_code == cudaSuccess,
- dealii::ExcCudaError(cudaGetErrorString(error_code)));
- }
-
- template <typename Number>
- Number *
- allocate_device_data(const std::size_t size)
- {
- Number *device_ptr;
- Utilities::CUDA::malloc(device_ptr, size);
- return device_ptr;
- }
- } // namespace
-
namespace internal
{
template <typename Number>
SparseMatrix<Number>::SparseMatrix()
: nnz(0)
, n_rows(0)
- , val_dev(nullptr, delete_device_data<Number>)
- , column_index_dev(nullptr, delete_device_data<int>)
- , row_ptr_dev(nullptr, delete_device_data<int>)
+ , val_dev(nullptr, Utilities::CUDA::delete_device_data<Number>)
+ , column_index_dev(nullptr, Utilities::CUDA::delete_device_data<int>)
+ , row_ptr_dev(nullptr, Utilities::CUDA::delete_device_data<int>)
, descr(nullptr)
{}
SparseMatrix<Number>::SparseMatrix(
Utilities::CUDA::Handle & handle,
const ::dealii::SparseMatrix<Number> &sparse_matrix_host)
- : val_dev(nullptr, delete_device_data<Number>)
- , column_index_dev(nullptr, delete_device_data<int>)
- , row_ptr_dev(nullptr, delete_device_data<int>)
+ : val_dev(nullptr, Utilities::CUDA::delete_device_data<Number>)
+ , column_index_dev(nullptr, Utilities::CUDA::delete_device_data<int>)
+ , row_ptr_dev(nullptr, Utilities::CUDA::delete_device_data<int>)
, descr(nullptr)
{
reinit(handle, sparse_matrix_host);
}
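The reinit() hunks below copy the host CSR arrays into device arrays obtained the same way. As a usage-level illustration of the allocate/copy/auto-free pattern (a function-body fragment; the size and values are made up, and <vector> plus the deal.II CUDA headers are assumed to be included):

// Hypothetical host data.
const std::size_t   n = 32;
std::vector<double> host_values(n, 1.);

// Device array whose lifetime is tied to the function-pointer deleter.
std::unique_ptr<double[], void (*)(double *)> device_values(
  Utilities::CUDA::allocate_device_data<double>(n),
  Utilities::CUDA::delete_device_data<double>);

// Copy host -> device and check the return code, as in the code above.
const cudaError_t error_code = cudaMemcpy(device_values.get(),
                                          host_values.data(),
                                          n * sizeof(double),
                                          cudaMemcpyHostToDevice);
AssertCuda(error_code);
// No explicit cudaFree(): the deleter runs when device_values goes out of
// scope.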
// Copy the elements to the gpu
- val_dev.reset(allocate_device_data<Number>(nnz));
+ val_dev.reset(Utilities::CUDA::allocate_device_data<Number>(nnz));
cudaError_t error_code = cudaMemcpy(val_dev.get(),
&val[0],
nnz * sizeof(Number),
AssertCuda(error_code);
// Copy the column indices to the gpu
- column_index_dev.reset(allocate_device_data<int>(nnz));
+ column_index_dev.reset(Utilities::CUDA::allocate_device_data<int>(nnz));
AssertCuda(error_code);
error_code = cudaMemcpy(column_index_dev.get(),
&column_index[0],
AssertCuda(error_code);
// Copy the row pointer to the gpu
- row_ptr_dev.reset(allocate_device_data<int>(row_ptr_size));
+ row_ptr_dev.reset(Utilities::CUDA::allocate_device_data<int>(row_ptr_size));
AssertCuda(error_code);
error_code = cudaMemcpy(row_ptr_dev.get(),
&row_ptr[0],
using ::dealii::CUDAWrappers::block_size;
using ::dealii::CUDAWrappers::chunk_size;
- namespace
- {
- template <typename Number>
- void
- delete_device_vector(Number *device_ptr) noexcept
- {
- const cudaError_t error_code = cudaFree(device_ptr);
- (void)error_code;
- AssertNothrow(error_code == cudaSuccess,
- dealii::ExcCudaError(cudaGetErrorString(error_code)));
- }
-
- template <typename Number>
- Number *
- allocate_device_vector(const std::size_t size)
- {
- Number *device_ptr;
- Utilities::CUDA::malloc(device_ptr, size);
- return device_ptr;
- }
- } // namespace
-
template <typename Number>
Vector<Number>::Vector()
- : val(nullptr, delete_device_vector<Number>)
+ : val(nullptr, Utilities::CUDA::delete_device_data<Number>)
, n_elements(0)
{}
template <typename Number>
Vector<Number>::Vector(const Vector<Number> &V)
- : val(allocate_device_vector<Number>(V.n_elements),
- delete_device_vector<Number>)
+ : val(Utilities::CUDA::allocate_device_data<Number>(V.n_elements),
+ Utilities::CUDA::delete_device_data<Number>)
, n_elements(V.n_elements)
{
// Copy the values.
template <typename Number>
Vector<Number>::Vector(const size_type n)
- : val(nullptr, delete_device_vector<Number>)
+ : val(nullptr, Utilities::CUDA::delete_device_data<Number>)
, n_elements(0)
{
reinit(n, false);
if (n == 0)
val.reset();
else if (n != n_elements)
- val.reset(allocate_device_vector<Number>(n));
+ val.reset(Utilities::CUDA::allocate_device_data<Number>(n));
// If necessary set the elements to zero
if (omit_zeroing_entries == false)