From e7fbd2d33640e18e4bab45207a6dfda6a99c9a2e Mon Sep 17 00:00:00 2001
From: Daniel Arndt
Date: Sun, 11 Aug 2024 17:47:22 -0600
Subject: [PATCH] Remove CUDAWrappers, related documentation and macros
---
.github/workflows/linux.yml | 14 +-
CMakeLists.txt | 1 -
cmake/checks/check_01_cpu_features.cmake | 10 -
cmake/configure/configure_10_mpi.cmake | 7 +-
cmake/configure/configure_40_cuda.cmake | 126 --
contrib/docker/Dockerfile | 1 -
doc/doxygen/headers/cuda.h | 21 -
doc/doxygen/headers/glossary.h | 2 +-
doc/doxygen/headers/main.h | 1 -
doc/doxygen/headers/matrixfree.h | 6 +-
doc/doxygen/options.dox.in | 1 -
doc/external-libs/cuda.html | 14 -
.../changes/incompatibilities/20240811Arndt | 4 +
doc/users/cmake_dealii.html | 1 -
doc/users/cmake_user.html | 1 -
doc/users/config.sample | 1 -
examples/step-64/doc/intro.dox | 4 +-
examples/step-64/step-64.cc | 3 +-
include/deal.II/base/config.h.in | 4 -
include/deal.II/base/cuda.h | 187 --
include/deal.II/base/cuda_size.h | 44 -
include/deal.II/base/exceptions.h | 232 ---
include/deal.II/base/memory_space.h | 7 -
include/deal.II/base/memory_space_data.h | 7 +-
include/deal.II/base/numbers.h | 26 -
include/deal.II/base/undefine_macros.h | 12 -
include/deal.II/lac/cuda_atomic.h | 83 -
include/deal.II/lac/cuda_kernels.h | 516 -----
include/deal.II/lac/cuda_kernels.templates.h | 592 ------
include/deal.II/lac/cuda_precondition.h | 521 -----
include/deal.II/lac/cuda_solver_direct.h | 118 --
include/deal.II/lac/cuda_sparse_matrix.h | 531 -----
include/deal.II/lac/cuda_vector.h | 435 ----
include/deal.II/lac/precondition.h | 1 -
include/deal.II/lac/read_write_vector.h | 39 -
.../deal.II/lac/read_write_vector.templates.h | 94 -
.../deal.II/matrix_free/cuda_fe_evaluation.h | 37 -
.../matrix_free/cuda_hanging_nodes_internal.h | 32 -
.../deal.II/matrix_free/cuda_matrix_free.h | 35 -
.../matrix_free/cuda_matrix_free.templates.h | 35 -
.../matrix_free/cuda_tensor_product_kernels.h | 37 -
.../matrix_free/hanging_nodes_internal.h | 2 +-
.../matrix_free/portable_fe_evaluation.h | 2 -
.../portable_hanging_nodes_internal.h | 2 -
.../matrix_free/portable_matrix_free.h | 34 +-
.../portable_matrix_free.templates.h | 55 +-
.../portable_tensor_product_kernels.h | 8 -
source/base/CMakeLists.txt | 8 -
source/base/cuda.cc | 60 -
source/base/exceptions.cc | 91 -
source/lac/CMakeLists.txt | 21 -
source/lac/cuda_kernels.cc | 250 ---
source/lac/cuda_precondition.cc | 1826 -----------------
source/lac/cuda_solver_direct.cc | 488 -----
source/lac/cuda_sparse_matrix.cc | 695 -------
source/lac/cuda_vector.cc | 620 ------
source/lac/read_write_vector_cuda.cc | 49 -
source/lac/trilinos_tpetra_block_vector.cc | 8 +-
source/lac/trilinos_tpetra_vector.cc | 8 +-
source/lac/vector_memory.cc | 8 -
source/lac/vector_memory_cuda.cc | 42 -
tests/base/kokkos_point.cc | 2 +-
tests/cuda/CMakeLists.txt | 6 -
tests/cuda/cuda_vector_01.cc | 124 --
tests/cuda/cuda_vector_01.output | 2 -
tests/cuda/cuda_vector_02.cc | 120 --
tests/cuda/cuda_vector_02.output | 2 -
tests/cuda/cuda_vector_03.cc | 50 -
tests/cuda/cuda_vector_03.output | 2 -
tests/cuda/cuda_vector_04.cc | 58 -
tests/cuda/cuda_vector_04.output | 104 -
tests/cuda/cuda_vector_05.cc | 71 -
tests/cuda/cuda_vector_05.output | 2 -
tests/cuda/precondition_01.cc | 88 -
tests/cuda/precondition_01.output | 10 -
tests/cuda/precondition_02.cc | 88 -
tests/cuda/precondition_02.output | 10 -
tests/cuda/solver_01.cc | 87 -
tests/cuda/solver_01.output | 6 -
tests/cuda/solver_02.cc | 117 --
tests/cuda/solver_02.output | 11 -
tests/cuda/solver_03.cc | 87 -
tests/cuda/solver_03.output | 6 -
tests/cuda/solver_04.cc | 88 -
tests/cuda/solver_04.output | 6 -
tests/cuda/solver_05.cc | 87 -
tests/cuda/solver_05.output | 6 -
tests/cuda/solver_06.cc | 87 -
tests/cuda/solver_06.output | 6 -
tests/cuda/solver_07.cc | 87 -
tests/cuda/solver_07.output | 6 -
tests/cuda/solver_08.cc | 87 -
tests/cuda/solver_08.output | 6 -
tests/cuda/solver_09.cc | 116 --
tests/cuda/solver_09.output | 6 -
tests/cuda/solver_10.cc | 136 --
tests/cuda/solver_10.output | 6 -
tests/cuda/sparse_matrix_01.cc | 225 --
tests/cuda/sparse_matrix_01.output | 2 -
tests/cuda/sparse_matrix_02.cc | 64 -
tests/cuda/sparse_matrix_02.output | 34 -
tests/lac/affine_constraints_set_zero.cc | 2 -
tests/lac/parallel_vector_01.cc | 1 -
tests/lac/parallel_vector_02.cc | 1 -
tests/lac/parallel_vector_03.cc | 1 -
tests/lac/parallel_vector_03a.cc | 1 -
tests/lac/parallel_vector_04.cc | 1 -
tests/lac/parallel_vector_05.cc | 1 -
tests/lac/parallel_vector_06.cc | 1 -
tests/lac/parallel_vector_08.cc | 1 -
tests/lac/parallel_vector_10.cc | 1 -
tests/lac/parallel_vector_11.cc | 1 -
tests/lac/parallel_vector_12.cc | 2 +-
tests/lac/parallel_vector_13.cc | 1 -
tests/lac/parallel_vector_14.cc | 1 -
tests/lac/parallel_vector_15.cc | 1 -
tests/lac/parallel_vector_21.cc | 1 -
tests/lac/parallel_vector_25.cc | 1 -
.../coefficient_eval_device.cc | 1 -
tests/matrix_free_kokkos/evaluate_1d_shape.cc | 9 +-
tests/matrix_free_kokkos/evaluate_2d_shape.cc | 10 +-
.../matrix_free_device_initialize_vector.cc | 15 -
.../matrix_free_device_matrix_vector_06a.cc | 2 +-
.../matrix_free_device_multiple_objects.cc | 1 -
.../matrix_vector_device_common.h | 1 -
.../matrix_vector_device_mf.h | 1 -
.../performance/timing_matrix_free_kokkos.cc | 31 +-
tests/quick_tests/cuda.cc | 75 -
tests/run_testsuite.cmake | 2 +-
tests/tests.h | 55 -
130 files changed, 61 insertions(+), 10389 deletions(-)
delete mode 100644 cmake/configure/configure_40_cuda.cmake
delete mode 100644 doc/doxygen/headers/cuda.h
create mode 100644 doc/news/changes/incompatibilities/20240811Arndt
delete mode 100644 include/deal.II/base/cuda.h
delete mode 100644 include/deal.II/base/cuda_size.h
delete mode 100644 include/deal.II/lac/cuda_atomic.h
delete mode 100644 include/deal.II/lac/cuda_kernels.h
delete mode 100644 include/deal.II/lac/cuda_kernels.templates.h
delete mode 100644 include/deal.II/lac/cuda_precondition.h
delete mode 100644 include/deal.II/lac/cuda_solver_direct.h
delete mode 100644 include/deal.II/lac/cuda_sparse_matrix.h
delete mode 100644 include/deal.II/lac/cuda_vector.h
delete mode 100644 include/deal.II/matrix_free/cuda_fe_evaluation.h
delete mode 100644 include/deal.II/matrix_free/cuda_hanging_nodes_internal.h
delete mode 100644 include/deal.II/matrix_free/cuda_matrix_free.h
delete mode 100644 include/deal.II/matrix_free/cuda_matrix_free.templates.h
delete mode 100644 include/deal.II/matrix_free/cuda_tensor_product_kernels.h
delete mode 100644 source/base/cuda.cc
delete mode 100644 source/lac/cuda_kernels.cc
delete mode 100644 source/lac/cuda_precondition.cc
delete mode 100644 source/lac/cuda_solver_direct.cc
delete mode 100644 source/lac/cuda_sparse_matrix.cc
delete mode 100644 source/lac/cuda_vector.cc
delete mode 100644 source/lac/read_write_vector_cuda.cc
delete mode 100644 source/lac/vector_memory_cuda.cc
delete mode 100644 tests/cuda/CMakeLists.txt
delete mode 100644 tests/cuda/cuda_vector_01.cc
delete mode 100644 tests/cuda/cuda_vector_01.output
delete mode 100644 tests/cuda/cuda_vector_02.cc
delete mode 100644 tests/cuda/cuda_vector_02.output
delete mode 100644 tests/cuda/cuda_vector_03.cc
delete mode 100644 tests/cuda/cuda_vector_03.output
delete mode 100644 tests/cuda/cuda_vector_04.cc
delete mode 100644 tests/cuda/cuda_vector_04.output
delete mode 100644 tests/cuda/cuda_vector_05.cc
delete mode 100644 tests/cuda/cuda_vector_05.output
delete mode 100644 tests/cuda/precondition_01.cc
delete mode 100644 tests/cuda/precondition_01.output
delete mode 100644 tests/cuda/precondition_02.cc
delete mode 100644 tests/cuda/precondition_02.output
delete mode 100644 tests/cuda/solver_01.cc
delete mode 100644 tests/cuda/solver_01.output
delete mode 100644 tests/cuda/solver_02.cc
delete mode 100644 tests/cuda/solver_02.output
delete mode 100644 tests/cuda/solver_03.cc
delete mode 100644 tests/cuda/solver_03.output
delete mode 100644 tests/cuda/solver_04.cc
delete mode 100644 tests/cuda/solver_04.output
delete mode 100644 tests/cuda/solver_05.cc
delete mode 100644 tests/cuda/solver_05.output
delete mode 100644 tests/cuda/solver_06.cc
delete mode 100644 tests/cuda/solver_06.output
delete mode 100644 tests/cuda/solver_07.cc
delete mode 100644 tests/cuda/solver_07.output
delete mode 100644 tests/cuda/solver_08.cc
delete mode 100644 tests/cuda/solver_08.output
delete mode 100644 tests/cuda/solver_09.cc
delete mode 100644 tests/cuda/solver_09.output
delete mode 100644 tests/cuda/solver_10.cc
delete mode 100644 tests/cuda/solver_10.output
delete mode 100644 tests/cuda/sparse_matrix_01.cc
delete mode 100644 tests/cuda/sparse_matrix_01.output
delete mode 100644 tests/cuda/sparse_matrix_02.cc
delete mode 100644 tests/cuda/sparse_matrix_02.output
delete mode 100644 tests/quick_tests/cuda.cc
diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml
index b8998bf32a..2b946af978 100644
--- a/.github/workflows/linux.yml
+++ b/.github/workflows/linux.yml
@@ -369,7 +369,6 @@ jobs:
-D CMAKE_CXX_COMPILER=${GITHUB_WORKSPACE}/kokkos/bin/nvcc_wrapper \
-D DEAL_II_CXX_FLAGS='-Werror -Wno-non-template-friend' \
-D DEAL_II_EARLY_DEPRECATIONS=ON \
- -D DEAL_II_WITH_CUDA="ON" \
-D DEAL_II_WITH_KOKKOS="ON" \
-D KOKKOS_DIR=${GITHUB_WORKSPACE}/../kokkos-install \
-D DEAL_II_WITH_MPI="ON" \
@@ -386,10 +385,8 @@ jobs:
- name: build CUDA tests
run: |
cd build
- make -j2 setup_tests_cuda setup_tests_matrix_free_kokkos
- cd tests/cuda
- make -j2 compile_test_executables
- cd ../matrix_free_kokkos
+ make -j2 setup_tests_matrix_free_kokkos
+ cd tests/matrix_free_kokkos
make -j2 compile_test_executables
#############################
@@ -463,7 +460,6 @@ jobs:
-D DEAL_II_FORCE_BUNDLED_BOOST=ON \
-D DEAL_II_CXX_FLAGS="-std=c++17" \
-D DEAL_II_EARLY_DEPRECATIONS=ON \
- -D DEAL_II_WITH_CUDA="ON" \
-D DEAL_II_WITH_KOKKOS="ON" \
-D KOKKOS_DIR=${GITHUB_WORKSPACE}/../kokkos-install \
-D DEAL_II_WITH_MPI="ON" \
@@ -484,8 +480,6 @@ jobs:
- name: build CUDA tests
run: |
cd build
- make -j2 setup_tests_cuda setup_tests_matrix_free_kokkos
- cd tests/cuda
- make -j2 compile_test_executables
- cd ../matrix_free_kokkos
+ make -j2 setup_tests_matrix_free_kokkos
+ cd tests/matrix_free_kokkos
make -j2 compile_test_executables
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 89e3d3e715..8dcdbb73b2 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -94,7 +94,6 @@ verbose_include(${CMAKE_SOURCE_DIR}/cmake/setup_cached_variables.cmake)
# Now, set the project and set up the rest:
#
project(deal.II CXX C)
-enable_language_optional(Fortran)
verbose_include(${CMAKE_SOURCE_DIR}/cmake/setup_deal_ii.cmake)
diff --git a/cmake/checks/check_01_cpu_features.cmake b/cmake/checks/check_01_cpu_features.cmake
index 5626fe51cf..60afad6168 100644
--- a/cmake/checks/check_01_cpu_features.cmake
+++ b/cmake/checks/check_01_cpu_features.cmake
@@ -327,16 +327,6 @@ if(DEAL_II_HAVE_ARM_NEON)
set(DEAL_II_VECTORIZATION_WIDTH_IN_BITS 128)
endif()
-#
-# We need to disable SIMD vectorization for CUDA device code.
-# Otherwise, nvcc compilers from version 9 on will emit an error message like:
-# "[...] contains a vector, which is not supported in device code"
-#
-
-if(DEAL_II_WITH_CUDA)
- set(DEAL_II_VECTORIZATION_WIDTH_IN_BITS 0)
-endif()
-
#
# If we have OpenMP SIMD support (i.e. DEAL_II_HAVE_OPENMP_SIMD is true)
# populate DEAL_II_OPENMP_SIMD_PRAGMA.
diff --git a/cmake/configure/configure_10_mpi.cmake b/cmake/configure/configure_10_mpi.cmake
index 1509e1c06f..bb979dc980 100644
--- a/cmake/configure/configure_10_mpi.cmake
+++ b/cmake/configure/configure_10_mpi.cmake
@@ -54,12 +54,7 @@ macro(feature_mpi_configure_external)
# TODO: We might consider refactoring this option into an automatic check
# (in Modules/FindMPI.cmake) at some point. For the time being this is an
# advanced configuration option.
- #
- if(DEAL_II_MPI_WITH_CUDA_SUPPORT)
- option(DEAL_II_MPI_WITH_DEVICE_SUPPORT "Enable MPI Device support" ON)
- else()
- option(DEAL_II_MPI_WITH_DEVICE_SUPPORT "Enable MPI Device support" OFF)
- endif()
+ option(DEAL_II_MPI_WITH_DEVICE_SUPPORT "Enable MPI Device support" OFF)
mark_as_advanced(DEAL_II_MPI_WITH_DEVICE_SUPPORT)
endmacro()
diff --git a/cmake/configure/configure_40_cuda.cmake b/cmake/configure/configure_40_cuda.cmake
deleted file mode 100644
index 806bb5675a..0000000000
--- a/cmake/configure/configure_40_cuda.cmake
+++ /dev/null
@@ -1,126 +0,0 @@
-## ------------------------------------------------------------------------
-##
-## SPDX-License-Identifier: LGPL-2.1-or-later
-## Copyright (C) 2022 - 2023 by the deal.II authors
-##
-## This file is part of the deal.II library.
-##
-## Part of the source code is dual licensed under Apache-2.0 WITH
-## LLVM-exception OR LGPL-2.1-or-later. Detailed license information
-## governing the source code and code contributions can be found in
-## LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
-##
-## ------------------------------------------------------------------------
-
-#
-# Configuration for cuda support:
-#
-
-#
-# cuda support is experimental. Therefore, disable the feature per default:
-#
-set(DEAL_II_WITH_CUDA FALSE CACHE BOOL "")
-
-macro(feature_cuda_find_external var)
- if(DEAL_II_FEATURE_KOKKOS_BUNDLED_CONFIGURED)
- set(CUDA_ADDITIONAL_ERROR_STRING
- ${CUDA_ADDITIONAL_ERROR_STRING}
- "deal.II's bundled version of Kokkos only supports the Serial backend and therefore cannot be used with Cuda."
- )
- set(${var} FALSE)
- elseif(NOT Kokkos_ENABLE_CUDA)
- set(CUDA_ADDITIONAL_ERROR_STRING
- ${CUDA_ADDITIONAL_ERROR_STRING}
- "deal.II can only be compiled with Cuda support if Kokkos was built with Cuda support!"
- )
- set(${var} FALSE)
- else()
- # FIXME We need to also find and link with cuSolver and cuSparse even though
- # relying on Kokkos for linking with Cuda. That's why we keep the code below for now.
-
- # We need to set CUDA_USE_STATIC_CUDA_RUNTIME before find_package(CUDA) and to
- # force the value otherwise it is overwritten by find_package(CUDA)
- if(BUILD_SHARED_LIBS)
- set(CUDA_USE_STATIC_CUDA_RUNTIME OFF CACHE BOOL "" FORCE)
- endif()
-
- #
- # TODO: Ultimately, this find_package call is not needed any more. We
- # still use it because it is very convenient to (a) check that CUDA is
- # installed, (b) get compiler path and include directories / libraries.
- #
- find_package(DEAL_II_CUDA)
-
- if(CUDA_FOUND)
- #
- # CUDA was found, check whether we can actually use it:
- #
- set(${var} TRUE)
-
- #
- # disable CUDA support older than 10.2:
- #
- if(CUDA_VERSION VERSION_LESS 10.2)
- message(FATAL_ERROR "\n"
- "deal.II requires CUDA version 10.2 or newer."
- )
- endif()
-
- #
- # We do not support CUDA 12.0 and newer:
- #
- if(CUDA_VERSION VERSION_GREATER_EQUAL 12.0)
- message(FATAL_ERROR "\n"
- "deal.II's own CUDA backend does not support CUDA version 12.0 or newer.\n"
- "Instead, configure Kokkos with CUDA enabled."
- )
- endif()
-
- #
- # CUDA Toolkit 10 is incompatible with C++17.
- # Make sure that deal.II is configured appropriately
- #
- macro(_cuda_ensure_feature_off _version _cpp_version_bad _cpp_version_good)
- if(${CUDA_VERSION_MAJOR} EQUAL ${_version})
- if(${DEAL_II_HAVE_CXX${_cpp_version_bad}})
- set(${var} FALSE)
- message(STATUS "CUDA ${_version} requires ${_feature} to be set to off.")
- set(CUDA_ADDITIONAL_ERROR_STRING
- ${CUDA_ADDITIONAL_ERROR_STRING}
- "CUDA ${_version} is not compatible with the C++${_cpp_version_bad} standard.\n"
- "Please explicitly set the standard version to C++${_cpp_version_good}, e.g. by reconfiguring with\n"
- " cmake -DDEAL_II_CXX_FLAGS=\"-std=c++${_cpp_version_good}\" ."
- )
- endif()
- endif()
- endmacro()
-
- _cuda_ensure_feature_off(10 17 14)
-
- # cuSOLVER requires OpenMP
- find_package(OpenMP REQUIRED)
- set(DEAL_II_LINKER_FLAGS "${DEAL_II_LINKER_FLAGS} ${OpenMP_CXX_FLAGS}")
- endif()
- endif()
-endmacro()
-
-
-macro(feature_cuda_configure_external)
- # We cannot use -pedantic as compiler flags. nvcc generates code that
- # produces a lot of warnings when pedantic is enabled. So filter out the
- # flag:
- #
- string(REPLACE "-pedantic" "" DEAL_II_WARNING_FLAGS "${DEAL_II_WARNING_FLAGS}")
-endmacro()
-
-
-macro(feature_cuda_error_message)
- message(FATAL_ERROR "\n"
- "Could not find any suitable cuda library!\n"
- ${CUDA_ADDITIONAL_ERROR_STRING}
- "\nPlease ensure that a cuda library is installed on your computer and deal.II is configured to use an external Kokkos installation.\n"
- )
-endmacro()
-
-
-configure_feature(CUDA)
diff --git a/contrib/docker/Dockerfile b/contrib/docker/Dockerfile
index 0d545fbe89..d2857a70b8 100644
--- a/contrib/docker/Dockerfile
+++ b/contrib/docker/Dockerfile
@@ -28,7 +28,6 @@ RUN cd /usr/src && \
-DDEAL_II_WITH_BOOST=ON \
-DDEAL_II_WITH_CGAL=ON \
-DDEAL_II_WITH_COMPLEX_VALUES=ON \
- -DDEAL_II_WITH_CUDA=OFF \
-DDEAL_II_WITH_GINKGO=OFF \
-DDEAL_II_WITH_GMSH=ON \
-DDEAL_II_WITH_GSL=ON \
diff --git a/doc/doxygen/headers/cuda.h b/doc/doxygen/headers/cuda.h
deleted file mode 100644
index 0457b9be51..0000000000
--- a/doc/doxygen/headers/cuda.h
+++ /dev/null
@@ -1,21 +0,0 @@
-// ------------------------------------------------------------------------
-//
-// SPDX-License-Identifier: LGPL-2.1-or-later
-// Copyright (C) 2018 by the deal.II authors
-//
-// This file is part of the deal.II library.
-//
-// Part of the source code is dual licensed under Apache-2.0 WITH
-// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
-// governing the source code and code contributions can be found in
-// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
-//
-// ------------------------------------------------------------------------
-
-
-/**
- * @defgroup CUDAWrappers CUDA Wrappers
- *
- * The classes in this group are concerned with the description of features
- * to be run on GPUs using CUDA.
- */
diff --git a/doc/doxygen/headers/glossary.h b/doc/doxygen/headers/glossary.h
index aa1750a8d1..73b629ddf5 100644
--- a/doc/doxygen/headers/glossary.h
+++ b/doc/doxygen/headers/glossary.h
@@ -777,7 +777,7 @@
* @anchor GlossDevice Device
*
* We commonly refer to GPUs as "devices" in deal.II. The context is
- * always related to Kokkos or CUDA that motivated using this term.
+ * always related to Kokkos, which motivated the use of this term.
* Occasionally, we also call data corresponding to MemorySpace::Default "device data"
* (even though it is allocated in CPU memory if Kokkos was configured without
* a GPU backend) to distinguish between MemorySpace::Default and MemorySpace::Host.
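
For reference (not part of this patch), a minimal sketch of how the two memory
spaces appear in user code, using the Kokkos-backed vector type that remains in
the library:

    #include <deal.II/base/memory_space.h>
    #include <deal.II/lac/la_parallel_vector.h>

    using namespace dealii;

    // Data that always lives in CPU memory:
    LinearAlgebra::distributed::Vector<double, MemorySpace::Host> host_vec;

    // "Device data": allocated on the GPU if Kokkos was configured with a
    // GPU backend, otherwise in CPU memory as described above:
    LinearAlgebra::distributed::Vector<double, MemorySpace::Default> device_vec;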
diff --git a/doc/doxygen/headers/main.h b/doc/doxygen/headers/main.h
index 1d561e86e2..6d73f64d6f 100644
--- a/doc/doxygen/headers/main.h
+++ b/doc/doxygen/headers/main.h
@@ -74,7 +74,6 @@
rank="same";
petsc [label="PETSc", URL="\ref PETScWrappers"];
trilinos [label="Trilinos", URL="\ref TrilinosWrappers"];
- cuda [label="CUDA", URL="\ref CUDAWrappers"];
}
umfpack [label="UMFPACK"];
diff --git a/doc/doxygen/headers/matrixfree.h b/doc/doxygen/headers/matrixfree.h
index fbf71a97c0..7b7bf9afc6 100644
--- a/doc/doxygen/headers/matrixfree.h
+++ b/doc/doxygen/headers/matrixfree.h
@@ -42,7 +42,7 @@ digraph G
color="gray", fontcolor="gray", fillcolor="white", style="filled"];
fevalues [label="FEEvaluation", fillcolor="deepskyblue"];
mf [label="MatrixFree loops", fillcolor="deepskyblue"];
- cuda [label="CUDA", URL="\ref CUDAWrappers", fontname="FreeSans",fontsize=12,
+ portable [label="Portable", URL="\ref Portable", fontname="FreeSans",fontsize=12,
height=0.2,width=0.4,
color="gray", fontcolor="gray", fillcolor="white", style="filled"];
tbb [label="TBB", fontname="FreeSans",fontsize=12,
@@ -51,8 +51,8 @@ digraph G
{rank=same
simd -> fevalues [dir="none", color="transparent"];
fevalues -> mf [dir="none", color="transparent"];
- mf -> cuda [dir="none", color="transparent"];
- cuda -> tbb [dir="none", color="transparent"];
+ mf -> portable [dir="none", color="transparent"];
+ portable -> tbb [dir="none", color="transparent"];
}
subgraph sol {
rank="same";
diff --git a/doc/doxygen/options.dox.in b/doc/doxygen/options.dox.in
index f1bcad8bf1..1ac505f306 100644
--- a/doc/doxygen/options.dox.in
+++ b/doc/doxygen/options.dox.in
@@ -204,7 +204,6 @@ PREDEFINED = DOXYGEN=1 \
DEAL_II_CGAL_VERSION_GTE=1 \
DEAL_II_WITH_TASKFLOW=1 \
DEAL_II_WITH_COMPLEX_VALUES=1 \
- DEAL_II_WITH_CUDA=1 \
DEAL_II_WITH_GINKGO=1 \
DEAL_II_WITH_GMSH=1 \
DEAL_II_GMSH_WITH_API=1 \
diff --git a/doc/external-libs/cuda.html b/doc/external-libs/cuda.html
index 6e6be86b17..4bdede8462 100644
--- a/doc/external-libs/cuda.html
+++ b/doc/external-libs/cuda.html
@@ -15,20 +15,6 @@
To compile and run CUDA code, you need to use an external Kokkos
installation that has been configured with CUDA support.
-
- To enable deal.II's own CUDA backend, you will need your GPU to have compute
- capability 6.0 or higher. Independently from the GPU itself, you also need a
- version of CUDA between 10.2 and 11.8.
-
-
-
- To configure deal.II's own CUDA backend use the following option:
-
-
- -DDEAL_II_WITH_CUDA=ON
-
- CUDA versions prior to 11.0 don't support C++17 or higher. You will have
- to make sure that C++17 is disabled when using earlier versions.
diff --git a/doc/news/changes/incompatibilities/20240811Arndt b/doc/news/changes/incompatibilities/20240811Arndt
new file mode 100644
index 0000000000..3c61624a49
--- /dev/null
+++ b/doc/news/changes/incompatibilities/20240811Arndt
@@ -0,0 +1,4 @@
+Removed: All support for CUDAWrappers and the CUDA-related macros has been removed.
+GPU support is provided through Kokkos.
+
+(Daniel Arndt, 2024/08/11)
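
For reference (not part of this patch), a minimal migration sketch: the removed
class is shown in the comment, and the Kokkos-backed vector that remains in the
library takes its place.

    #include <deal.II/base/memory_space.h>
    #include <deal.II/lac/la_parallel_vector.h>

    int main()
    {
      const unsigned int size = 100;

      // Removed by this patch:
      //   dealii::LinearAlgebra::CUDAWrappers::Vector<double> vec(size);

      // Replacement; the data lives on the device whenever Kokkos was
      // configured with a GPU backend, and MPI is supported as well:
      dealii::LinearAlgebra::distributed::Vector<double,
                                                 dealii::MemorySpace::Default>
        vec(size);
    }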
diff --git a/doc/users/cmake_dealii.html b/doc/users/cmake_dealii.html
index c16240607b..ddc2f12b8e 100644
--- a/doc/users/cmake_dealii.html
+++ b/doc/users/cmake_dealii.html
@@ -457,7 +457,6 @@ DEAL_II_WITH_ASSIMP
DEAL_II_WITH_BOOST
DEAL_II_WITH_CGAL
DEAL_II_WITH_COMPLEX_VALUES
-DEAL_II_WITH_CUDA
DEAL_II_WITH_GINKGO
DEAL_II_WITH_GMSH
DEAL_II_WITH_GSL
diff --git a/doc/users/cmake_user.html b/doc/users/cmake_user.html
index 3ad6b495c3..9be97af2c9 100644
--- a/doc/users/cmake_user.html
+++ b/doc/users/cmake_user.html
@@ -844,7 +844,6 @@ DEAL_II_WITH_ADOLC
DEAL_II_WITH_ARPACK
DEAL_II_WITH_ASSIMP
DEAL_II_WITH_COMPLEX_VALUES
-DEAL_II_WITH_CUDA
DEAL_II_WITH_GINKGO
DEAL_II_WITH_GMSH
DEAL_II_WITH_GSL
diff --git a/doc/users/config.sample b/doc/users/config.sample
index 0e2869ca5e..4a09f4ace1 100644
--- a/doc/users/config.sample
+++ b/doc/users/config.sample
@@ -118,7 +118,6 @@
# set(DEAL_II_WITH_BOOST "ON" CACHE BOOL "")
# set(DEAL_II_WITH_CGAL "ON" CACHE BOOL "")
# set(DEAL_II_WITH_COMPLEX_VALUES "ON" CACHE BOOL "")
-# set(DEAL_II_WITH_CUDA "ON" CACHE BOOL "")
# set(DEAL_II_WITH_GINKGO "ON" CACHE BOOL "")
# set(DEAL_II_WITH_GMSH "ON" CACHE BOOL "")
# set(DEAL_II_WITH_GSL "ON" CACHE BOOL "")
diff --git a/examples/step-64/doc/intro.dox b/examples/step-64/doc/intro.dox
index d77a6b0d03..4bb3675738 100644
--- a/examples/step-64/doc/intro.dox
+++ b/examples/step-64/doc/intro.dox
@@ -67,8 +67,8 @@ The data movement in deal.II is done using LinearAlgebra::ReadWriteVector. These
vectors can be seen as buffers on the host that are used to either store data
received from the device or to send data to the device. There are two types of vectors
that can be used on the device:
-- LinearAlgebra::CUDAWrappers::Vector, which is similar to the more common
-Vector, and
+- LinearAlgebra::TpetraWrappers::Vector, which keeps a host
+and a device copy of the data, and
- LinearAlgebra::distributed::Vector, which is a regular
LinearAlgebra::distributed::Vector where we have specified which memory
diff --git a/examples/step-64/step-64.cc b/examples/step-64/step-64.cc
index 4ab2559049..5e3624681a 100644
--- a/examples/step-64/step-64.cc
+++ b/examples/step-64/step-64.cc
@@ -363,8 +363,7 @@ namespace Step64
// Since all the operations in the `solve()` function are executed on the
// graphics card, it is necessary for the vectors used to store their values
// on the GPU as well. LinearAlgebra::distributed::Vector can be told which
- // memory space to use. There is also LinearAlgebra::CUDAWrappers::Vector
- // that always uses GPU memory storage but doesn't work with MPI. It might
+ // memory space to use. It might
// be worth noticing that the communication between different MPI processes
// can be improved if the MPI implementation is GPU-aware and the configure
// flag `DEAL_II_MPI_WITH_DEVICE_SUPPORT` is enabled. (The value of this
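
For reference (not part of this patch), the kind of declarations the comment
above describes, mirroring what step-64 itself uses (the member names here are
illustrative):

    // Host copy, used for set-up and output:
    LinearAlgebra::distributed::Vector<double, MemorySpace::Host>
      ghost_solution_host;
    // Solver vector; stored in GPU memory when a GPU backend is available:
    LinearAlgebra::distributed::Vector<double, MemorySpace::Default>
      solution_dev;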
diff --git a/include/deal.II/base/config.h.in b/include/deal.II/base/config.h.in
index e09209db4d..95e01a2918 100644
--- a/include/deal.II/base/config.h.in
+++ b/include/deal.II/base/config.h.in
@@ -41,7 +41,6 @@
#cmakedefine DEAL_II_FEATURE_BOOST_BUNDLED_CONFIGURED
#cmakedefine DEAL_II_WITH_CGAL
#cmakedefine DEAL_II_WITH_COMPLEX_VALUES
-#cmakedefine DEAL_II_WITH_CUDA
#cmakedefine DEAL_II_WITH_GINKGO
#cmakedefine DEAL_II_WITH_GMSH
#cmakedefine DEAL_II_WITH_GSL
@@ -491,9 +490,6 @@
#endif
#cmakedefine DEAL_II_MPI_WITH_DEVICE_SUPPORT
-#ifdef DEAL_II_MPI_WITH_DEVICE_SUPPORT
-#cmakedefine DEAL_II_MPI_WITH_CUDA_SUPPORT
-#endif
/***********************************************************************
* Two macro names that we put at the top and bottom of all deal.II files
diff --git a/include/deal.II/base/cuda.h b/include/deal.II/base/cuda.h
deleted file mode 100644
index 782a47e49b..0000000000
--- a/include/deal.II/base/cuda.h
+++ /dev/null
@@ -1,187 +0,0 @@
-// ------------------------------------------------------------------------
-//
-// SPDX-License-Identifier: LGPL-2.1-or-later
-// Copyright (C) 2018 - 2023 by the deal.II authors
-//
-// This file is part of the deal.II library.
-//
-// Part of the source code is dual licensed under Apache-2.0 WITH
-// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
-// governing the source code and code contributions can be found in
-// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
-//
-// ------------------------------------------------------------------------
-
-#ifndef dealii_cuda_h
-#define dealii_cuda_h
-
-#include <deal.II/base/config.h>
-
-#include <deal.II/base/array_view.h>
-#include <deal.II/base/exceptions.h>
-
-#ifdef DEAL_II_WITH_CUDA
-# include <cusolverDn.h>
-# include <cusolverSp.h>
-# include <cusparse.h>
-
-# include <vector>
-
-DEAL_II_NAMESPACE_OPEN
-namespace Utilities
-{
- /**
- * A namespace for utility structures for CUDA.
- */
- namespace CUDA
- {
- /**
- * Various CUDA APIs need an object to store internal data. This structure
- * creates, initializes, stores, and destroys these so-called handles for
- * the respective CUDA libraries used inside deal.II.
- */
- struct Handle
- {
- /**
- * Constructor. Initialize the handles for the different libraries.
- */
- Handle();
-
- /**
- * Copy constructor is deleted.
- */
- Handle(const Handle &) = delete;
-
- /**
- * Destructor. Destroy the handles.
- */
- ~Handle();
-
- /**
- * Pointer to an opaque cuSolverDN context.
- * The handle must be passed to every cuSolverDN library function.
- */
- cusolverDnHandle_t cusolver_dn_handle;
-
- /**
- * Pointer to an opaque cuSolverSP context.
- * The handle must be passed to every cuSolverSP library function.
- */
- cusolverSpHandle_t cusolver_sp_handle;
-
- /**
- * Pointer to an opaque cuSPARSE context.
- * The handle must be passed to every cuSPARSE library function.
- */
- cusparseHandle_t cusparse_handle;
- };
-
- /**
- * Allocate @p n_elements on the @ref GlossDevice "device".
- */
- template <typename T>
- inline void
- malloc(T *&pointer, const unsigned int n_elements)
- {
- cudaError_t cuda_error_code =
- cudaMalloc(&pointer, n_elements * sizeof(T));
- AssertCuda(cuda_error_code);
- }
-
- /**
- * Free memory on the @ref GlossDevice "device".
- */
- template <typename T>
- inline void
- free(T *&pointer)
- {
- cudaError_t cuda_error_code = cudaFree(pointer);
- AssertCuda(cuda_error_code);
- pointer = nullptr;
- }
-
- /**
- * Allocator to be used for `std::unique_ptr` pointing to @ref GlossDevice "device" memory.
- */
- template <typename Number>
- Number *
- allocate_device_data(const std::size_t size)
- {
- Number *device_ptr;
- Utilities::CUDA::malloc(device_ptr, size);
- return device_ptr;
- }
-
- /**
- * Deleter to be used for `std::unique_ptr` pointing to @ref GlossDevice "device" memory.
- */
- template <typename Number>
- void
- delete_device_data(Number *device_ptr) noexcept
- {
- const cudaError_t error_code = cudaFree(device_ptr);
- AssertNothrowCuda(error_code);
- }
-
- /**
- * Copy the @ref GlossDevice "device" ArrayView @p in to the host ArrayView @p out.
- */
- template <typename T>
- inline void
- copy_to_host(const ArrayView<const T, MemorySpace::CUDA> &in,
- ArrayView<T, MemorySpace::Host> &out)
- {
- AssertDimension(in.size(), out.size());
- cudaError_t cuda_error_code = cudaMemcpy(out.data(),
- in.data(),
- in.size() * sizeof(T),
- cudaMemcpyDeviceToHost);
- AssertCuda(cuda_error_code);
- }
-
- /**
- * Copy the host ArrayView @p in to the @ref GlossDevice "device" ArrayView @p out.
- */
- template <typename T>
- inline void
- copy_to_dev(const ArrayView<const T, MemorySpace::Host> &in,
- ArrayView<T, MemorySpace::CUDA> &out)
- {
- AssertDimension(in.size(), out.size());
- cudaError_t cuda_error_code = cudaMemcpy(out.data(),
- in.data(),
- in.size() * sizeof(T),
- cudaMemcpyHostToDevice);
- AssertCuda(cuda_error_code);
- }
-
- /**
- * Copy the elements in @p pointer_dev to the host in @p vector_host.
- */
- template <typename T>
- inline void
- copy_to_host(const T *pointer_dev, std::vector<T> &vector_host)
- {
- ArrayView<const T, MemorySpace::CUDA> in(pointer_dev, vector_host.size());
- auto out = make_array_view(vector_host);
- copy_to_host(in, out);
- }
-
- /**
- * Copy the elements in @p vector_host to the @ref GlossDevice "device" in @p pointer_dev. The
- * memory needs to be allocate on the @ref GlossDevice "device" before this function is called.
- */
- template <typename T>
- inline void
- copy_to_dev(const std::vector<T> &vector_host, T *pointer_dev)
- {
- auto in = make_array_view(vector_host);
- ArrayView<T, MemorySpace::CUDA> out(pointer_dev, vector_host.size());
- copy_to_dev(in, out);
- }
- } // namespace CUDA
-} // namespace Utilities
-
-DEAL_II_NAMESPACE_CLOSE
-#endif
-#endif
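
The removed Utilities::CUDA helpers map directly onto plain Kokkos facilities.
For reference (not part of this patch), a minimal sketch of device allocation
and a device-to-host copy:

    #include <Kokkos_Core.hpp>

    #include <vector>

    int main(int argc, char *argv[])
    {
      Kokkos::initialize(argc, argv);
      {
        const std::size_t n = 1000;

        // Replaces Utilities::CUDA::malloc()/free(); the View's reference
        // counting also covers the allocate/delete_device_data() pair:
        Kokkos::View<double *> device_data("device_data", n);

        // Replaces Utilities::CUDA::copy_to_host(): wrap the std::vector's
        // storage in an unmanaged host View and deep_copy into it.
        std::vector<double> host_data(n);
        Kokkos::View<double *, Kokkos::HostSpace> host_view(host_data.data(),
                                                            n);
        Kokkos::deep_copy(host_view, device_data);
      }
      Kokkos::finalize();
    }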
diff --git a/include/deal.II/base/cuda_size.h b/include/deal.II/base/cuda_size.h
deleted file mode 100644
index bac486c1a6..0000000000
--- a/include/deal.II/base/cuda_size.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// ------------------------------------------------------------------------
-//
-// SPDX-License-Identifier: LGPL-2.1-or-later
-// Copyright (C) 2018 - 2023 by the deal.II authors
-//
-// This file is part of the deal.II library.
-//
-// Part of the source code is dual licensed under Apache-2.0 WITH
-// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
-// governing the source code and code contributions can be found in
-// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
-//
-// ------------------------------------------------------------------------
-
-#ifndef dealii_cuda_size_h
-#define dealii_cuda_size_h
-
-#include <deal.II/base/config.h>
-
-DEAL_II_NAMESPACE_OPEN
-
-namespace CUDAWrappers
-{
- /**
- * Define the size of a block when launching a CUDA kernel. This number can be
- * changed depending on the architecture the code is running on.
- */
- constexpr int block_size = 512;
-
- /**
- * Define the size of chunk of data worked on by a thread. This number can be
- * changed depending on the architecture the code is running on.
- */
- constexpr int chunk_size = 1;
-
- /**
- * Define the number of threads in a warp.
- */
- constexpr int warp_size = 32;
-} // namespace CUDAWrappers
-
-DEAL_II_NAMESPACE_CLOSE
-
-#endif
diff --git a/include/deal.II/base/exceptions.h b/include/deal.II/base/exceptions.h
index 95d755470a..ea4b3539bf 100644
--- a/include/deal.II/base/exceptions.h
+++ b/include/deal.II/base/exceptions.h
@@ -31,11 +31,6 @@ DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
#include
#include
-#ifdef DEAL_II_WITH_CUDA
-# include <cusolverDn.h>
-# include <cusparse.h>
-#endif
-
DEAL_II_NAMESPACE_OPEN
@@ -1198,21 +1193,6 @@ namespace StandardExceptions
"if deal.II was configured to use Assimp, but cmake did not "
"find a valid Assimp library.");
-#ifdef DEAL_II_WITH_CUDA
- /**
- * This exception is raised if an error happened in a CUDA kernel.
- *
- * The constructor takes a single char*, the output of
- * cudaGetErrorString.
- */
- DeclException1(ExcCudaError, const char *, << arg1);
- /**
- * This exception is raised if an error happened in a cuSPARSE function.
- */
- DeclException1(ExcCusparseError,
- std::string,
- << "There was an error in a cuSPARSE function: " << arg1);
-#endif
/** @} */
/**
@@ -1518,23 +1498,6 @@ namespace deal_II_exceptions
// another function:
do_issue_error_nothrow(e);
}
-#ifdef DEAL_II_WITH_CUDA
- /**
- * Return a string given an error code. This is similar to the
- * cudaGetErrorString function but there is no equivalent function for
- * cuSPARSE.
- */
- std::string
- get_cusparse_error_string(const cusparseStatus_t error_code);
-
- /**
- * Return a string given an error code. This is similar to the
- * cudaGetErrorString function but there is no equivalent function for
- * cuSOLVER.
- */
- std::string
- get_cusolver_error_string(const cusolverStatus_t error_code);
-#endif
} /*namespace internals*/
} /*namespace deal_II_exceptions*/
@@ -2174,201 +2137,6 @@ namespace internal
while (false)
#endif // DEAL_II_WITH_MPI
-#ifdef DEAL_II_WITH_CUDA
-/**
- * An assertion that checks that the error code produced by calling a CUDA
- * routine is equal to cudaSuccess.
- *
- * @note This and similar macro names are examples of preprocessor definitions
- * in the deal.II library that are not prefixed by a string that likely makes
- * them unique to deal.II. As a consequence, it is possible that other
- * libraries your code interfaces with define the same name, and the result
- * will be name collisions (see
- * https://en.wikipedia.org/wiki/Name_collision). One can \#undef
- * this macro, as well as all other macros defined by deal.II that are not
- * prefixed with either <code>DEAL</code> or <code>deal</code>, by including
- * the header <code>deal.II/base/undefine_macros.h</code> after all other
- * deal.II headers have been included.
- *
- * @ingroup Exceptions
- */
-# ifdef DEBUG
-# define AssertCuda(error_code) \
- Assert(error_code == cudaSuccess, \
- dealii::ExcCudaError(cudaGetErrorString(error_code)))
-# else
-# define AssertCuda(error_code) \
- do \
- { \
- (void)(error_code); \
- } \
- while (false)
-# endif
-
-/**
- * The non-throwing equivalent of AssertCuda.
- *
- * @note This and similar macro names are examples of preprocessor definitions
- * in the deal.II library that are not prefixed by a string that likely makes
- * them unique to deal.II. As a consequence, it is possible that other
- * libraries your code interfaces with define the same name, and the result
- * will be name collisions (see
- * https://en.wikipedia.org/wiki/Name_collision). One can \#undef
- * this macro, as well as all other macros defined by deal.II that are not
- * prefixed with either <code>DEAL</code> or <code>deal</code>, by including
- * the header <code>deal.II/base/undefine_macros.h</code> after all other
- * deal.II headers have been included.
- *
- * @ingroup Exceptions
- */
-# ifdef DEBUG
-# define AssertNothrowCuda(error_code) \
- AssertNothrow(error_code == cudaSuccess, \
- dealii::ExcCudaError(cudaGetErrorString(error_code)))
-# else
-# define AssertNothrowCuda(error_code) \
- do \
- { \
- (void)(error_code); \
- } \
- while (false)
-# endif
-
-/**
- * An assertion that checks that the kernel was launched and executed
- * successfully.
- *
- * @note This and similar macro names are examples of preprocessor definitions
- * in the deal.II library that are not prefixed by a string that likely makes
- * them unique to deal.II. As a consequence, it is possible that other
- * libraries your code interfaces with define the same name, and the result
- * will be name collisions (see
- * https://en.wikipedia.org/wiki/Name_collision). One can \#undef
- * this macro, as well as all other macros defined by deal.II that are not
- * prefixed with either <code>DEAL</code> or <code>deal</code>, by including
- * the header <code>deal.II/base/undefine_macros.h</code> after all other
- * deal.II headers have been included.
- *
- * @ingroup Exceptions
- */
-# ifdef DEBUG
-# define AssertCudaKernel() \
- do \
- { \
- cudaError_t local_error_code = cudaPeekAtLastError(); \
- AssertCuda(local_error_code); \
- local_error_code = cudaDeviceSynchronize(); \
- AssertCuda(local_error_code); \
- } \
- while (false)
-# else
-# define AssertCudaKernel() \
- do \
- { \
- } \
- while (false)
-# endif
-
-/**
- * An assertion that checks that the error code produced by calling a cuSPARSE
- * routine is equal to CUSPARSE_STATUS_SUCCESS.
- *
- * @note This and similar macro names are examples of preprocessor definitions
- * in the deal.II library that are not prefixed by a string that likely makes
- * them unique to deal.II. As a consequence, it is possible that other
- * libraries your code interfaces with define the same name, and the result
- * will be name collisions (see
- * https://en.wikipedia.org/wiki/Name_collision). One can \#undef
- * this macro, as well as all other macros defined by deal.II that are not
- * prefixed with either <code>DEAL</code> or <code>deal</code>, by including
- * the header <code>deal.II/base/undefine_macros.h</code> after all other
- * deal.II headers have been included.
- *
- * @ingroup Exceptions
- */
-# ifdef DEBUG
-# define AssertCusparse(error_code) \
- Assert( \
- error_code == CUSPARSE_STATUS_SUCCESS, \
- dealii::ExcCusparseError( \
- dealii::deal_II_exceptions::internals::get_cusparse_error_string( \
- error_code)))
-# else
-# define AssertCusparse(error_code) \
- do \
- { \
- (void)(error_code); \
- } \
- while (false)
-# endif
-
-/**
- * The non-throwing equivalent of AssertCusparse.
- *
- * @note This and similar macro names are examples of preprocessor definitions
- * in the deal.II library that are not prefixed by a string that likely makes
- * them unique to deal.II. As a consequence, it is possible that other
- * libraries your code interfaces with define the same name, and the result
- * will be name collisions (see
- * https://en.wikipedia.org/wiki/Name_collision). One can \#undef
- * this macro, as well as all other macros defined by deal.II that are not
- * prefixed with either <code>DEAL</code> or <code>deal</code>, by including
- * the header <code>deal.II/base/undefine_macros.h</code> after all other
- * deal.II headers have been included.
- *
- * @ingroup Exceptions
- */
-# ifdef DEBUG
-# define AssertNothrowCusparse(error_code) \
- AssertNothrow( \
- error_code == CUSPARSE_STATUS_SUCCESS, \
- dealii::ExcCusparseError( \
- dealii::deal_II_exceptions::internals::get_cusparse_error_string( \
- error_code)))
-# else
-# define AssertNothrowCusparse(error_code) \
- do \
- { \
- (void)(error_code); \
- } \
- while (false)
-# endif
-
-/**
- * An assertion that checks that the error code produced by calling a cuSOLVER
- * routine is equal to CUSOLVER_STATUS_SUCCESS.
- *
- * @note This and similar macro names are examples of preprocessor definitions
- * in the deal.II library that are not prefixed by a string that likely makes
- * them unique to deal.II. As a consequence, it is possible that other
- * libraries your code interfaces with define the same name, and the result
- * will be name collisions (see
- * https://en.wikipedia.org/wiki/Name_collision). One can \#undef
- * this macro, as well as all other macros defined by deal.II that are not
- * prefixed with either <code>DEAL</code> or <code>deal</code>, by including
- * the header <code>deal.II/base/undefine_macros.h</code> after all other
- * deal.II headers have been included.
- *
- * @ingroup Exceptions
- */
-# ifdef DEBUG
-# define AssertCusolver(error_code) \
- Assert( \
- error_code == CUSOLVER_STATUS_SUCCESS, \
- dealii::ExcCusparseError( \
- dealii::deal_II_exceptions::internals::get_cusolver_error_string( \
- error_code)))
-# else
-# define AssertCusolver(error_code) \
- do \
- { \
- (void)(error_code); \
- } \
- while (false)
-# endif
-
-#endif
-
#ifdef DEAL_II_TRILINOS_WITH_SEACAS
/**
* Assertion that checks that the error code produced by calling an ExodusII
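
With these macros gone, device-side errors surface through Kokkos itself. For
reference (not part of this patch), the pattern that takes the place of
AssertCudaKernel():

    #include <Kokkos_Core.hpp>

    void scale(Kokkos::View<double *> v, const double a)
    {
      Kokkos::parallel_for(
        "scale", v.extent(0), KOKKOS_LAMBDA(const int i) { v(i) *= a; });

      // Where AssertCudaKernel() called cudaDeviceSynchronize(), a fence
      // now brings asynchronous failures to a predictable point:
      Kokkos::fence();
    }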
diff --git a/include/deal.II/base/memory_space.h b/include/deal.II/base/memory_space.h
index 65346f7cb3..a4a3050d4b 100644
--- a/include/deal.II/base/memory_space.h
+++ b/include/deal.II/base/memory_space.h
@@ -44,13 +44,6 @@ namespace MemorySpace
{
using kokkos_space = ::Kokkos::DefaultExecutionSpace::memory_space;
};
-
-#ifdef DEAL_II_WITH_CUDA
- /**
- * Structure describing CUDA memory space.
- */
- using CUDA = Default;
-#endif
} // namespace MemorySpace
DEAL_II_NAMESPACE_CLOSE
diff --git a/include/deal.II/base/memory_space_data.h b/include/deal.II/base/memory_space_data.h
index b398087b3b..ace1182b85 100644
--- a/include/deal.II/base/memory_space_data.h
+++ b/include/deal.II/base/memory_space_data.h
@@ -18,7 +18,7 @@
#include <deal.II/base/config.h>
-#include <deal.II/base/cuda.h>
+#include <deal.II/base/kokkos.h>
#include
#include
@@ -35,9 +35,8 @@ namespace MemorySpace
{
/**
* Structure which stores data on the host or the @ref GlossDevice "device" depending on the
- * template parameter @p MemorySpace. Valid choices are MemorySpace::Host,
- * MemorySpace::Default, and MemorySpace::CUDA (if CUDA was enabled in
- * deal.II). The data is copied into the structure which then owns the data
+ * template parameter @p MemorySpace. Valid choices are MemorySpace::Host and MemorySpace::Default.
+ * The data is copied into the structure which then owns the data
* and will release the memory when the destructor is called.
*/
template <typename T, typename MemorySpace>
diff --git a/include/deal.II/base/numbers.h b/include/deal.II/base/numbers.h
index a95bfd201f..245e857505 100644
--- a/include/deal.II/base/numbers.h
+++ b/include/deal.II/base/numbers.h
@@ -20,10 +20,6 @@
#include
-#ifdef DEAL_II_WITH_CUDA
-# include <cuComplex.h>
-#endif
-
#include
#include
@@ -32,7 +28,6 @@
#include <Kokkos_Macros.hpp>
#define DEAL_II_HOST_DEVICE KOKKOS_FUNCTION
-#define DEAL_II_CUDA_HOST_DEV DEAL_II_HOST_DEVICE
#define DEAL_II_HOST_DEVICE_ALWAYS_INLINE KOKKOS_FORCEINLINE_FUNCTION
// clang++ assumes that all constexpr functions are __host__ __device__ when
@@ -775,27 +770,6 @@ namespace internal
}
};
-#ifdef DEAL_II_WITH_CUDA
- template <>
- struct NumberType<cuComplex>
- {
- static cuComplex
- value(const float t)
- {
- return make_cuComplex(t, 0.f);
- }
- };
-
- template <>
- struct NumberType<cuDoubleComplex>
- {
- static cuDoubleComplex
- value(const double t)
- {
- return make_cuDoubleComplex(t, 0.);
- }
- };
-#endif
} // namespace internal
namespace numbers
diff --git a/include/deal.II/base/undefine_macros.h b/include/deal.II/base/undefine_macros.h
index df7290e53c..45cc723c65 100644
--- a/include/deal.II/base/undefine_macros.h
+++ b/include/deal.II/base/undefine_macros.h
@@ -25,14 +25,6 @@
# undef AssertARKode
#endif // #ifdef AssertARKode
-#ifdef AssertCuda
-# undef AssertCuda
-#endif // #ifdef AssertCuda
-
-#ifdef AssertCudaKernel
-# undef AssertCudaKernel
-#endif // #ifdef AssertCudaKernel
-
#ifdef AssertCusolver
# undef AssertCusolver
#endif // #ifdef AssertCusolver
@@ -65,10 +57,6 @@
# undef AssertNothrow
#endif // #ifdef AssertNothrow
-#ifdef AssertNothrowCuda
-# undef AssertNothrowCuda
-#endif // #ifdef AssertNothrowCuda
-
#ifdef AssertNothrowCusparse
# undef AssertNothrowCusparse
#endif // #ifdef AssertNothrowCusparse
diff --git a/include/deal.II/lac/cuda_atomic.h b/include/deal.II/lac/cuda_atomic.h
deleted file mode 100644
index 9fff028e94..0000000000
--- a/include/deal.II/lac/cuda_atomic.h
+++ /dev/null
@@ -1,83 +0,0 @@
-// ------------------------------------------------------------------------
-//
-// SPDX-License-Identifier: LGPL-2.1-or-later
-// Copyright (C) 2016 - 2022 by the deal.II authors
-//
-// This file is part of the deal.II library.
-//
-// Part of the source code is dual licensed under Apache-2.0 WITH
-// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
-// governing the source code and code contributions can be found in
-// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
-//
-// ------------------------------------------------------------------------
-
-#ifndef dealii_cuda_atomic_h
-#define dealii_cuda_atomic_h
-
-#include <deal.II/base/config.h>
-
-#ifdef DEAL_II_WITH_CUDA
-
-DEAL_II_NAMESPACE_OPEN
-
-namespace LinearAlgebra
-{
- namespace CUDAWrappers
- {
- /**
- * Provide atomicMax for floats.
- *
- * @ingroup CUDAWrappers
- */
- inline __device__ float
- atomicMax_wrapper(float *address, float val)
- {
- int *address_as_int = reinterpret_cast<int *>(address);
- int old = *address_as_int, assumed;
- do
- {
- assumed = old;
- old = atomicCAS(address_as_int,
- assumed,
- atomicMax(address_as_int, __float_as_int(val)));
- }
- while (assumed != old);
-
- return __longlong_as_double(old);
- }
-
-
-
- /**
- * Provide atomicMax for doubles.
- *
- * @ingroup CUDAWrappers
- */
- inline __device__ double
- atomicMax_wrapper(double *address, double val)
- {
- unsigned long long int *address_as_ull =
- reinterpret_cast<unsigned long long int *>(address);
- unsigned long long int old = *address_as_ull, assumed;
- do
- {
- assumed = old;
- old = atomicCAS(address_as_ull,
- assumed,
- atomicMax(address_as_ull,
- static_cast<unsigned long long int>(
- __double_as_longlong(val))));
- }
- while (assumed != old);
-
- return __longlong_as_double(old);
- }
- } // namespace CUDAWrappers
-} // namespace LinearAlgebra
-
-DEAL_II_NAMESPACE_CLOSE
-
-#endif
-
-#endif
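
Kokkos provides these atomics for float and double on every backend, so the
hand-rolled compare-and-swap loops are no longer needed. For reference (not
part of this patch):

    #include <Kokkos_Core.hpp>

    // Replaces LinearAlgebra::CUDAWrappers::atomicMax_wrapper(); like the
    // removed wrappers, it returns the value previously stored at 'address':
    template <typename Number>
    KOKKOS_FUNCTION Number
    update_max(Number *address, const Number value)
    {
      return Kokkos::atomic_fetch_max(address, value);
    }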
diff --git a/include/deal.II/lac/cuda_kernels.h b/include/deal.II/lac/cuda_kernels.h
deleted file mode 100644
index 9482670604..0000000000
--- a/include/deal.II/lac/cuda_kernels.h
+++ /dev/null
@@ -1,516 +0,0 @@
-// ------------------------------------------------------------------------
-//
-// SPDX-License-Identifier: LGPL-2.1-or-later
-// Copyright (C) 2018 - 2023 by the deal.II authors
-//
-// This file is part of the deal.II library.
-//
-// Part of the source code is dual licensed under Apache-2.0 WITH
-// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
-// governing the source code and code contributions can be found in
-// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
-//
-// ------------------------------------------------------------------------
-
-#ifndef dealii_cuda_kernels_h
-#define dealii_cuda_kernels_h
-
-#include <deal.II/base/config.h>
-
-#ifdef DEAL_II_WITH_CUDA
-
-
-# include <deal.II/base/cuda_size.h>
-# include <deal.II/base/types.h>
-
-# include <deal.II/lac/cuda_atomic.h>
-
-# include <assert.h>
-
-DEAL_II_NAMESPACE_OPEN
-
-namespace LinearAlgebra
-{
- namespace CUDAWrappers
- {
- /**
- * Namespace containing the CUDA kernels.
- */
- namespace kernel
- {
- using ::dealii::CUDAWrappers::block_size;
- using ::dealii::CUDAWrappers::chunk_size;
- using ::dealii::CUDAWrappers::warp_size;
- using size_type = types::global_dof_index;
-
- /**
- * Multiply each entry of @p val of size @p N by @p a.
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number>
- __global__ void
- vec_scale(Number *val, const Number a, const size_type N);
-
-
-
- /**
- * Functor defining the addition of two Numbers.
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number>
- struct Binop_Addition
- {
- __device__ static inline Number
- operation(const Number a, const Number b)
- {
- return a + b;
- }
- };
-
- template <typename Number>
- struct Binop_Addition<std::complex<Number>>
- {
- __device__ static inline std::complex<Number>
- operation(const std::complex<Number> a, const std::complex<Number>)
- {
- printf("This function is not implemented for std::complex!");
- assert(false);
- return a;
- }
- };
-
-
-
- /**
- * Functor defining the subtraction of two Numbers.
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number>
- struct Binop_Subtraction
- {
- __device__ static inline Number
- operation(const Number a, const Number b)
- {
- return a - b;
- }
- };
-
- template <typename Number>
- struct Binop_Subtraction<std::complex<Number>>
- {
- __device__ static inline std::complex<Number>
- operation(const std::complex<Number> a,
- const std::complex<Number> /*b*/)
- {
- printf("This function is not implemented for std::complex!");
- assert(false);
- return a;
- }
- };
-
-
-
- /**
- * Functor defining the maximum of two Numbers.
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number>
- struct Binop_Max
- {
- __device__ static inline Number
- operation(const Number a, const Number b)
- {
- return a > b ? a : b;
- }
- };
-
- template <typename Number>
- struct Binop_Max<std::complex<Number>>
- {
- __device__ static inline std::complex<Number>
- operation(const std::complex<Number> a, const std::complex<Number>)
- {
- printf("This function is not implemented for std::complex!");
- assert(false);
- return a;
- }
- };
-
-
-
- /**
- * Functor defining the maximum of two Numbers.
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number>
- struct Binop_Min
- {
- __device__ static inline Number
- operation(const Number a, const Number b)
- {
- return a > b ? b : a;
- }
- };
-
- template <typename Number>
- struct Binop_Min<std::complex<Number>>
- {
- __device__ static inline std::complex<Number>
- operation(const std::complex<Number> a, const std::complex<Number>)
- {
- printf("This function is not implemented for std::complex!");
- assert(false);
- return a;
- }
- };
-
-
-
- /**
- * Apply the functor @p Binop to each element of @p v1 and @p v2.
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number, template <typename> class Binop>
- __global__ void
- vector_bin_op(Number *v1, const Number *v2, const size_type N);
-
-
-
- /**
- * Apply the functor @p Binop to the elements of @p v1 that have
- * indices in @p mask and @p v2. The size of @p mask should be greater
- * than the size of @p v1. @p mask and @p v2 should have the same size @p
- * N.
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number, template <typename> class Binop>
- __global__ void
- masked_vector_bin_op(const unsigned int *mask,
- Number *v1,
- const Number *v2,
- const size_type N);
-
-
-
- /**
- * Structure implementing the functions used to add elements when
- * using a reduction.
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number>
- struct ElemSum
- {
- __device__ static Number
- reduction_op(const Number a, const Number b);
-
- __device__ static Number
- atomic_op(Number *dst, const Number a);
-
- __device__ static Number
- element_wise_op(const Number a);
-
- __device__ static Number
- null_value();
- };
-
-
-
- /**
- * Structure implementing the functions used to compute the L1 norm
- * when using a reduction.
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number>
- struct L1Norm
- {
- __device__ static Number
- reduction_op(const Number a, const Number b);
-
- __device__ static Number
- atomic_op(Number *dst, const Number a);
-
- __device__ static Number
- element_wise_op(const Number a);
-
- __device__ static Number
- null_value();
- };
-
-
-
- /**
- * Structure implementing the functions used to compute the L-infinity
- * norm when using a reduction.
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number>
- struct LInfty
- {
- __device__ static Number
- reduction_op(const Number a, const Number b);
-
- __device__ static Number
- atomic_op(Number *dst, const Number a);
-
- __device__ static Number
- element_wise_op(const Number a);
-
- __device__ static Number
- null_value();
- };
-
-
-
- /**
- * Perform a reduction on @p v using @p Operation.
- *
- * @ingroup CUDAWrappers
- */
- template <typename Operation, typename Number>
- __global__ void
- reduction(Number *result, const Number *v, const size_type N);
-
-
-
- /**
- * Structure implementing the functions used to compute the dot
- * product norm when using a double vector reduction.
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number>
- struct DotProduct
- {
- __device__ static Number
- binary_op(const Number a, const Number b);
-
- __device__ static Number
- reduction_op(const Number a, const Number b);
-
- __device__ static Number
- atomic_op(Number *dst, const Number a);
-
- __device__ static Number
- null_value();
- };
-
-
-
- /**
- * Perform a binary operation on each element of @p v1 and @p v2 followed
- * by reduction on the resulting array.
- *
- * @ingroup CUDAWrappers
- */
- template <typename Operation, typename Number>
- __global__ void
- double_vector_reduction(Number *result,
- const Number *v1,
- const Number *v2,
- const size_type N);
-
-
-
- /**
- * Add @p a to each element of @p val.
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number>
- __global__ void
- vec_add(Number *val, const Number a, const size_type N);
-
-
-
- /**
- * Addition of a multiple of a vector, i.e., val += a*V_val.
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number>
- __global__ void
- add_aV(Number *val,
- const Number a,
- const Number *V_val,
- const size_type N);
-
-
-
- /**
- * Addition of multiple scaled vector, i.e., val += a*V_val +
- * b*W_val.
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number>
- __global__ void
- add_aVbW(Number *val,
- const Number a,
- const Number *V_val,
- const Number b,
- const Number *W_val,
- const size_type N);
-
-
-
- /**
- * Scaling and simple addition of a multiple of a vector, i.e.
- * val = s*val + a*V_val
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number>
- __global__ void
- sadd(const Number s,
- Number *val,
- const Number a,
- const Number *V_val,
- const size_type N);
-
-
-
- /**
- * Scaling and multiple additions of scaled vectors, i.e.
- * val = s*val + a*V_val + b*W_val
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number>
- __global__ void
- sadd(const Number s,
- Number *val,
- const Number a,
- const Number *V_val,
- const Number b,
- const Number *W_val,
- const size_type N);
-
-
-
- /**
- * Scale each element of this vector by the corresponding element in
- * the argument.
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number>
- __global__ void
- scale(Number *val, const Number *V_val, const size_type N);
-
-
-
- /**
- * Assignment val = a*V_val.
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number>
- __global__ void
- equ(Number *val, const Number a, const Number *V_val, const size_type N);
-
-
-
- /**
- * Assignment val = a*V_val + b*W_val.
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number>
- __global__ void
- equ(Number *val,
- const Number a,
- const Number *V_val,
- const Number b,
- const Number *W_val,
- const size_type N);
-
-
-
- /**
- * Perform a combined operation of a vector addition and a subsequent
- * inner product, returning the value of the inner product.
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number>
- __global__ void
- add_and_dot(Number *res,
- Number *v1,
- const Number *v2,
- const Number *v3,
- const Number a,
- const size_type N);
-
-
-
- /**
- * Set each element of @p val to @p s.
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number>
- __global__ void
- set(Number *val, const Number s, const size_type N);
-
-
- /**
- * Set each element in @p val to @p v using @p indices as permutation, i.e.,
- * val[indices[i]] = v[i].
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number, typename IndexType>
- __global__ void
- set_permutated(const IndexType *indices,
- Number *val,
- const Number *v,
- const IndexType N);
-
-
-
- /**
- * Set each element in @p val to @p v using @p indices as permutation, i.e.,
- * val[i] = v[indices[i]].
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number, typename IndexType>
- __global__ void
- gather(Number *val,
- const IndexType *indices,
- const Number *v,
- const IndexType N);
-
-
-
- /**
- * Add each element in @p val to @p v using @p indices as permutation, i.e.,
- * val[indices[i]] += v[i].
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number>
- __global__ void
- add_permutated(const size_type *indices,
- Number *val,
- const Number *v,
- const size_type N);
- } // namespace kernel
- } // namespace CUDAWrappers
-} // namespace LinearAlgebra
-
-DEAL_II_NAMESPACE_CLOSE
-
-#endif
-
-#endif
diff --git a/include/deal.II/lac/cuda_kernels.templates.h b/include/deal.II/lac/cuda_kernels.templates.h
deleted file mode 100644
index 6e92d9013b..0000000000
--- a/include/deal.II/lac/cuda_kernels.templates.h
+++ /dev/null
@@ -1,592 +0,0 @@
-// ------------------------------------------------------------------------
-//
-// SPDX-License-Identifier: LGPL-2.1-or-later
-// Copyright (C) 2018 - 2023 by the deal.II authors
-//
-// This file is part of the deal.II library.
-//
-// Part of the source code is dual licensed under Apache-2.0 WITH
-// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
-// governing the source code and code contributions can be found in
-// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
-//
-// ------------------------------------------------------------------------
-
-#ifndef dealii_cuda_kernels_templates_h
-#define dealii_cuda_kernels_templates_h
-
-#include <deal.II/base/config.h>
-
-#include <deal.II/lac/cuda_kernels.h>
-
-#ifdef DEAL_II_WITH_CUDA
-
-DEAL_II_NAMESPACE_OPEN
-
-namespace LinearAlgebra
-{
- namespace CUDAWrappers
- {
- namespace kernel
- {
- template <typename Number>
- __global__ void
- vec_scale(Number *val, const Number a, const size_type N)
- {
- const size_type idx_base =
- threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
- for (unsigned int i = 0; i < chunk_size; ++i)
- {
- const size_type idx = idx_base + i * block_size;
- if (idx < N)
- val[idx] *= a;
- }
- }
-
-
-
- template <typename Number, template <typename> class Binop>
- __global__ void
- vector_bin_op(Number *v1, const Number *v2, const size_type N)
- {
- const size_type idx_base =
- threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
- for (unsigned int i = 0; i < chunk_size; ++i)
- {
- const size_type idx = idx_base + i * block_size;
- if (idx < N)
- v1[idx] = Binop<Number>::operation(v1[idx], v2[idx]);
- }
- }
-
-
-
- template <typename Number, template <typename> class Binop>
- __global__ void
- masked_vector_bin_op(const unsigned int *mask,
- Number *v1,
- const Number *v2,
- const size_type N)
- {
- const size_type idx_base =
- threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
- for (unsigned int i = 0; i < chunk_size; ++i)
- {
- const size_type idx = idx_base + i * block_size;
- if (idx < N)
- v1[mask[idx]] = Binop<Number>::operation(v1[mask[idx]], v2[idx]);
- }
- }
-
-
- template <typename Number>
- __device__ Number
- ElemSum<Number>::reduction_op(const Number a, const Number b)
- {
- return (a + b);
- }
-
-
-
- template <typename Number>
- __device__ Number
- ElemSum<Number>::atomic_op(Number *dst, const Number a)
- {
- return atomicAdd(dst, a);
- }
-
-
-
- template <typename Number>
- __device__ Number
- ElemSum<Number>::element_wise_op(const Number a)
- {
- return a;
- }
-
-
-
- template <typename Number>
- __device__ Number
- ElemSum<Number>::null_value()
- {
- return Number();
- }
-
-
-
- template <typename Number>
- __device__ Number
- L1Norm<Number>::reduction_op(const Number a, const Number b)
- {
- return (a + b);
- }
-
-
-
- template <typename Number>
- __device__ Number
- L1Norm<Number>::atomic_op(Number *dst, const Number a)
- {
- return atomicAdd(dst, a);
- }
-
-
-
- template <typename Number>
- __device__ Number
- L1Norm<Number>::element_wise_op(const Number a)
- {
- return std::fabs(a);
- }
-
-
-
- template <typename Number>
- __device__ Number
- L1Norm<Number>::null_value()
- {
- return Number();
- }
-
-
-
- template <typename Number>
- __device__ Number
- LInfty<Number>::reduction_op(const Number a, const Number b)
- {
- if (a > b)
- return a;
- else
- return b;
- }
-
-
-
- template <typename Number>
- __device__ Number
- LInfty<Number>::atomic_op(Number *dst, const Number a)
- {
- return atomicMax_wrapper(dst, a);
- }
-
-
-
- template <typename Number>
- __device__ Number
- LInfty<Number>::element_wise_op(const Number a)
- {
- return std::fabs(a);
- }
-
-
-
- template <typename Number>
- __device__ Number
- LInfty<Number>::null_value()
- {
- return Number();
- }
-
-
-
- template <typename Number, typename Operation>
- __device__ void
- reduce(Number *result,
- volatile Number *result_buffer,
- const size_type local_idx,
- const size_type /*global_idx*/,
- const size_type /*N*/)
- {
- for (size_type s = block_size / 2; s > warp_size; s = s >> 1)
- {
- if (local_idx < s)
- result_buffer[local_idx] =
- Operation::reduction_op(result_buffer[local_idx],
- result_buffer[local_idx + s]);
- __syncthreads();
- }
-
- if (local_idx < warp_size)
- {
- for (size_type s = warp_size; s > 0; s = s >> 1)
- {
- result_buffer[local_idx] =
- Operation::reduction_op(result_buffer[local_idx],
- result_buffer[local_idx + s]);
- }
- }
-
- if (local_idx == 0)
- Operation::atomic_op(result, result_buffer[0]);
- }
-
-
-
- template <typename Operation, typename Number>
- __global__ void
- reduction(Number *result, const Number *v, const size_type N)
- {
- __shared__ Number result_buffer[block_size];
-
- const size_type global_idx =
- threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
- const size_type local_idx = threadIdx.x;
-
- if (global_idx < N)
- result_buffer[local_idx] = Operation::element_wise_op(v[global_idx]);
- else
- result_buffer[local_idx] = Operation::null_value();
-
- __syncthreads();
-
- reduce<Number, Operation>(
- result, result_buffer, local_idx, global_idx, N);
- }
-
-
-
- template <typename Number>
- __device__ Number
- DotProduct<Number>::binary_op(const Number a, const Number b)
- {
- return a * b;
- }
-
-
-
- template <typename Number>
- __device__ Number
- DotProduct<Number>::reduction_op(const Number a, const Number b)
- {
- return a + b;
- }
-
-
-
- template <typename Number>
- __device__ Number
- DotProduct<Number>::atomic_op(Number *dst, const Number a)
- {
- return atomicAdd(dst, a);
- }
-
-
-
- template <typename Number>
- __device__ Number
- DotProduct<Number>::null_value()
- {
- return Number();
- }
-
-
-
- template <typename Operation, typename Number>
- __global__ void
- double_vector_reduction(Number *result,
- const Number *v1,
- const Number *v2,
- const size_type N)
- {
- __shared__ Number result_buffer[block_size];
-
- const size_type global_idx =
- threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
- const size_type local_idx = threadIdx.x;
-
- if (global_idx < N)
- result_buffer[local_idx] =
- Operation::binary_op(v1[global_idx], v2[global_idx]);
- else
- result_buffer[local_idx] = Operation::null_value();
-
- for (unsigned int i = 1; i < chunk_size; ++i)
- {
- const size_type idx = global_idx + i * block_size;
- if (idx < N)
- result_buffer[local_idx] =
- Operation::reduction_op(result_buffer[local_idx],
- Operation::binary_op(v1[idx], v2[idx]));
- }
-
- __syncthreads();
-
- reduce<Number, Operation>(
- result, result_buffer, local_idx, global_idx, N);
- }
-
-
-
- template <typename Number>
- __global__ void
- vec_add(Number *val, const Number a, const size_type N)
- {
- const size_type idx_base =
- threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
- for (unsigned int i = 0; i < chunk_size; ++i)
- {
- const size_type idx = idx_base + i * block_size;
- if (idx < N)
- val[idx] += a;
- }
- }
-
-
-
- template <typename Number>
- __global__ void
- add_aV(Number *val,
- const Number a,
- const Number *V_val,
- const size_type N)
- {
- const size_type idx_base =
- threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
- for (unsigned int i = 0; i < chunk_size; ++i)
- {
- const size_type idx = idx_base + i * block_size;
- if (idx < N)
- val[idx] += a * V_val[idx];
- }
- }
-
-
-
- template <typename Number>
- __global__ void
- add_aVbW(Number *val,
- const Number a,
- const Number *V_val,
- const Number b,
- const Number *W_val,
- const size_type N)
- {
- const size_type idx_base =
- threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
- for (unsigned int i = 0; i < chunk_size; ++i)
- {
- const size_type idx = idx_base + i * block_size;
- if (idx < N)
- val[idx] += a * V_val[idx] + b * W_val[idx];
- }
- }
-
-
-
- template <typename Number>
- __global__ void
- sadd(const Number s,
- Number *val,
- const Number a,
- const Number *V_val,
- const size_type N)
- {
- const size_type idx_base =
- threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
- for (unsigned int i = 0; i < chunk_size; ++i)
- {
- const size_type idx = idx_base + i * block_size;
- if (idx < N)
- val[idx] = s * val[idx] + a * V_val[idx];
- }
- }
-
-
-
- template <typename Number>
- __global__ void
- sadd(const Number s,
- Number *val,
- const Number a,
- const Number *V_val,
- const Number b,
- const Number *W_val,
- const size_type N)
- {
- const size_type idx_base =
- threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
- for (unsigned int i = 0; i < chunk_size; ++i)
- {
- const size_type idx = idx_base + i * block_size;
- if (idx < N)
- val[idx] = s * val[idx] + a * V_val[idx] + b * W_val[idx];
- }
- }
-
-
-
- template <typename Number>
- __global__ void
- scale(Number *val, const Number *V_val, const size_type N)
- {
- const size_type idx_base =
- threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
- for (unsigned int i = 0; i < chunk_size; ++i)
- {
- const size_type idx = idx_base + i * block_size;
- if (idx < N)
- val[idx] *= V_val[idx];
- }
- }
-
-
-
- template <typename Number>
- __global__ void
- equ(Number *val, const Number a, const Number *V_val, const size_type N)
- {
- const size_type idx_base =
- threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
- for (unsigned int i = 0; i < chunk_size; ++i)
- {
- const size_type idx = idx_base + i * block_size;
- if (idx < N)
- val[idx] = a * V_val[idx];
- }
- }
-
-
-
- template <typename Number>
- __global__ void
- equ(Number *val,
- const Number a,
- const Number *V_val,
- const Number b,
- const Number *W_val,
- const size_type N)
- {
- const size_type idx_base =
- threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
- for (unsigned int i = 0; i < chunk_size; ++i)
- {
- const size_type idx = idx_base + i * block_size;
- if (idx < N)
- val[idx] = a * V_val[idx] + b * W_val[idx];
- }
- }
-
-
-
- template <typename Number>
- __global__ void
- add_and_dot(Number *res,
- Number *v1,
- const Number *v2,
- const Number *v3,
- const Number a,
- const size_type N)
- {
- __shared__ Number res_buf[block_size];
-
- const unsigned int global_idx =
- threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
- const unsigned int local_idx = threadIdx.x;
- if (global_idx < N)
- {
- v1[global_idx] += a * v2[global_idx];
- res_buf[local_idx] =
- v1[global_idx] *
- Number(numbers::NumberTraits<Number>::conjugate(v3[global_idx]));
- }
- else
- res_buf[local_idx] = 0.;
-
- for (unsigned int i = 1; i < chunk_size; ++i)
- {
- const unsigned int idx = global_idx + i * block_size;
- if (idx < N)
- {
- v1[idx] += a * v2[idx];
- res_buf[local_idx] += v1[idx] * v3[idx];
- }
- }
-
- __syncthreads();
-
- reduce<Number, DotProduct<Number>>(
- res, res_buf, local_idx, global_idx, N);
- }
-
-
-
- template <typename Number>
- __global__ void
- set(Number *val, const Number s, const size_type N)
- {
- const size_type idx_base =
- threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
- for (unsigned int i = 0; i < chunk_size; ++i)
- {
- const size_type idx = idx_base + i * block_size;
- if (idx < N)
- val[idx] = s;
- }
- }
-
-
-
- template <typename Number, typename IndexType>
- __global__ void
- set_permutated(const IndexType *indices,
- Number *val,
- const Number *v,
- const IndexType N)
- {
- const size_type idx_base =
- threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
- for (unsigned int i = 0; i < chunk_size; ++i)
- {
- const size_type idx = idx_base + i * block_size;
- if (idx < N)
- val[indices[idx]] = v[idx];
- }
- }
-
-
-
- template <typename Number, typename IndexType>
- __global__ void
- gather(Number *val,
- const IndexType *indices,
- const Number *v,
- const IndexType N)
- {
- const IndexType idx_base =
- threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
- for (unsigned int i = 0; i < chunk_size; ++i)
- {
- const IndexType idx = idx_base + i * block_size;
- if (idx < N)
- val[idx] = v[indices[idx]];
- }
- }
-
-
-
- template <typename Number>
- __global__ void
- add_permutated(const size_type *indices,
- Number *val,
- const Number *v,
- const size_type N)
- {
- const size_type idx_base =
- threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
- for (unsigned int i = 0; i < chunk_size; ++i)
- {
- const size_type idx = idx_base + i * block_size;
- if (idx < N)
- val[indices[idx]] += v[idx];
- }
- }
- } // namespace kernel
- } // namespace CUDAWrappers
-} // namespace LinearAlgebra
-
-DEAL_II_NAMESPACE_CLOSE
-
-#endif
-
-#endif
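
The reduce() helper deleted above implements the classic two-stage shared-memory tree reduction, with the final warp unrolled through a volatile warp-synchronous loop. A simplified, self-contained sketch of the same idea (sum reduction only; the warp-level unrolling is omitted and every step synchronizes, which is slower but easier to verify):

// Assumes the kernel is launched with exactly block_size threads per block
// and block_size is a power of two.
constexpr unsigned int block_size = 512;

template <typename Number>
__global__ void
sum_reduction_sketch(Number *result, const Number *v, const unsigned int N)
{
  __shared__ Number buffer[block_size];

  const unsigned int global_idx = threadIdx.x + blockIdx.x * blockDim.x;
  const unsigned int local_idx  = threadIdx.x;

  // Load one element per thread; out-of-range threads contribute the
  // identity element of the reduction.
  buffer[local_idx] = (global_idx < N) ? v[global_idx] : Number();
  __syncthreads();

  // Halve the number of active threads in every step until buffer[0]
  // holds the sum over the whole block.
  for (unsigned int s = block_size / 2; s > 0; s >>= 1)
    {
      if (local_idx < s)
        buffer[local_idx] += buffer[local_idx + s];
      __syncthreads();
    }

  // One atomic per block folds the per-block results into *result.
  // (atomicAdd on double requires compute capability 6.0 or newer.)
  if (local_idx == 0)
    atomicAdd(result, buffer[0]);
}
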
diff --git a/include/deal.II/lac/cuda_precondition.h b/include/deal.II/lac/cuda_precondition.h
deleted file mode 100644
index 213386ade4..0000000000
--- a/include/deal.II/lac/cuda_precondition.h
+++ /dev/null
@@ -1,521 +0,0 @@
-// ------------------------------------------------------------------------
-//
-// SPDX-License-Identifier: LGPL-2.1-or-later
-// Copyright (C) 2018 - 2023 by the deal.II authors
-//
-// This file is part of the deal.II library.
-//
-// Part of the source code is dual licensed under Apache-2.0 WITH
-// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
-// governing the source code and code contributions can be found in
-// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
-//
-// ------------------------------------------------------------------------
-
-#ifndef dealii_cuda_precondition_h
-#define dealii_cuda_precondition_h
-
-#include <deal.II/base/config.h>
-
-#include <deal.II/base/cuda.h>
-#include <deal.II/base/smartpointer.h>
-
-#include <memory>
-
-#ifdef DEAL_II_WITH_CUDA
-
-DEAL_II_NAMESPACE_OPEN
-
-// forward-definition
-# ifndef DOXYGEN
-namespace LinearAlgebra
-{
- namespace CUDAWrappers
- {
- template <typename Number>
- class Vector;
- }
-} // namespace LinearAlgebra
-# endif
-
-namespace CUDAWrappers
-{
- // forward definition
- template <typename Number>
- class SparseMatrix;
-
- /**
- * This class implements an incomplete Cholesky factorization (IC)
- * preconditioner for @em symmetric CUDAWrappers::SparseMatrix matrices.
- *
- * The implementation closely follows the one documented in the cuSPARSE
- * documentation
- * (https://docs.nvidia.com/cuda/cusparse/index.html#cusparse-lt-t-gt-csric02).
- *
- * @note Instantiations for this template are provided for @<float@> and
- * @<double@>.
- *
- * @ingroup Preconditioners CUDAWrappers
- */
- template <typename Number>
- class PreconditionIC
- {
- public:
- /**
- * Declare the type for container size.
- */
- using size_type = int;
-
- /**
- * Standardized data struct to pipe additional flags to the
- * preconditioner.
- */
- struct AdditionalData
- {
- /**
- * Constructor. cuSPARSE allows one to compute and use level information.
- * According to the documentation this might improve performance.
- * It is suggested to try both options.
- */
- AdditionalData(bool use_level_analysis = true);
-
- /**
- * Flag that determines if level information is used when creating and
- * applying the preconditioner. See the documentation for
- * cusparseSolvePolicy_t at
- * https://docs.nvidia.com/cuda/cusparse/index.html#cusparsesolvepolicy_t
- * for more information.
- */
- bool use_level_analysis;
- };
-
- /**
- * Constructor.
- */
- PreconditionIC(const Utilities::CUDA::Handle &handle);
-
- /**
- * The copy constructor is deleted.
- */
- PreconditionIC(const PreconditionIC &) = delete;
-
- /**
- * The copy assignment operator is deleted.
- */
- PreconditionIC &
- operator=(const PreconditionIC &) = delete;
-
- /**
- * Destructor. Free all resources that were initialized in this class.
- */
- ~PreconditionIC();
-
- /**
- * Initialize this object. In particular, the given matrix is copied to be
- * modified in-place. Pointers to the underlying sparsity pattern are
- * stored. Specifically, this means
- * that the current object can only be used reliably as long as @p matrix is valid
- * and has not been changed since calling this function.
- *
- * The @p additional_data determines whether level information is used.
- */
- void
- initialize(const SparseMatrix<Number> &matrix,
- const AdditionalData &additional_data = AdditionalData());
-
- /**
- * Apply the preconditioner.
- */
- void
- vmult(LinearAlgebra::CUDAWrappers::Vector<Number> &dst,
- const LinearAlgebra::CUDAWrappers::Vector<Number> &src) const;
-
- /**
- * Apply the preconditioner. Since the preconditioner is symmetric, this
- * is the same as vmult().
- */
- void
- Tvmult(LinearAlgebra::CUDAWrappers::Vector<Number> &dst,
- const LinearAlgebra::CUDAWrappers::Vector<Number> &src) const;
-
- /**
- * Return the dimension of the codomain (or range) space. Note that the
- * matrix is square and has dimension $m \times m$.
- *
- * @note This function should only be called if the preconditioner has been
- * initialized.
- */
- size_type
- m() const;
-
- /**
- * Return the dimension of the domain space. Note that the
- * matrix is square and has dimension $n \times n$.
- *
- * @note This function should only be called if the preconditioner has been
- * initialized.
- */
- size_type
- n() const;
-
- private:
- /**
- * cuSPARSE handle used to call cuSPARSE functions.
- */
- cusparseHandle_t cusparse_handle;
-
- /**
- * cuSPARSE description of the sparse matrix $M=LL^T$.
- */
- cusparseMatDescr_t descr_M;
-
- /**
- * cuSPARSE description of the lower triangular matrix $L$.
- */
- cusparseMatDescr_t descr_L;
-
- /**
- * Solve and analysis structure for $M=LL^T$.
- */
- csric02Info_t info_M;
-
- /**
- * Solve and analysis structure for the lower triangular matrix $L$.
- */
- csrsv2Info_t info_L;
-
- /**
- * Solve and analysis structure for the upper triangular matrix $L^T$.
- */
- csrsv2Info_t info_Lt;
-
- /**
- * Pointer to the matrix this object was initialized with.
- */
- SmartPointer<const SparseMatrix<Number>> matrix_pointer;
-
- /**
- * Pointer to the values (on the @ref GlossDevice "device") of the computed preconditioning
- * matrix.
- */
- std::unique_ptr<Number[], void (*)(Number *)> P_val_dev;
-
- /**
- * Pointer to the row pointer (on the @ref GlossDevice "device") of the sparse matrix this
- * object was initialized with. Guarded by matrix_pointer.
- */
- const int *P_row_ptr_dev;
-
- /**
- * Pointer to the column indices (on the @ref GlossDevice "device") of the sparse matrix this
- * object was initialized with. Guarded by matrix_pointer.
- */
- const int *P_column_index_dev;
-
- /**
- * Pointer to the value (on the @ref GlossDevice "device") for a temporary (helper) vector
- * used in vmult().
- */
- std::unique_ptr<Number[], void (*)(Number *)> tmp_dev;
-
- /**
- * Pointer to an internal buffer (on the @ref GlossDevice "device") that is used for
- * computing the decomposition.
- */
- std::unique_ptr<void, void (*)(void *)> buffer_dev;
-
- /**
- * Determine if level information should be generated for the lower
- * triangular matrix $L$. This value can be modified through an
- * AdditionalData object.
- */
- cusparseSolvePolicy_t policy_L;
-
- /**
- * Determine if level information should be generated for the upper
- * triangular matrix $L^T$. This value can be modified through an
- * AdditionalData object.
- */
- cusparseSolvePolicy_t policy_Lt;
-
- /**
- * Determine if level information should be generated for $M=LL^T$. This
- * value can be modified through an AdditionalData object.
- */
- cusparseSolvePolicy_t policy_M;
-
- /**
- * The number of rows is the same as for the matrix this object has been
- * initialized with.
- */
- int n_rows;
-
- /**
- * The number of non-zero elements is the same as for the matrix this
- * object has been initialized with.
- */
- int n_nonzero_elements;
- };
-
- /**
- * This class implements an incomplete LU factorization preconditioner for
- * CUDAWrappers::SparseMatrix matrices.
- *
- * The implementation closely follows the one documented in the cuSPARSE
- * documentation
- * (https://docs.nvidia.com/cuda/cusparse/index.html#cusparse-lt-t-gt-csrilu02).
- *
- * @note Instantiations for this template are provided for @<float@> and
- * @<double@>.
- *
- * @ingroup Preconditioners CUDAWrappers
- */
- template <typename Number>
- class PreconditionILU
- {
- public:
- /**
- * Declare the type for container size.
- */
- using size_type = int;
-
- /**
- * Standardized data struct to pipe additional flags to the
- * preconditioner.
- */
- struct AdditionalData
- {
- /**
- * Constructor. cuSPARSE allows one to compute and use level information.
- * According to the documentation this might improve performance.
- * It is suggested to try both options.
- */
- AdditionalData(bool use_level_analysis = true);
-
- /**
- * Flag that determines if level information is used when creating and
- * applying the preconditioner. See the documentation for
- * cusparseSolvePolicy_t at
- * https://docs.nvidia.com/cuda/cusparse/index.html#cusparsesolvepolicy_t
- * for more information.
- */
- bool use_level_analysis;
- };
-
- /**
- * Constructor.
- */
- PreconditionILU(const Utilities::CUDA::Handle &handle);
-
- /**
- * The copy constructor is deleted.
- */
- PreconditionILU(const PreconditionILU &) = delete;
-
- /**
- * The copy assignment operator is deleted.
- */
- PreconditionILU &
- operator=(const PreconditionILU &) = delete;
-
- /**
- * Destructor. Free all resources that were initialized in this class.
- */
- ~PreconditionILU();
-
- /**
- * Initialize this object. In particular, the given matrix is copied to be
- * modified in-place. Pointers to the underlying sparsity pattern are
- * stored. Specifically, this means
- * that the current object can only be used reliably as long as @p matrix is valid
- * and has not been changed since calling this function.
- *
- * The @p additional_data determines whether level information is used.
- */
- void
- initialize(const SparseMatrix<Number> &matrix,
- const AdditionalData &additional_data = AdditionalData());
-
- /**
- * Apply the preconditioner.
- */
- void
- vmult(LinearAlgebra::CUDAWrappers::Vector<Number> &dst,
- const LinearAlgebra::CUDAWrappers::Vector<Number> &src) const;
-
- /**
- * Apply the transposed preconditioner. Not yet implemented.
- */
- void
- Tvmult(LinearAlgebra::CUDAWrappers::Vector<Number> &dst,
- const LinearAlgebra::CUDAWrappers::Vector<Number> &src) const;
-
- /**
- * Return the dimension of the codomain (or range) space. Note that the
- * matrix is square and has dimension $m \times m$.
- *
- * @note This function should only be called if the preconditioner has been
- * initialized.
- */
- size_type
- m() const;
-
- /**
- * Return the dimension of the domain space. Note that the
- * matrix is square and has dimension $n \times n$.
- *
- * @note This function should only be called if the preconditioner has been
- * initialized.
- */
- size_type
- n() const;
-
- private:
- /**
- * cuSPARSE handle used to call cuSPARSE functions.
- */
- cusparseHandle_t cusparse_handle;
-
- /**
- * cuSPARSE description of the sparse matrix $M=LU$.
- */
- cusparseMatDescr_t descr_M;
-
- /**
- * cuSPARSE description of the lower triangular matrix $L$.
- */
- cusparseMatDescr_t descr_L;
-
- /**
- * cuSPARSE description of the upper triangular matrix $U$.
- */
- cusparseMatDescr_t descr_U;
-
- /**
- * Solve and analysis structure for $M=LU$.
- */
- csrilu02Info_t info_M;
-
- /**
- * Solve and analysis structure for the lower triangular matrix $L$.
- */
- csrsv2Info_t info_L;
-
- /**
- * Solve and analysis structure for the upper triangular matrix $U$.
- */
- csrsv2Info_t info_U;
-
- /**
- * Pointer to the matrix this object was initialized with.
- */
- SmartPointer<const SparseMatrix<Number>> matrix_pointer;
-
- /**
- * Pointer to the values (on the @ref GlossDevice "device") of the computed preconditioning
- * matrix.
- */
- std::unique_ptr<Number[], void (*)(Number *)> P_val_dev;
-
- /**
- * Pointer to the row pointer (on the @ref GlossDevice "device") of the sparse matrix this
- * object was initialized with. Guarded by matrix_pointer.
- */
- const int *P_row_ptr_dev;
-
- /**
- * Pointer to the column indices (on the @ref GlossDevice "device") of the sparse matrix this
- * object was initialized with. Guarded by matrix_pointer.
- */
- const int *P_column_index_dev;
-
- /**
- * Pointer to the value (on the @ref GlossDevice "device") for a temporary (helper) vector
- * used in vmult().
- */
- std::unique_ptr<Number[], void (*)(Number *)> tmp_dev;
-
- /**
- * Pointer to an internal buffer (on the @ref GlossDevice "device") that is used for
- * computing the decomposition.
- */
- std::unique_ptr<void, void (*)(void *)> buffer_dev;
-
- /**
- * Determine if level information should be generated for the lower
- * triangular matrix $L$. This value can be modified through an
- * AdditionalData object.
- */
- cusparseSolvePolicy_t policy_L;
-
- /**
- * Determine if level information should be generated for the upper
- * triangular matrix $U$. This value can be modified through an
- * AdditionalData object.
- */
- cusparseSolvePolicy_t policy_U;
-
- /**
- * Determine if level information should be generated for $M=LU$. This
- * value can be modified through an AdditionalData object.
- */
- cusparseSolvePolicy_t policy_M;
-
- /**
- * The number of rows is the same as for the matrix this object has been
- * initialized with.
- */
- int n_rows;
-
- /**
- * The number of non-zero elements is the same as for the matrix this
- * object has been initialized with.
- */
- int n_nonzero_elements;
- };
-
- /*--------------------------- inline functions ----------------------------*/
-
-# ifndef DOXYGEN
- template <typename Number>
- inline typename PreconditionIC<Number>::size_type
- PreconditionIC<Number>::m() const
- {
- return n_rows;
- }
-
-
-
- template <typename Number>
- inline typename PreconditionIC<Number>::size_type
- PreconditionIC<Number>::n() const
- {
- return n_rows;
- }
-
-
-
- template <typename Number>
- inline typename PreconditionILU<Number>::size_type
- PreconditionILU<Number>::m() const
- {
- return n_rows;
- }
-
-
-
- template <typename Number>
- inline typename PreconditionILU<Number>::size_type
- PreconditionILU<Number>::n() const
- {
- return n_rows;
- }
-# endif // DOXYGEN
-
-} // namespace CUDAWrappers
-
-DEAL_II_NAMESPACE_CLOSE
-
-#endif // DEAL_II_WITH_CUDA
-
-#endif // dealii_cuda_precondition_h
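
Taken together with the cuSPARSE-based SparseMatrix removed below, these preconditioners were used in the pattern sketched here. This is a minimal sketch against the deleted declarations, not code from the library; matrix_host, rhs_dev, and solution_dev are hypothetical, pre-assembled objects, and the snippet assumes `using namespace dealii;`.

// Sketch of the removed API (pre-patch deal.II): solve A x = b on the GPU
// with CG preconditioned by the incomplete Cholesky factorization above.
void
solve_with_ic(const dealii::SparseMatrix<double>                &matrix_host,
              LinearAlgebra::CUDAWrappers::Vector<double>       &solution_dev,
              const LinearAlgebra::CUDAWrappers::Vector<double> &rhs_dev)
{
  Utilities::CUDA::Handle cuda_handle;

  // Copy the host matrix to the device in cuSPARSE CSR format.
  CUDAWrappers::SparseMatrix<double> matrix_dev(cuda_handle, matrix_host);

  // Build the incomplete Cholesky factorization in-place on the device.
  CUDAWrappers::PreconditionIC<double> preconditioner(cuda_handle);
  preconditioner.initialize(matrix_dev);

  // Run a deal.II Krylov solver directly on device vectors.
  SolverControl                                         control(1000, 1e-12);
  SolverCG<LinearAlgebra::CUDAWrappers::Vector<double>> cg(control);
  cg.solve(matrix_dev, solution_dev, rhs_dev, preconditioner);
}
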
diff --git a/include/deal.II/lac/cuda_solver_direct.h b/include/deal.II/lac/cuda_solver_direct.h
deleted file mode 100644
index e5948e9392..0000000000
--- a/include/deal.II/lac/cuda_solver_direct.h
+++ /dev/null
@@ -1,118 +0,0 @@
-// ------------------------------------------------------------------------
-//
-// SPDX-License-Identifier: LGPL-2.1-or-later
-// Copyright (C) 2018 - 2024 by the deal.II authors
-//
-// This file is part of the deal.II library.
-//
-// Part of the source code is dual licensed under Apache-2.0 WITH
-// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
-// governing the source code and code contributions can be found in
-// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
-//
-// ------------------------------------------------------------------------
-
-#ifndef dealii_cuda_solver_direct_h
-#define dealii_cuda_solver_direct_h
-
-#include <deal.II/base/config.h>
-
-#ifdef DEAL_II_WITH_CUDA
-# include <deal.II/base/cuda.h>
-
-# include <deal.II/lac/cuda_sparse_matrix.h>
-# include <deal.II/lac/cuda_vector.h>
-# include <deal.II/lac/solver_control.h>
-
-DEAL_II_NAMESPACE_OPEN
-
-namespace CUDAWrappers
-{
- /**
- * Direct solvers. These solvers call cuSOLVER underneath.
- *
- * @note Instantiations for this template are provided for @<float@>
- * and @<double@>.
- *
- * @ingroup CUDAWrappers
- */
- template <typename Number>
- class SolverDirect
- {
- public:
- /**
- * Struct for additional settings for SolverDirect.
- */
- struct AdditionalData
- {
- /**
- * Set the additional data field to the desired solver.
- */
- explicit AdditionalData(const std::string &solver_type = "LU_dense");
-
- /**
- * Set the solver type. Possibilities are:
- *
- * - "Cholesky", which performs a Cholesky decomposition on the
- * @ref GlossDevice "device"
- * - "LU_dense", which converts the sparse matrix to a dense
- * matrix and uses LU factorization
- * - "LU_host", which uses LU factorization on the host
- */
- std::string solver_type;
- };
-
- /**
- * Constructor. Takes the solver control object and creates the solver.
- */
- SolverDirect(const Utilities::CUDA::Handle &handle,
- SolverControl &cn,
- const AdditionalData &data = AdditionalData());
-
- /**
- * Destructor.
- */
- virtual ~SolverDirect() = default;
-
- /**
- * Solve the linear system Ax=b.
- */
- void
- solve(const SparseMatrix<Number> &A,
- LinearAlgebra::CUDAWrappers::Vector<Number> &x,
- const LinearAlgebra::CUDAWrappers::Vector<Number> &b);
-
- /**
- * Access to object that controls convergence.
- */
- SolverControl &
- control() const;
-
- private:
- /**
- * The CUDA handle used to call cuSOLVER and cuSPARSE functions.
- */
- const Utilities::CUDA::Handle &cuda_handle;
-
- /**
- * Reference to the object that controls convergence of the iterative
- * solver. In fact, for these CUDA wrappers, cuSOLVER and cuSPARSE do so
- * themselves, but we copy the data from this object before starting the
- * solution process, and copy the data back into it afterwards.
- */
- SolverControl &solver_control;
-
- /**
- * Store a copy of the flags for this particular solver.
- */
- const AdditionalData additional_data;
- };
-} // namespace CUDAWrappers
-
-DEAL_II_NAMESPACE_CLOSE
-
-#endif
-
-#endif
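
For completeness, the direct solver removed here was driven as sketched below, again against the deleted declarations with the same hypothetical device objects as in the preconditioner sketch above.

// Sketch of the removed cuSOLVER-backed interface: factor once, solve once.
void
solve_direct(const Utilities::CUDA::Handle                     &cuda_handle,
             const CUDAWrappers::SparseMatrix<double>          &A,
             LinearAlgebra::CUDAWrappers::Vector<double>       &x,
             const LinearAlgebra::CUDAWrappers::Vector<double> &b)
{
  SolverControl control(100, 1e-10);

  // The solver_type string is one of "Cholesky", "LU_dense", or "LU_host",
  // matching the AdditionalData documentation deleted above.
  const CUDAWrappers::SolverDirect<double>::AdditionalData data("Cholesky");

  CUDAWrappers::SolverDirect<double> solver(cuda_handle, control, data);
  solver.solve(A, x, b);
}
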
diff --git a/include/deal.II/lac/cuda_sparse_matrix.h b/include/deal.II/lac/cuda_sparse_matrix.h
deleted file mode 100644
index bda2f88066..0000000000
--- a/include/deal.II/lac/cuda_sparse_matrix.h
+++ /dev/null
@@ -1,531 +0,0 @@
-// ------------------------------------------------------------------------
-//
-// SPDX-License-Identifier: LGPL-2.1-or-later
-// Copyright (C) 2018 - 2023 by the deal.II authors
-//
-// This file is part of the deal.II library.
-//
-// Part of the source code is dual licensed under Apache-2.0 WITH
-// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
-// governing the source code and code contributions can be found in
-// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
-//
-// ------------------------------------------------------------------------
-
-#ifndef dealii_cuda_sparse_matrix_h
-#define dealii_cuda_sparse_matrix_h
-
-#include <deal.II/base/config.h>
-
-#include <deal.II/base/subscriptor.h>
-
-#include <iomanip>
-
-#ifdef DEAL_II_WITH_CUDA
-# include <deal.II/base/cuda.h>
-
-# include <deal.II/lac/cuda_vector.h>
-# include <deal.II/lac/sparse_matrix.h>
-
-# include <cusparse.h>
-
-DEAL_II_NAMESPACE_OPEN
-
-namespace CUDAWrappers
-{
- /**
- * This class is a wrapper around a cuSPARSE CSR sparse matrix. Unlike deal.II's
- * own SparseMatrix, all elements within each row are stored in increasing
- * column index order.
- *
- * @note Instantiations for this template are provided for @<float@> and
- * @<double@>.
- *
- * @ingroup Matrix1
- */
- template <typename Number>
- class SparseMatrix : public virtual Subscriptor
- {
- public:
- /**
- * Declare type for container size.
- */
- using size_type = int;
-
- /**
- * Type of the matrix entries.
- */
- using value_type = Number;
-
- /**
- * Declare a type that holds real-valued numbers with the same precision
- * as the template argument to this class.
- */
- using real_type = Number;
-
- /**
- * @name Constructors and initialization
- */
- /** @{ */
- /**
- * Constructor. Initialize the matrix to be empty, without any structure,
- * i.e., the matrix is not usable at all. This constructor is therefore
- * only useful for matrices which are members of a class.
- *
- * You have to initialize the matrix before usage with reinit.
- */
- SparseMatrix();
-
- /**
- * Constructor. Takes a Utilities::CUDA::Handle and a sparse matrix on the
- * host. The sparse matrix on the host is copied to the @ref GlossDevice "device" and the
- * elements are reordered according to the format supported by cuSPARSE.
- */
- SparseMatrix(Utilities::CUDA::Handle &handle,
- const ::dealii::SparseMatrix<Number> &sparse_matrix_host);
-
- /**
- * Move constructor. Create a new SparseMatrix by stealing the internal
- * data.
- */
- SparseMatrix(CUDAWrappers::SparseMatrix &&);
-
- /**
- * Copy constructor is deleted.
- */
- SparseMatrix(const CUDAWrappers::SparseMatrix &) = delete;
-
- /**
- * Destructor. Free all memory.
- */
- ~SparseMatrix();
-
- /**
- * Move assignment operator.
- */
- SparseMatrix &
- operator=(CUDAWrappers::SparseMatrix &&);
-
- /**
- * Copy assignment is deleted.
- */
- SparseMatrix &
- operator=(const CUDAWrappers::SparseMatrix &) = delete;
-
- /**
- * Reinitialize the sparse matrix. The sparse matrix on the host is copied
- * to the @ref GlossDevice "device" and the elements are reordered according to the format
- * supported by cuSPARSE.
- */
- void
- reinit(Utilities::CUDA::Handle &handle,
- const ::dealii::SparseMatrix<Number> &sparse_matrix_host);
- /** @} */
-
- /**
- * @name Information on the matrix
- */
- /** @{ */
- /**
- * Return the dimension of the codomain (or range) space. Note that the
- * matrix is of dimension $m \times n$.
- */
- size_type
- m() const;
-
- /**
- * Return the dimension of the domain space. Note that the matrix is of
- * dimension $m \times n$.
- */
- size_type
- n() const;
-
- /**
- * Return the number of nonzero elements of this matrix. Actually, it
- * returns the number of entries in the sparsity pattern; if any of the
- * entries should happen to be zero, it is counted anyway.
- */
- std::size_t
- n_nonzero_elements() const;
-
- /**
- * Print the matrix to the given stream, using the format (row,column)
- * value, i.e. one nonzero entry of the matrix per line. If
- * across is true, print all entries on a single line, using the
- * format row,column:value.
- *
- * If the argument diagonal_first is true, diagonal elements of
- * quadratic matrices are printed first in their row. If it is false,
- * the elements in a row are written in ascending column order.
- */
- template <typename StreamType>
- void
- print(StreamType &out,
- const bool across = false,
- const bool diagonal_first = true) const;
-
- /**
- * Print the matrix in the usual format, i.e., as a matrix and not as a list
- * of nonzero elements. For better readability, elements not in the matrix
- * are displayed as empty space, while matrix elements which are explicitly
- * set to zero are displayed as such.
- *
- * The parameters allow for a flexible setting of the output format:
- * @p precision and @p scientific are used to determine the number format,
- * where <tt>scientific = false</tt> means fixed point notation. A zero
- * entry for @p width makes the function compute a width, but it may be
- * changed to a positive value, if output is crude.
- *
- * Additionally, a character for an empty value may be specified in
- * @p zero_string, and a character to separate row entries can be set in
- * @p separator.
- *
- * Finally, the whole matrix can be multiplied with a common @p denominator
- * to produce more readable output, even integers.
- *
- * @attention This function may produce @em large amounts of output if
- * applied to a large matrix!
- */
- void
- print_formatted(std::ostream &out,
- const unsigned int precision = 3,
- const bool scientific = true,
- const unsigned int width = 0,
- const char *zero_string = " ",
- const double denominator = 1.,
- const char *separator = " ") const;
- /** @} */
-
- /**
- * @name Modifying entries
- */
- /** @{ */
- /**
- * Multiply the entire matrix by a fixed factor.
- */
- SparseMatrix &
- operator*=(const Number factor);
-
- /**
- * Divide the entire matrix by a fixed factor.
- */
- SparseMatrix &
- operator/=(const Number factor);
- /** @} */
-
- /**
- * @name Multiplications
- */
- /** @{ */
- /**
- * Matrix-vector multiplication: let $dst = M \cdot src$ with $M$
- * being this matrix.
- */
- void
- vmult(LinearAlgebra::CUDAWrappers::Vector<Number> &dst,
- const LinearAlgebra::CUDAWrappers::Vector<Number> &src) const;
-
- /**
- * Matrix-vector multiplication: let $dst = M^T \cdot src$ with
- * $M$ being this matrix. This function does the same as vmult() but
- * takes this transposed matrix.
- */
- void
- Tvmult(LinearAlgebra::CUDAWrappers::Vector<Number> &dst,
- const LinearAlgebra::CUDAWrappers::Vector<Number> &src) const;
-
- /**
- * Adding matrix-vector multiplication. Add $M \cdot src$ on $dst$
- * with $M$ being this matrix.
- */
- void
- vmult_add(LinearAlgebra::CUDAWrappers::Vector<Number> &dst,
- const LinearAlgebra::CUDAWrappers::Vector<Number> &src) const;
-
- /**
- * Adding matrix-vector multiplication. Add $M^T \cdot src$ to
- * $dst$ with $M$ being this matrix. This function does the same
- * as vmult_add() but takes the transposed matrix.
- */
- void
- Tvmult_add(LinearAlgebra::CUDAWrappers::Vector<Number> &dst,
- const LinearAlgebra::CUDAWrappers::Vector<Number> &src) const;
-
- /**
- * Return the square of the norm of the vector $v$ with respect to the
- * norm induced by this matrix, i.e., $\left(v,Mv\right)$. This is useful,
- * e.g., in the finite element context, where the $L_2$ norm of a function equals
- * the matrix norm with respect to the @ref GlossMassMatrix "mass matrix" of the vector
- * representing the nodal values of the finite element function.
- *
- * Obviously, the matrix needs to be quadratic for this operation.
- */
- Number
- matrix_norm_square(
- const LinearAlgebra::CUDAWrappers::Vector<Number> &v) const;
-
- /**
- * Compute the matrix scalar product $\left(u,Mv\right)$.
- */
- Number
- matrix_scalar_product(
- const LinearAlgebra::CUDAWrappers::Vector<Number> &u,
- const LinearAlgebra::CUDAWrappers::Vector<Number> &v) const;
-
- /**
- * Compute the residual of an equation $M \cdot x=b$, where the residual is
- * defined to be $r=b-M \cdot x$. Write the residual into $dst$. The
- * $l_2$ norm of the residual vector is returned.
- *
- * Source $x$ and destination $dst$ must not be the same vector.
- */
- Number
- residual(LinearAlgebra::CUDAWrappers::Vector<Number> &dst,
- const LinearAlgebra::CUDAWrappers::Vector<Number> &x,
- const LinearAlgebra::CUDAWrappers::Vector<Number> &b) const;
- /** @} */
-
- /**
- * @name Matrix norms
- */
- /** @{ */
- /**
- * Return the $l_1$-norm of the matrix, that is $|M|_1=\max_{\mathrm{all\
- * columns\ }j}\sum_{\mathrm{all\ rows\ }i} |M_{ij}|$, (max. sum of
- * columns). This is the natural matrix norm that is compatible to the
- * $l_1$-norm for vectors, i.e., $|Mv|_1\leq |M|_1 |v|_1$.
- */
- Number
- l1_norm() const;
-
- /**
- * Return the $l_\infty$-norm of the matrix, that is
- * $|M|_\infty=\max_{\mathrm{all\ rows\ }i}\sum_{\mathrm{all\ columns\ }j}
- * |M_{ij}|$, (max. sum of rows). This is the natural norm that is
- * compatible to the $l_\infty$-norm of vectors, i.e., $|Mv|_\infty \leq
- * |M|_\infty |v|_\infty$.
- */
- Number
- linfty_norm() const;
-
- /**
- * Return the Frobenius norm of the matrix, i.e., the square root of the
- * sum of squares of all entries in the matrix.
- */
- Number
- frobenius_norm() const;
- /** @} */
-
- /**
- * @name Access to underlying CUDA data
- */
- /** @{ */
- /**
- * Return a tuple containing the pointer to the values of the matrix, the
- * pointer to the column indices, the pointer to the row pointers,
- * the cuSPARSE matrix description, and the cuSPARSE SP matrix description.
- */
- std::tuple<Number *, int *, int *, cusparseMatDescr_t, cusparseSpMatDescr_t>
- get_cusparse_matrix() const;
- /** @} */
-
- private:
- /**
- * cuSPARSE handle used to call cuSPARSE functions.
- */
- cusparseHandle_t cusparse_handle;
-
- /**
- * Number of non-zero elements in the sparse matrix.
- */
- int nnz;
-
- /**
- * Number of rows of the sparse matrix.
- */
- int n_rows;
-
- /**
- * Number of columns of the sparse matrix.
- */
- int n_cols;
-
- /**
- * Pointer to the values (on the @ref GlossDevice "device") of the sparse matrix.
- */
- std::unique_ptr<Number[], void (*)(Number *)> val_dev;
-
- /**
- * Pointer to the column indices (on the @ref GlossDevice "device") of the sparse matrix.
- */
- std::unique_ptr<int[], void (*)(int *)> column_index_dev;
-
- /**
- * Pointer to the row pointer (on the @ref GlossDevice "device") of the sparse matrix.
- */
- std::unique_ptr<int[], void (*)(int *)> row_ptr_dev;
-
- /**
- * cuSPARSE description of the matrix.
- */
- cusparseMatDescr_t descr;
-
- /**
- * cuSPARSE description of the sparse matrix.
- */
- cusparseSpMatDescr_t sp_descr;
- };
-
-
-
- template <typename Number>
- inline typename SparseMatrix<Number>::size_type
- SparseMatrix<Number>::m() const
- {
- return n_rows;
- }
-
-
-
- template <typename Number>
- inline typename SparseMatrix<Number>::size_type
- SparseMatrix<Number>::n() const
- {
- return n_cols;
- }
-
-
-
- template <typename Number>