From 2020ca43c1a825f0cf9044ebda52770e0d6de59b Mon Sep 17 00:00:00 2001 From: Daniel Arndt Date: Fri, 31 Mar 2023 22:51:41 -0400 Subject: [PATCH] Add glossary entry --- doc/doxygen/headers/glossary.h | 10 +++ include/deal.II/base/cuda.h | 16 ++-- include/deal.II/base/memory_space_data.h | 6 +- include/deal.II/base/numbers.h | 4 +- include/deal.II/base/partitioner.h | 4 +- include/deal.II/base/point.h | 36 ++++----- include/deal.II/base/tensor.h | 80 +++++++++---------- include/deal.II/lac/cuda_precondition.h | 20 ++--- include/deal.II/lac/cuda_solver_direct.h | 2 +- include/deal.II/lac/cuda_sparse_matrix.h | 10 +-- include/deal.II/lac/la_parallel_vector.h | 18 ++--- include/deal.II/lac/trilinos_tpetra_vector.h | 2 +- .../deal.II/matrix_free/cuda_matrix_free.h | 2 +- .../matrix_free/cuda_matrix_free.templates.h | 2 +- 14 files changed, 111 insertions(+), 101 deletions(-) diff --git a/doc/doxygen/headers/glossary.h b/doc/doxygen/headers/glossary.h index 3ce2463379..066cb3907b 100644 --- a/doc/doxygen/headers/glossary.h +++ b/doc/doxygen/headers/glossary.h @@ -728,6 +728,16 @@ * * * + *
@anchor GlossDevice Device
+ * + *
We commonly refer to GPUs as "devices" in deal.II. The context is + * always related to Kokkos or CUDA, which motivated the use of this term. + * Occasionally, we also call data corresponding to MemorySpace::Default "device data" + * (even though it is allocated in CPU memory if Kokkos was configured without + * a GPU backend) to distinguish it from data in MemorySpace::Host. + *
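+ *
+ * As a minimal sketch (assuming deal.II was configured with Kokkos and the
+ * library has been initialized in the usual way), the distinction shows up in
+ * the memory-space template argument of a vector; only the second object
+ * below holds "device data":
+ * @code
+ *   #include <deal.II/lac/la_parallel_vector.h>
+ *
+ *   using namespace dealii;
+ *
+ *   // Data lives in CPU memory.
+ *   LinearAlgebra::distributed::Vector<double, MemorySpace::Host> host_vector;
+ *
+ *   // Data lives in "device" memory: GPU memory if Kokkos has a GPU backend,
+ *   // CPU memory otherwise.
+ *   LinearAlgebra::distributed::Vector<double, MemorySpace::Default> device_vector;
+ * @endcode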
+ * + * *
@anchor GlossDimension Dimensions `dim` and `spacedim`
* *
Many classes and functions in deal.II have two template parameters, diff --git a/include/deal.II/base/cuda.h b/include/deal.II/base/cuda.h index d882804dd4..b67875510e 100644 --- a/include/deal.II/base/cuda.h +++ b/include/deal.II/base/cuda.h @@ -78,7 +78,7 @@ namespace Utilities }; /** - * Allocate @p n_elements on the device. + * Allocate @p n_elements on the @ref GlossDevice "device". */ template inline void @@ -90,7 +90,7 @@ namespace Utilities } /** - * Free memory on the device. + * Free memory on the @ref GlossDevice "device". */ template inline void @@ -102,7 +102,7 @@ namespace Utilities } /** - * Allocator to be used for `std::unique_ptr` pointing to device memory. + * Allocator to be used for `std::unique_ptr` pointing to @ref GlossDevice "device" memory. */ template Number * @@ -114,7 +114,7 @@ namespace Utilities } /** - * Deleter to be used for `std::unique_ptr` pointing to device memory. + * Deleter to be used for `std::unique_ptr` pointing to @ref GlossDevice "device" memory. */ template void @@ -125,7 +125,7 @@ namespace Utilities } /** - * Copy the device ArrayView @p in to the host ArrayView @p out. + * Copy the @ref GlossDevice "device" ArrayView @p in to the host ArrayView @p out. */ template inline void @@ -141,7 +141,7 @@ namespace Utilities } /** - * Copy the host ArrayView @p in to the device ArrayView @p out. + * Copy the host ArrayView @p in to the @ref GlossDevice "device" ArrayView @p out. */ template inline void @@ -169,8 +169,8 @@ namespace Utilities } /** - * Copy the elements in @p vector_host to the device in @p pointer_dev. The - * memory needs to be allocate on the device before this function is called. + * Copy the elements in @p vector_host to the @ref GlossDevice "device" in @p pointer_dev. The + * memory needs to be allocate on the @ref GlossDevice "device" before this function is called. */ template inline void diff --git a/include/deal.II/base/memory_space_data.h b/include/deal.II/base/memory_space_data.h index 827500bdcb..cf5a0a3140 100644 --- a/include/deal.II/base/memory_space_data.h +++ b/include/deal.II/base/memory_space_data.h @@ -35,7 +35,7 @@ DEAL_II_NAMESPACE_OPEN namespace MemorySpace { /** - * Structure which stores data on the host or the device depending on the + * Structure which stores data on the host or the @ref GlossDevice "device" depending on the * template parameter @p MemorySpace. Valid choices are MemorySpace::Host, * MemorySpace::Default, and MemorySpace::CUDA (if CUDA was enabled in * deal.II). The data is copied into the structure which then owns the data @@ -48,7 +48,7 @@ namespace MemorySpace /** * Copy the class member values to @p begin. - * If the data is on the device it is moved to the host. + * If the data is on the @ref GlossDevice "device" it is moved to the host. */ void copy_to(T *begin, const std::size_t n_elements); @@ -67,7 +67,7 @@ namespace MemorySpace Kokkos::View values_host_buffer; /** - * Kokkos View owning the data on the device (unless @p values_sm_ptr is used). + * Kokkos View owning the data on the @ref GlossDevice "device" (unless @p values_sm_ptr is used). */ Kokkos::View values; diff --git a/include/deal.II/base/numbers.h b/include/deal.II/base/numbers.h index 4dcbaa426c..b4b1d6214f 100644 --- a/include/deal.II/base/numbers.h +++ b/include/deal.II/base/numbers.h @@ -450,7 +450,7 @@ namespace numbers * template is selected if number is not a complex data type, this * function simply returns the given number. * - * @note This function can also be used in device code. 
+ * @note This function can also be used in @ref GlossDevice "device" code. */ static constexpr DEAL_II_HOST_DEVICE const number & conjugate(const number &x); @@ -460,7 +460,7 @@ namespace numbers * general template is chosen for types not equal to std::complex, this * function simply returns the square of the given number. * - * @note If the template type can be used in device code, the same holds true + * @note If the template type can be used in @ref GlossDevice "device" code, the same holds true * for this function. */ static constexpr DEAL_II_HOST_DEVICE real_type diff --git a/include/deal.II/base/partitioner.h b/include/deal.II/base/partitioner.h index a2faad97a8..479e29134c 100644 --- a/include/deal.II/base/partitioner.h +++ b/include/deal.II/base/partitioner.h @@ -677,7 +677,7 @@ namespace Utilities private: /** * Initialize import_indices_plain_dev from import_indices_data. This - * function is only used when using device-aware MPI. + * function is only used when using @ref GlossDevice "device"-aware MPI. */ void initialize_import_indices_plain_dev() const; @@ -729,7 +729,7 @@ namespace Utilities * The set of (local) indices that we are importing during compress(), * i.e., others' ghosts that belong to the local range. The data stored is * the same as in import_indices_data but the data is expanded in plain - * arrays. This variable is only used when using device-aware MPI. + * arrays. This variable is only used when using @ref GlossDevice "device"-aware MPI. */ // The variable is mutable to enable lazy initialization in // export_to_ghosted_array_start(). diff --git a/include/deal.II/base/point.h b/include/deal.II/base/point.h index 7e996b7294..c2d94e8eb6 100644 --- a/include/deal.II/base/point.h +++ b/include/deal.II/base/point.h @@ -113,7 +113,7 @@ public: * Standard constructor. Creates an object that corresponds to the origin, * i.e., all coordinates are set to zero. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ DEAL_II_HOST_DEVICE Point(); @@ -130,7 +130,7 @@ public: * dim!=1 as it would leave some components of the point * coordinates uninitialized. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ explicit DEAL_II_HOST_DEVICE Point(const Number x); @@ -142,7 +142,7 @@ public: * coordinates uninitialized (if dim>2) or would not use some arguments (if * dim<2). * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ DEAL_II_HOST_DEVICE Point(const Number x, const Number y); @@ -154,7 +154,7 @@ public: * point coordinates uninitialized (if dim>3) or would not use some * arguments (if dim<3). * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ DEAL_II_HOST_DEVICE Point(const Number x, const Number y, const Number z); @@ -172,7 +172,7 @@ public: * that is zero in all coordinates except for a single 1 in the ith * coordinate. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ static DEAL_II_HOST_DEVICE Point unit_vector(const unsigned int i); @@ -180,7 +180,7 @@ public: /** * Read access to the indexth coordinate. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. 
*/ DEAL_II_HOST_DEVICE Number operator()(const unsigned int index) const; @@ -188,7 +188,7 @@ public: /** * Read and write access to the indexth coordinate. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ DEAL_II_HOST_DEVICE Number & operator()(const unsigned int index); @@ -210,7 +210,7 @@ public: /** * Add an offset given as Tensor<1,dim,Number> to a point. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ DEAL_II_HOST_DEVICE Point operator+(const Tensor<1, dim, Number> &) const; @@ -222,7 +222,7 @@ public: * origin) and, consequently, the result is returned as a Tensor@<1,dim@> * rather than as a Point@. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ DEAL_II_HOST_DEVICE Tensor<1, dim, Number> operator-(const Point &) const; @@ -233,7 +233,7 @@ public: * documentation of this class, the result is then naturally returned as a * Point@ object rather than as a Tensor@<1,dim@>. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ DEAL_II_HOST_DEVICE Point operator-(const Tensor<1, dim, Number> &) const; @@ -241,7 +241,7 @@ public: /** * The opposite vector. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ DEAL_II_HOST_DEVICE Point operator-() const; @@ -258,7 +258,7 @@ public: /** * Multiply the current point by a factor. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. * * @relatesalso EnableIfScalar */ @@ -272,7 +272,7 @@ public: /** * Divide the current point by a factor. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ template DEAL_II_HOST_DEVICE Point< @@ -284,7 +284,7 @@ public: /** * Return the scalar product of the vectors representing two points. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ DEAL_II_HOST_DEVICE Number operator*(const Tensor<1, dim, Number> &p) const; @@ -299,7 +299,7 @@ public: * Tensor::norm_square() which returns the square of the * Frobenius norm. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ DEAL_II_HOST_DEVICE typename numbers::NumberTraits::real_type square() const; @@ -309,7 +309,7 @@ public: * p, i.e. the $l_2$ norm of the difference between the * vectors representing the two points. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ DEAL_II_HOST_DEVICE typename numbers::NumberTraits::real_type distance(const Point &p) const; @@ -318,7 +318,7 @@ public: * Return the squared Euclidean distance of this point to the point * p. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. 
*/ DEAL_II_HOST_DEVICE typename numbers::NumberTraits::real_type distance_square(const Point &p) const; @@ -648,7 +648,7 @@ inline void Point::serialize(Archive &ar, const unsigned int) /** * Global operator scaling a point vector by a scalar. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. * * @relates Point */ diff --git a/include/deal.II/base/tensor.h b/include/deal.II/base/tensor.h index b09d19f5cc..ed5a8ebbb3 100644 --- a/include/deal.II/base/tensor.h +++ b/include/deal.II/base/tensor.h @@ -141,7 +141,7 @@ public: /** * Constructor. Set to zero. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ constexpr DEAL_II_HOST_DEVICE Tensor(); @@ -151,7 +151,7 @@ public: * obviously requires that the @p OtherNumber type is convertible to @p * Number. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ template constexpr DEAL_II_HOST_DEVICE @@ -160,7 +160,7 @@ public: /** * Constructor, where the data is copied from a C-style array. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ template constexpr DEAL_II_HOST_DEVICE @@ -236,7 +236,7 @@ public: * This is the non-const conversion operator that returns a writable * reference. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ constexpr DEAL_II_HOST_DEVICE operator Number &(); @@ -247,7 +247,7 @@ public: * * This is the const conversion operator that returns a read-only reference. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ constexpr DEAL_II_HOST_DEVICE operator const Number &() const; @@ -256,7 +256,7 @@ public: * obviously requires that the @p OtherNumber type is convertible to @p * Number. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ template constexpr DEAL_II_HOST_DEVICE Tensor & @@ -269,7 +269,7 @@ public: * copy constructor for Sacado::Rad::ADvar types automatically. * See https://github.com/dealii/dealii/pull/5865. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ constexpr DEAL_II_HOST_DEVICE Tensor & operator=(const Tensor<0, dim, Number> &rhs); @@ -287,7 +287,7 @@ public: * This operator assigns a scalar to a tensor. This obviously requires * that the @p OtherNumber type is convertible to @p Number. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ template constexpr DEAL_II_HOST_DEVICE Tensor & @@ -319,7 +319,7 @@ public: /** * Add another scalar. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ template constexpr DEAL_II_HOST_DEVICE Tensor & @@ -328,7 +328,7 @@ public: /** * Subtract another scalar. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ template constexpr DEAL_II_HOST_DEVICE Tensor & @@ -337,7 +337,7 @@ public: /** * Multiply the scalar with a factor. 
* - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ template constexpr DEAL_II_HOST_DEVICE Tensor & @@ -346,7 +346,7 @@ public: /** * Divide the scalar by factor. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ template constexpr DEAL_II_HOST_DEVICE Tensor & @@ -355,7 +355,7 @@ public: /** * Tensor with inverted entries. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ constexpr DEAL_II_HOST_DEVICE Tensor operator-() const; @@ -387,7 +387,7 @@ public: * Return the square of the Frobenius-norm of a tensor, i.e. the sum of the * absolute squares of all entries. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ constexpr DEAL_II_HOST_DEVICE real_type norm_square() const; @@ -558,7 +558,7 @@ public: /** * Constructor. Initialize all entries to zero. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ constexpr DEAL_II_HOST_DEVICE_ALWAYS_INLINE Tensor(); @@ -566,7 +566,7 @@ public: /** * A constructor where the data is copied from a C-style array. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ constexpr DEAL_II_HOST_DEVICE explicit Tensor(const array_type &initializer); @@ -581,7 +581,7 @@ public: * either equal to @p Number, or is convertible to @p Number. * Number. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ template constexpr DEAL_II_HOST_DEVICE explicit Tensor( @@ -592,7 +592,7 @@ public: * obviously requires that the @p OtherNumber type is convertible to @p * Number. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ template constexpr DEAL_II_HOST_DEVICE @@ -627,7 +627,7 @@ public: /** * Read-Write access operator. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ constexpr DEAL_II_HOST_DEVICE value_type & operator[](const unsigned int i); @@ -635,7 +635,7 @@ public: /** * Read-only access operator. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ constexpr DEAL_II_HOST_DEVICE const value_type & operator[](const unsigned int i) const; @@ -681,7 +681,7 @@ public: * This obviously requires that the @p OtherNumber type is convertible to @p * Number. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ template constexpr DEAL_II_HOST_DEVICE Tensor & @@ -735,7 +735,7 @@ public: /** * Add another tensor. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ template constexpr DEAL_II_HOST_DEVICE Tensor & @@ -744,7 +744,7 @@ public: /** * Subtract another tensor. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. 
*/ template constexpr DEAL_II_HOST_DEVICE Tensor & @@ -754,7 +754,7 @@ public: * Scale the tensor by factor, i.e. multiply all components by * factor. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ template constexpr DEAL_II_HOST_DEVICE Tensor & @@ -763,7 +763,7 @@ public: /** * Scale the vector by 1/factor. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ template constexpr DEAL_II_HOST_DEVICE Tensor & @@ -772,7 +772,7 @@ public: /** * Unary minus operator. Negate all entries of a tensor. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ constexpr DEAL_II_HOST_DEVICE Tensor operator-() const; @@ -797,7 +797,7 @@ public: * the absolute squares of all entries. For the present case of rank-1 * tensors, this equals the usual l2 norm of the vector. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ DEAL_II_HOST_DEVICE typename numbers::NumberTraits::real_type @@ -807,7 +807,7 @@ public: * Return the square of the Frobenius-norm of a tensor, i.e. the sum of the * absolute squares of all entries. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ constexpr DEAL_II_HOST_DEVICE typename numbers::NumberTraits::real_type @@ -897,7 +897,7 @@ private: * This constructor is for internal use. It provides a way * to create constexpr constructors for Tensor * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. */ template constexpr DEAL_II_HOST_DEVICE @@ -1961,7 +1961,7 @@ operator<<(std::ostream &out, const Tensor<0, dim, Number> &p) * This function unwraps the underlying @p Number stored in the Tensor and * multiplies @p object with it. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. * * @relatesalso Tensor */ @@ -1981,7 +1981,7 @@ constexpr DEAL_II_HOST_DEVICE inline DEAL_II_ALWAYS_INLINE * This function unwraps the underlying @p Number stored in the Tensor and * multiplies @p object with it. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. * * @relatesalso Tensor */ @@ -2001,7 +2001,7 @@ constexpr DEAL_II_HOST_DEVICE inline DEAL_II_ALWAYS_INLINE * OtherNumber that are stored within the Tensor and multiplies them. It * returns an unwrapped number of product type. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. * * @relatesalso Tensor */ @@ -2019,7 +2019,7 @@ DEAL_II_HOST_DEVICE constexpr DEAL_II_ALWAYS_INLINE /** * Division of a tensor of rank 0 by a scalar number. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. * * @relatesalso Tensor */ @@ -2038,7 +2038,7 @@ DEAL_II_HOST_DEVICE constexpr DEAL_II_ALWAYS_INLINE /** * Add two tensors of rank 0. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. 
* * @relatesalso Tensor */ @@ -2055,7 +2055,7 @@ constexpr DEAL_II_HOST_DEVICE_ALWAYS_INLINE /** * Subtract two tensors of rank 0. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. * * @relatesalso Tensor */ @@ -2077,7 +2077,7 @@ constexpr DEAL_II_HOST_DEVICE_ALWAYS_INLINE * number, a complex floating point number, etc.) is allowed, see the * documentation of EnableIfScalar for details. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. * * @relatesalso Tensor */ @@ -2105,7 +2105,7 @@ constexpr DEAL_II_HOST_DEVICE inline DEAL_II_ALWAYS_INLINE * number, a complex floating point number, etc.) is allowed, see the * documentation of EnableIfScalar for details. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. * * @relatesalso Tensor */ @@ -2176,7 +2176,7 @@ namespace internal * discussion on operator*() above for more information about template * arguments and the return type. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. * * @relatesalso Tensor */ @@ -2197,7 +2197,7 @@ constexpr DEAL_II_HOST_DEVICE inline DEAL_II_ALWAYS_INLINE * * @tparam rank The rank of both tensors. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. * * @relatesalso Tensor */ @@ -2221,7 +2221,7 @@ constexpr DEAL_II_HOST_DEVICE inline DEAL_II_ALWAYS_INLINE * * @tparam rank The rank of both tensors. * - * @note This function can also be used in device code. + * @note This function can also be used in @ref GlossDevice "device" code. * * @relatesalso Tensor */ diff --git a/include/deal.II/lac/cuda_precondition.h b/include/deal.II/lac/cuda_precondition.h index bba3c077d8..fc3f06d05b 100644 --- a/include/deal.II/lac/cuda_precondition.h +++ b/include/deal.II/lac/cuda_precondition.h @@ -196,31 +196,31 @@ namespace CUDAWrappers SmartPointer> matrix_pointer; /** - * Pointer to the values (on the device) of the computed preconditioning + * Pointer to the values (on the @ref GlossDevice "device") of the computed preconditioning * matrix. */ std::unique_ptr P_val_dev; /** - * Pointer to the row pointer (on the device) of the sparse matrix this + * Pointer to the row pointer (on the @ref GlossDevice "device") of the sparse matrix this * object was initialized with. Guarded by matrix_pointer. */ const int *P_row_ptr_dev; /** - * Pointer to the column indices (on the device) of the sparse matrix this + * Pointer to the column indices (on the @ref GlossDevice "device") of the sparse matrix this * object was initialized with. Guarded by matrix_pointer. */ const int *P_column_index_dev; /** - * Pointer to the value (on the device) for a temporary (helper) vector + * Pointer to the value (on the @ref GlossDevice "device") for a temporary (helper) vector * used in vmult(). */ std::unique_ptr tmp_dev; /** - * Pointer to an internal buffer (on the device) that is used for + * Pointer to an internal buffer (on the @ref GlossDevice "device") that is used for * computing the decomposition. 
*/ std::unique_ptr buffer_dev; @@ -413,31 +413,31 @@ namespace CUDAWrappers SmartPointer> matrix_pointer; /** - * Pointer to the values (on the device) of the computed preconditioning + * Pointer to the values (on the @ref GlossDevice "device") of the computed preconditioning * matrix. */ std::unique_ptr P_val_dev; /** - * Pointer to the row pointer (on the device) of the sparse matrix this + * Pointer to the row pointer (on the @ref GlossDevice "device") of the sparse matrix this * object was initialized with. Guarded by matrix_pointer. */ const int *P_row_ptr_dev; /** - * Pointer to the column indices (on the device) of the sparse matrix this + * Pointer to the column indices (on the @ref GlossDevice "device") of the sparse matrix this * object was initialized with. Guarded by matrix_pointer. */ const int *P_column_index_dev; /** - * Pointer to the value (on the device) for a temporary (helper) vector + * Pointer to the value (on the @ref GlossDevice "device") for a temporary (helper) vector * used in vmult(). */ std::unique_ptr tmp_dev; /** - * Pointer to an internal buffer (on the device) that is used for + * Pointer to an internal buffer (on the @ref GlossDevice "device") that is used for * computing the decomposition. */ std::unique_ptr buffer_dev; diff --git a/include/deal.II/lac/cuda_solver_direct.h b/include/deal.II/lac/cuda_solver_direct.h index dff21c90af..f2b3817fd4 100644 --- a/include/deal.II/lac/cuda_solver_direct.h +++ b/include/deal.II/lac/cuda_solver_direct.h @@ -54,7 +54,7 @@ namespace CUDAWrappers /** * Set the solver type. Possibilities are: *
- *   • "Cholesky" which performs a Cholesky decomposition on the device
+ *   • "Cholesky" which performs a Cholesky decomposition on the @ref GlossDevice "device"
  *
  *   • "LU_dense" which converts the sparse matrix to a dense
  * matrix and uses LU factorization
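+ *
+ * A hedged sketch of how the solver type might be selected: the handle,
+ * matrix, and vector types named in the comments, as well as the exact
+ * constructor and solve() signatures, are assumptions that should be checked
+ * against the CUDAWrappers::SolverDirect class documentation.
+ * @code
+ *   // Sketch only: `cuda_handle` is a Utilities::CUDA::Handle, `A` a
+ *   // CUDAWrappers::SparseMatrix<double>, and `x`, `b` are
+ *   // LinearAlgebra::CUDAWrappers::Vector<double> objects.
+ *   SolverControl                                      control(100, 1.e-10);
+ *   CUDAWrappers::SolverDirect<double>::AdditionalData data("Cholesky");
+ *
+ *   CUDAWrappers::SolverDirect<double> solver(cuda_handle, control, data);
+ *   solver.solve(A, x, b);
+ * @endcode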
  • diff --git a/include/deal.II/lac/cuda_sparse_matrix.h b/include/deal.II/lac/cuda_sparse_matrix.h index 266afa2608..b1d2ed03e1 100644 --- a/include/deal.II/lac/cuda_sparse_matrix.h +++ b/include/deal.II/lac/cuda_sparse_matrix.h @@ -79,7 +79,7 @@ namespace CUDAWrappers /** * Constructor. Takes a Utilities::CUDA::Handle and a sparse matrix on the - * host. The sparse matrix on the host is copied on the device and the + * host. The sparse matrix on the host is copied on the @ref GlossDevice "device" and the * elements are reordered according to the format supported by cuSPARSE. */ SparseMatrix(Utilities::CUDA::Handle & handle, @@ -115,7 +115,7 @@ namespace CUDAWrappers /** * Reinitialize the sparse matrix. The sparse matrix on the host is copied - * to the device and the elementes are reordered according to the format + * to the @ref GlossDevice "device" and the elementes are reordered according to the format * supported by cuSPARSE. */ void @@ -349,17 +349,17 @@ namespace CUDAWrappers int n_cols; /** - * Pointer to the values (on the device) of the sparse matrix. + * Pointer to the values (on the @ref GlossDevice "device") of the sparse matrix. */ std::unique_ptr val_dev; /** - * Pointer to the column indices (on the device) of the sparse matrix. + * Pointer to the column indices (on the @ref GlossDevice "device") of the sparse matrix. */ std::unique_ptr column_index_dev; /** - * Pointer to the row pointer (on the device) of the sparse matrix. + * Pointer to the row pointer (on the @ref GlossDevice "device") of the sparse matrix. */ std::unique_ptr row_ptr_dev; diff --git a/include/deal.II/lac/la_parallel_vector.h b/include/deal.II/lac/la_parallel_vector.h index 194d8af58d..cb2d74d36c 100644 --- a/include/deal.II/lac/la_parallel_vector.h +++ b/include/deal.II/lac/la_parallel_vector.h @@ -220,8 +220,8 @@ namespace LinearAlgebra * necessary. Since an MPI communication may be performed, import needs to * be called on all the processors. * - * @note By default, the GPU device id is chosen in a round-robin fashion - * according to the local MPI rank id. To choose a different device, Kokkos + * @note By default, the GPU @ref GlossDevice "device" id is chosen in a round-robin fashion + * according to the local MPI rank id. To choose a different @ref GlossDevice "device", Kokkos * has to be initialized explicitly providing the respective devide id * explicitly. * @@ -578,7 +578,7 @@ namespace LinearAlgebra * Must follow a call to the @p compress_start function. * * When the MemorySpace is Default and MPI is not GPU-aware, data changed - * on the device after the call to compress_start will be lost. + * on the @ref GlossDevice "device" after the call to compress_start will be lost. */ void compress_finish(VectorOperation::values operation); @@ -734,7 +734,7 @@ namespace LinearAlgebra * improve performance. * * @note If the MemorySpace is Default, the data in the ReadWriteVector will - * be moved to the device. + * be moved to the @ref GlossDevice "device". */ virtual void import(const LinearAlgebra::ReadWriteVector &V, @@ -968,7 +968,7 @@ namespace LinearAlgebra * It holds that end() - begin() == locally_owned_size(). * * @note For the Default memory space, the iterator might point to memory - * on the device. + * on the @ref GlossDevice "device". */ iterator begin(); @@ -978,7 +978,7 @@ namespace LinearAlgebra * of the vector. * * @note For the Default memory space, the iterator might point to memory - * on the device. + * on the @ref GlossDevice "device". 
*/ const_iterator begin() const; @@ -988,7 +988,7 @@ namespace LinearAlgebra * of locally owned entries. * * @note For the Default memory space, the iterator might point to memory - * on the device. + * on the @ref GlossDevice "device". */ iterator end(); @@ -998,7 +998,7 @@ namespace LinearAlgebra * the array of the locally owned entries. * * @note For the Default memory space, the iterator might point to memory - * on the device. + * on the @ref GlossDevice "device". */ const_iterator end() const; @@ -1072,7 +1072,7 @@ namespace LinearAlgebra * Return the pointer to the underlying raw array. * * @note For the Default memory space, the pointer might point to memory - * on the device. + * on the @ref GlossDevice "device". */ Number * get_values() const; diff --git a/include/deal.II/lac/trilinos_tpetra_vector.h b/include/deal.II/lac/trilinos_tpetra_vector.h index 330776ac4e..3a53c80694 100644 --- a/include/deal.II/lac/trilinos_tpetra_vector.h +++ b/include/deal.II/lac/trilinos_tpetra_vector.h @@ -107,7 +107,7 @@ namespace LinearAlgebra * * In case Kokkos was configured with GPU support, this class performs its * actions on the GPU. In particular, there is no need for manually - * synchronizing memory between host and device. + * synchronizing memory between host and @ref GlossDevice "device". * * @ingroup TrilinosWrappers * @ingroup Vectors diff --git a/include/deal.II/matrix_free/cuda_matrix_free.h b/include/deal.II/matrix_free/cuda_matrix_free.h index 0b2780bf3d..cf2c0a2c4c 100644 --- a/include/deal.II/matrix_free/cuda_matrix_free.h +++ b/include/deal.II/matrix_free/cuda_matrix_free.h @@ -839,7 +839,7 @@ namespace CUDAWrappers /** - * Copy @p data from the device to the device. @p update_flags should be + * Copy @p data from the @ref GlossDevice "device" to the host. @p update_flags should be * identical to the one used in MatrixFree::AdditionalData. * * @relates CUDAWrappers::MatrixFree diff --git a/include/deal.II/matrix_free/cuda_matrix_free.templates.h b/include/deal.II/matrix_free/cuda_matrix_free.templates.h index 36758a0eea..1fde44beb0 100644 --- a/include/deal.II/matrix_free/cuda_matrix_free.templates.h +++ b/include/deal.II/matrix_free/cuda_matrix_free.templates.h @@ -171,7 +171,7 @@ namespace CUDAWrappers /** - * Allocate an array to the device and copy @p array_host to the device. + * Allocate an array on the @ref GlossDevice "device" and copy @p array_host to the @ref GlossDevice "device". */ template void -- 2.39.5