From 98225311fdb2043b29985d07d94b7eb08126ed51 Mon Sep 17 00:00:00 2001 From: Daniel Arndt Date: Mon, 27 Feb 2023 22:36:46 +0100 Subject: [PATCH] Replace KOKKOS_CONSTEXPR_FUNCTION --- include/deal.II/base/config.h.in | 14 - include/deal.II/base/symmetric_tensor.h | 1381 +++++++++++------ include/deal.II/base/table_indices.h | 22 +- .../physics/elasticity/standard_tensors.h | 68 +- 4 files changed, 945 insertions(+), 540 deletions(-) diff --git a/include/deal.II/base/config.h.in b/include/deal.II/base/config.h.in index c6ef29e331..b2b830344d 100644 --- a/include/deal.II/base/config.h.in +++ b/include/deal.II/base/config.h.in @@ -173,20 +173,6 @@ #endif #cmakedefine DEAL_II_FALLTHROUGH @DEAL_II_FALLTHROUGH@ #cmakedefine DEAL_II_CONSTEXPR @DEAL_II_CONSTEXPR@ -// clang++ assumes that all constexpr functions are __host__ __device__ when -// compiling CUDA code, i.e, when Kokkos was configured with CUDA support. -// This is problematic when calling non-constexpr functions in constexpr -// functions. Also, we intend constexpr functions to only be device-callable if -// not marked with DEAL_II_HOST_DEVICE. Hence, just annotate constexpr -// functions additionally with "__host__". -#if defined(DEAL_II_CONSTEXPR) -# if defined(__clang__) && defined(__CUDA__) -# define DEAL_II_CONSTEXPR_FUNCTION DEAL_II_CONSTEXPR __host__ -# else -# define DEAL_II_CONSTEXPR_FUNCTION DEAL_II_CONSTEXPR -# endif -#endif - // defined for backwards compatibility with older deal.II versions #define DEAL_II_WITH_CXX11 diff --git a/include/deal.II/base/symmetric_tensor.h b/include/deal.II/base/symmetric_tensor.h index 580e257c1c..90b2899d3f 100644 --- a/include/deal.II/base/symmetric_tensor.h +++ b/include/deal.II/base/symmetric_tensor.h @@ -45,9 +45,11 @@ class SymmetricTensor; * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE - SymmetricTensor<2, dim, Number> - unit_symmetric_tensor(); +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim, Number> + unit_symmetric_tensor(); /** * Return the tensor of rank 4 that, when multiplied by a symmetric rank 2 @@ -78,9 +80,11 @@ DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE - SymmetricTensor<4, dim, Number> - deviator_tensor(); +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE SymmetricTensor<4, dim, Number> + deviator_tensor(); /** * Return the fourth-order symmetric identity tensor $\mathbb S$ which maps @@ -120,17 +124,25 @@ DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE - SymmetricTensor<4, dim, Number> - identity_tensor(); +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE SymmetricTensor<4, dim, Number> + identity_tensor(); template -DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim, Number> -invert(const SymmetricTensor<2, dim, Number> &); +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim, Number> + invert(const SymmetricTensor<2, dim, Number> &); template -DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE SymmetricTensor<4, dim, Number> -invert(const SymmetricTensor<4, dim, Number> &); +#if defined(__clang__) && 
defined(__CUDA__) +__host__ +#endif + constexpr DEAL_II_ALWAYS_INLINE SymmetricTensor<4, dim, Number> + invert(const SymmetricTensor<4, dim, Number> &); /** * Compute and return the trace of a tensor of rank 2, i.e. the sum of its @@ -142,8 +154,11 @@ invert(const SymmetricTensor<4, dim, Number> &); * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE Number -trace(const SymmetricTensor<2, dim2, Number> &); +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr inline DEAL_II_ALWAYS_INLINE Number + trace(const SymmetricTensor<2, dim2, Number> &); /** * Compute the deviator of a symmetric tensor, which is defined as @@ -156,8 +171,10 @@ trace(const SymmetricTensor<2, dim2, Number> &); * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE - SymmetricTensor<2, dim, Number> +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim, Number> deviator(const SymmetricTensor<2, dim, Number> &); /** @@ -174,8 +191,11 @@ DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE Number -determinant(const SymmetricTensor<2, dim, Number> &); +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE Number + determinant(const SymmetricTensor<2, dim, Number> &); @@ -244,16 +264,19 @@ namespace internal */ namespace SymmetricTensorAccessors { - /** - * Create a TableIndices<2> object where the first entries up to - * position-1 are taken from previous_indices, and new_index is - * put at position position. The remaining indices remain in - * invalid state. - */ - DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE TableIndices<2> - merge(const TableIndices<2> &previous_indices, - const unsigned int new_index, - const unsigned int position) +/** + * Create a TableIndices<2> object where the first entries up to + * position-1 are taken from previous_indices, and new_index is + * put at position position. The remaining indices remain in + * invalid state. + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE TableIndices<2> + merge(const TableIndices<2> &previous_indices, + const unsigned int new_index, + const unsigned int position) { AssertIndexRange(position, 2); @@ -265,16 +288,19 @@ namespace internal - /** - * Create a TableIndices<4> object where the first entries up to - * position-1 are taken from previous_indices, and new_index is - * put at position position. The remaining indices remain in - * invalid state. - */ - DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE TableIndices<4> - merge(const TableIndices<4> &previous_indices, - const unsigned int new_index, - const unsigned int position) +/** + * Create a TableIndices<4> object where the first entries up to + * position-1 are taken from previous_indices, and new_index is + * put at position position. The remaining indices remain in + * invalid state. + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE TableIndices<4> + merge(const TableIndices<4> &previous_indices, + const unsigned int new_index, + const unsigned int position) { AssertIndexRange(position, 4); @@ -488,45 +514,57 @@ namespace internal typename AccessorTypes::tensor_type; private: - /** - * Constructor. 
Take a reference to the tensor object which we will - * access. - * - * The second argument denotes the values of previous indices into the - * tensor. For example, for a rank-4 tensor, if P=2, then we will - * already have had two successive element selections (e.g. through - * tensor[1][2]), and the two index values have to be stored - * somewhere. This class therefore only makes use of the first rank-P - * elements of this array, but passes it on to the next level with P-1 - * which fills the next entry, and so on. - * - * The constructor is made private in order to prevent you having such - * objects around. The only way to create such objects is via the - * Table class, which only generates them as temporary objects. - * This guarantees that the accessor objects go out of scope earlier - * than the mother object, avoid problems with data consistency. - */ - DEAL_II_CONSTEXPR_FUNCTION - Accessor(tensor_type &tensor, const TableIndices &previous_indices); +/** + * Constructor. Take a reference to the tensor object which we will + * access. + * + * The second argument denotes the values of previous indices into the + * tensor. For example, for a rank-4 tensor, if P=2, then we will + * already have had two successive element selections (e.g. through + * tensor[1][2]), and the two index values have to be stored + * somewhere. This class therefore only makes use of the first rank-P + * elements of this array, but passes it on to the next level with P-1 + * which fills the next entry, and so on. + * + * The constructor is made private in order to prevent you having such + * objects around. The only way to create such objects is via the + * Table class, which only generates them as temporary objects. + * This guarantees that the accessor objects go out of scope earlier + * than the mother object, avoid problems with data consistency. + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr Accessor(tensor_type & tensor, + const TableIndices &previous_indices); - /** - * Copy constructor. - */ - DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE - Accessor(const Accessor &) = default; +/** + * Copy constructor. + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr DEAL_II_ALWAYS_INLINE + Accessor(const Accessor &) = default; public: - /** - * Index operator. - */ - DEAL_II_CONSTEXPR_FUNCTION Accessor - operator[](const unsigned int i); +/** + * Index operator. + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr Accessor + operator[](const unsigned int i); - /** - * Index operator. - */ - DEAL_II_CONSTEXPR_FUNCTION Accessor - operator[](const unsigned int i) const; +/** + * Index operator. + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr Accessor + operator[](const unsigned int i) const; private: /** @@ -567,48 +605,60 @@ namespace internal typename AccessorTypes::tensor_type; private: - /** - * Constructor. Take a reference to the tensor object which we will - * access. - * - * The second argument denotes the values of previous indices into the - * tensor. For example, for a rank-4 tensor, if P=2, then we will - * already have had two successive element selections (e.g. through - * tensor[1][2]), and the two index values have to be stored - * somewhere. This class therefore only makes use of the first rank-P - * elements of this array, but passes it on to the next level with P-1 - * which fills the next entry, and so on. - * - * For this particular specialization, i.e. 
for P==1, all but the last - * index are already filled. - * - * The constructor is made private in order to prevent you having such - * objects around. The only way to create such objects is via the - * Table class, which only generates them as temporary objects. - * This guarantees that the accessor objects go out of scope earlier - * than the mother object, avoid problems with data consistency. - */ - DEAL_II_CONSTEXPR_FUNCTION - Accessor(tensor_type &tensor, const TableIndices &previous_indices); +/** + * Constructor. Take a reference to the tensor object which we will + * access. + * + * The second argument denotes the values of previous indices into the + * tensor. For example, for a rank-4 tensor, if P=2, then we will + * already have had two successive element selections (e.g. through + * tensor[1][2]), and the two index values have to be stored + * somewhere. This class therefore only makes use of the first rank-P + * elements of this array, but passes it on to the next level with P-1 + * which fills the next entry, and so on. + * + * For this particular specialization, i.e. for P==1, all but the last + * index are already filled. + * + * The constructor is made private in order to prevent you having such + * objects around. The only way to create such objects is via the + * Table class, which only generates them as temporary objects. + * This guarantees that the accessor objects go out of scope earlier + * than the mother object, avoid problems with data consistency. + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr Accessor(tensor_type & tensor, + const TableIndices &previous_indices); - /** - * Copy constructor. - */ - DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE - Accessor(const Accessor &) = default; +/** + * Copy constructor. + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr DEAL_II_ALWAYS_INLINE + Accessor(const Accessor &) = default; public: - /** - * Index operator. - */ - DEAL_II_CONSTEXPR_FUNCTION reference - operator[](const unsigned int); +/** + * Index operator. + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr reference + operator[](const unsigned int); - /** - * Index operator. - */ - DEAL_II_CONSTEXPR_FUNCTION reference - operator[](const unsigned int) const; +/** + * Index operator. + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr reference + operator[](const unsigned int) const; private: /** @@ -734,11 +784,14 @@ public: internal::SymmetricTensorAccessors::StorageType:: n_independent_components; - /** - * Default constructor. Creates a tensor with all entries equal to zero. - */ - DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE - SymmetricTensor() = default; +/** + * Default constructor. Creates a tensor with all entries equal to zero. + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr DEAL_II_ALWAYS_INLINE + SymmetricTensor() = default; /** * Constructor. Generate a symmetric tensor from a general one. Assumes that @@ -756,23 +809,25 @@ public: template explicit SymmetricTensor(const Tensor<2, dim, OtherNumber> &t); - /** - * A constructor that creates a symmetric tensor from an array holding its - * independent elements. Using this constructor assumes that the caller - * knows the order in which elements are stored in symmetric tensors; its - * use is therefore discouraged, but if you think you want to use it anyway - * you can query the order of elements using the unrolled_index() function. 
- * - * This constructor is currently only implemented for symmetric tensors of - * rank 2. - * - * The size of the array passed is equal to - * SymmetricTensor::n_independent_components; the reason for using - * the object from the internal namespace is to work around bugs in some - * older compilers. - */ - DEAL_II_CONSTEXPR_FUNCTION - SymmetricTensor(const Number (&array)[n_independent_components]); +/** + * A constructor that creates a symmetric tensor from an array holding its + * independent elements. Using this constructor assumes that the caller + * knows the order in which elements are stored in symmetric tensors; its + * use is therefore discouraged, but if you think you want to use it anyway + * you can query the order of elements using the unrolled_index() function. + * + * This constructor is currently only implemented for symmetric tensors of + * rank 2. + * + * The size of the array passed is equal to + * SymmetricTensor::n_independent_components; the reason for using + * the object from the internal namespace is to work around bugs in some + * older compilers. + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr SymmetricTensor(const Number (&array)[n_independent_components]); /** * Copy constructor from tensors with different underlying scalar type. This @@ -780,8 +835,11 @@ public: * Number. */ template - DEAL_II_CONSTEXPR_FUNCTION explicit SymmetricTensor( - const SymmetricTensor &initializer); +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr explicit SymmetricTensor( + const SymmetricTensor &initializer); /** * Return a pointer to the first element of the underlying storage. @@ -839,70 +897,101 @@ public: * @p Number. */ template - DEAL_II_CONSTEXPR_FUNCTION SymmetricTensor & - operator=(const SymmetricTensor &rhs); +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr SymmetricTensor & + operator=(const SymmetricTensor &rhs); - /** - * This operator assigns a scalar to a tensor. To avoid confusion with what - * exactly it means to assign a scalar value to a tensor, zero is the only - * value allowed for d, allowing the intuitive notation - * $\mathbf A = 0$ to reset all elements of the tensor to zero. - */ - DEAL_II_CONSTEXPR_FUNCTION SymmetricTensor & - operator=(const Number &d); +/** + * This operator assigns a scalar to a tensor. To avoid confusion with what + * exactly it means to assign a scalar value to a tensor, zero is the only + * value allowed for d, allowing the intuitive notation + * $\mathbf A = 0$ to reset all elements of the tensor to zero. + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr SymmetricTensor & + operator=(const Number &d); - /** - * Convert the present symmetric tensor into a full tensor with the same - * elements, but using the different storage scheme of full tensors. - */ - DEAL_II_CONSTEXPR_FUNCTION operator Tensor() const; +/** + * Convert the present symmetric tensor into a full tensor with the same + * elements, but using the different storage scheme of full tensors. + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr + operator Tensor() const; - /** - * Test for equality of two tensors. - */ - DEAL_II_CONSTEXPR_FUNCTION bool - operator==(const SymmetricTensor &) const; +/** + * Test for equality of two tensors. + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr bool + operator==(const SymmetricTensor &) const; - /** - * Test for inequality of two tensors. 
- */ - DEAL_II_CONSTEXPR_FUNCTION bool - operator!=(const SymmetricTensor &) const; +/** + * Test for inequality of two tensors. + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr bool + operator!=(const SymmetricTensor &) const; /** * Add another tensor. */ template - DEAL_II_CONSTEXPR_FUNCTION SymmetricTensor & - operator+=(const SymmetricTensor &); +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr SymmetricTensor & + operator+=(const SymmetricTensor &); /** * Subtract another tensor. */ template - DEAL_II_CONSTEXPR_FUNCTION SymmetricTensor & - operator-=(const SymmetricTensor &); +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr SymmetricTensor & + operator-=(const SymmetricTensor &); /** * Scale the tensor by factor, i.e. multiply all components by * factor. */ template - DEAL_II_CONSTEXPR_FUNCTION SymmetricTensor & - operator*=(const OtherNumber &factor); +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr SymmetricTensor & + operator*=(const OtherNumber &factor); /** * Scale the tensor by 1/factor. */ template - DEAL_II_CONSTEXPR_FUNCTION SymmetricTensor & - operator/=(const OtherNumber &factor); +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr SymmetricTensor & + operator/=(const OtherNumber &factor); - /** - * Unary minus operator. Negate all entries of a tensor. - */ - DEAL_II_CONSTEXPR_FUNCTION SymmetricTensor - operator-() const; +/** + * Unary minus operator. Negate all entries of a tensor. + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr SymmetricTensor + operator-() const; /** * Double contraction product between the present symmetric tensor and a @@ -957,92 +1046,125 @@ public: * symmetric tensors. */ template - DEAL_II_CONSTEXPR_FUNCTION typename internal::SymmetricTensorAccessors:: - double_contraction_result::type - operator*(const SymmetricTensor<2, dim, OtherNumber> &s) const; +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + DEAL_II_CONSTEXPR typename internal::SymmetricTensorAccessors:: + double_contraction_result::type + operator*(const SymmetricTensor<2, dim, OtherNumber> &s) const; /** * Contraction over the last two indices of the present object with the first * two indices of the rank-4 symmetric tensor given as argument. */ template - DEAL_II_CONSTEXPR_FUNCTION typename internal::SymmetricTensorAccessors:: - double_contraction_result::type - operator*(const SymmetricTensor<4, dim, OtherNumber> &s) const; +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + DEAL_II_CONSTEXPR typename internal::SymmetricTensorAccessors:: + double_contraction_result::type + operator*(const SymmetricTensor<4, dim, OtherNumber> &s) const; - /** - * Return a read-write reference to the indicated element. - */ - DEAL_II_CONSTEXPR_FUNCTION Number & - operator()(const TableIndices &indices); +/** + * Return a read-write reference to the indicated element. + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr Number & + operator()(const TableIndices &indices); - /** - * Return a @p const reference to the value referred to by the argument. - */ - DEAL_II_CONSTEXPR_FUNCTION const Number & - operator()(const TableIndices &indices) const; +/** + * Return a @p const reference to the value referred to by the argument. 
+ */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr const Number & + operator()(const TableIndices &indices) const; - /** - * Access the elements of a row of this symmetric tensor. This function is - * called for constant tensors. - */ - DEAL_II_CONSTEXPR_FUNCTION internal::SymmetricTensorAccessors:: - Accessor - operator[](const unsigned int row) const; +/** + * Access the elements of a row of this symmetric tensor. This function is + * called for constant tensors. + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr internal::SymmetricTensorAccessors:: + Accessor + operator[](const unsigned int row) const; - /** - * Access the elements of a row of this symmetric tensor. This function is - * called for non-constant tensors. - */ - DEAL_II_CONSTEXPR_FUNCTION internal::SymmetricTensorAccessors:: - Accessor - operator[](const unsigned int row); +/** + * Access the elements of a row of this symmetric tensor. This function is + * called for non-constant tensors. + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr internal::SymmetricTensorAccessors:: + Accessor + operator[](const unsigned int row); - /** - * Return a @p const reference to the value referred to by the argument. - * - * Exactly the same as operator(). - */ - DEAL_II_CONSTEXPR_FUNCTION const Number & - operator[](const TableIndices &indices) const; +/** + * Return a @p const reference to the value referred to by the argument. + * + * Exactly the same as operator(). + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr const Number & + operator[](const TableIndices &indices) const; - /** - * Return a read-write reference to the indicated element. - * - * Exactly the same as operator(). - */ - DEAL_II_CONSTEXPR_FUNCTION Number & - operator[](const TableIndices &indices); +/** + * Return a read-write reference to the indicated element. + * + * Exactly the same as operator(). + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr Number & + operator[](const TableIndices &indices); - /** - * Access to an element according to unrolled index. The function - * s.access_raw_entry(unrolled_index) does the same as - * s[s.unrolled_to_component_indices(unrolled_index)], but more - * efficiently. - */ - DEAL_II_CONSTEXPR_FUNCTION const Number & - access_raw_entry(const unsigned int unrolled_index) const; +/** + * Access to an element according to unrolled index. The function + * s.access_raw_entry(unrolled_index) does the same as + * s[s.unrolled_to_component_indices(unrolled_index)], but more + * efficiently. + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr const Number & + access_raw_entry(const unsigned int unrolled_index) const; - /** - * Access to an element according to unrolled index. The function - * s.access_raw_entry(unrolled_index) does the same as - * s[s.unrolled_to_component_indices(unrolled_index)], but more - * efficiently. - */ - DEAL_II_CONSTEXPR_FUNCTION Number & - access_raw_entry(const unsigned int unrolled_index); +/** + * Access to an element according to unrolled index. The function + * s.access_raw_entry(unrolled_index) does the same as + * s[s.unrolled_to_component_indices(unrolled_index)], but more + * efficiently. + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr Number & + access_raw_entry(const unsigned int unrolled_index); - /** - * Return the Frobenius-norm of a tensor, i.e. 
the square root of the sum of - * squares of all entries. This norm is induced by the scalar product - * defined above for two symmetric tensors. Note that it includes all - * entries of the tensor, counting symmetry, not only the unique ones (for - * example, for rank-2 tensors, this norm includes adding up the squares of - * upper right as well as lower left entries, not just one of them, although - * they are equal for symmetric tensors). - */ - DEAL_II_CONSTEXPR_FUNCTION typename numbers::NumberTraits::real_type - norm() const; +/** + * Return the Frobenius-norm of a tensor, i.e. the square root of the sum of + * squares of all entries. This norm is induced by the scalar product + * defined above for two symmetric tensors. Note that it includes all + * entries of the tensor, counting symmetry, not only the unique ones (for + * example, for rank-2 tensors, this norm includes adding up the squares of + * upper right as well as lower left entries, not just one of them, although + * they are equal for symmetric tensors). + */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr typename numbers::NumberTraits::real_type + norm() const; /** * Tensor objects can be unrolled by simply pasting all elements into one @@ -1051,38 +1173,53 @@ public: * [0,n_independent_components) the given entry in a symmetric * tensor has. */ - static DEAL_II_CONSTEXPR_FUNCTION unsigned int - component_to_unrolled_index(const TableIndices &indices); + static +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr unsigned int + component_to_unrolled_index(const TableIndices &indices); /** * The opposite of the previous function: given an index $i$ in the unrolled * form of the tensor, return what set of indices $(k,l)$ (for rank-2 * tensors) or $(k,l,m,n)$ (for rank-4 tensors) corresponds to it. */ - static DEAL_II_CONSTEXPR_FUNCTION TableIndices - unrolled_to_component_indices(const unsigned int i); + static +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr TableIndices + unrolled_to_component_indices(const unsigned int i); - /** - * Reset all values to zero. - * - * Note that this is partly inconsistent with the semantics of the @p - * clear() member functions of the standard library containers and of - * several other classes within deal.II, which not only reset the values of - * stored elements to zero, but release all memory and return the object - * into a virginial state. However, since the size of objects of the present - * type is determined by its template parameters, resizing is not an option, - * and indeed the state where all elements have a zero value is the state - * right after construction of such an object. - */ - DEAL_II_CONSTEXPR_FUNCTION void - clear(); +/** + * Reset all values to zero. + * + * Note that this is partly inconsistent with the semantics of the @p + * clear() member functions of the standard library containers and of + * several other classes within deal.II, which not only reset the values of + * stored elements to zero, but release all memory and return the object + * into a virginial state. However, since the size of objects of the present + * type is determined by its template parameters, resizing is not an option, + * and indeed the state where all elements have a zero value is the state + * right after construction of such an object. 
+ */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr void + clear(); /** * Determine an estimate for the memory consumption (in bytes) of this * object. */ - static DEAL_II_CONSTEXPR_FUNCTION std::size_t - memory_consumption(); + static +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + constexpr std::size_t + memory_consumption(); /** * Read or write the data of this object to or from a stream for the purpose @@ -1118,28 +1255,52 @@ private: // Make a few more functions friends. template - friend DEAL_II_CONSTEXPR_FUNCTION Number2 - trace(const SymmetricTensor<2, dim2, Number2> &d); + friend +# if defined(__clang__) && defined(__CUDA__) + __host__ +# endif + constexpr Number2 + trace(const SymmetricTensor<2, dim2, Number2> &d); template - friend DEAL_II_CONSTEXPR_FUNCTION Number2 - determinant(const SymmetricTensor<2, dim2, Number2> &t); + friend +# if defined(__clang__) && defined(__CUDA__) + __host__ +# endif + DEAL_II_CONSTEXPR Number2 + determinant(const SymmetricTensor<2, dim2, Number2> &t); template - friend DEAL_II_CONSTEXPR_FUNCTION SymmetricTensor<2, dim2, Number2> - deviator(const SymmetricTensor<2, dim2, Number2> &t); + friend +# if defined(__clang__) && defined(__CUDA__) + __host__ +# endif + constexpr SymmetricTensor<2, dim2, Number2> + deviator(const SymmetricTensor<2, dim2, Number2> &t); template - friend DEAL_II_CONSTEXPR_FUNCTION SymmetricTensor<2, dim2, Number2> - unit_symmetric_tensor(); + friend +# if defined(__clang__) && defined(__CUDA__) + __host__ +# endif + DEAL_II_CONSTEXPR SymmetricTensor<2, dim2, Number2> + unit_symmetric_tensor(); template - friend DEAL_II_CONSTEXPR_FUNCTION SymmetricTensor<4, dim2, Number2> - deviator_tensor(); + friend +# if defined(__clang__) && defined(__CUDA__) + __host__ +# endif + DEAL_II_CONSTEXPR SymmetricTensor<4, dim2, Number2> + deviator_tensor(); template - friend DEAL_II_CONSTEXPR_FUNCTION SymmetricTensor<4, dim2, Number2> - identity_tensor(); + friend +# if defined(__clang__) && defined(__CUDA__) + __host__ +# endif + DEAL_II_CONSTEXPR SymmetricTensor<4, dim2, Number2> + identity_tensor(); // Make a few helper classes friends as well. 
@@ -1170,10 +1331,13 @@ namespace internal namespace SymmetricTensorAccessors { template - DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE - Accessor::Accessor( - tensor_type & tensor, - const TableIndices &previous_indices) +# if defined(__clang__) && defined(__CUDA__) + __host__ +# endif + constexpr DEAL_II_ALWAYS_INLINE + Accessor::Accessor( + tensor_type & tensor, + const TableIndices &previous_indices) : tensor(tensor) , previous_indices(previous_indices) {} @@ -1181,10 +1345,13 @@ namespace internal template - DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE - Accessor - Accessor::operator[]( - const unsigned int i) +# if defined(__clang__) && defined(__CUDA__) + __host__ +# endif + constexpr inline DEAL_II_ALWAYS_INLINE + Accessor + Accessor::operator[]( + const unsigned int i) { return Accessor( tensor, merge(previous_indices, i, rank_ - P)); @@ -1193,10 +1360,13 @@ namespace internal template - DEAL_II_CONSTEXPR_FUNCTION - DEAL_II_ALWAYS_INLINE Accessor - Accessor::operator[]( - const unsigned int i) const +# if defined(__clang__) && defined(__CUDA__) + __host__ +# endif + constexpr DEAL_II_ALWAYS_INLINE + Accessor + Accessor::operator[]( + const unsigned int i) const { return Accessor( tensor, merge(previous_indices, i, rank_ - P)); @@ -1205,10 +1375,13 @@ namespace internal template - DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE - Accessor::Accessor( - tensor_type & tensor, - const TableIndices &previous_indices) +# if defined(__clang__) && defined(__CUDA__) + __host__ +# endif + constexpr DEAL_II_ALWAYS_INLINE + Accessor::Accessor( + tensor_type & tensor, + const TableIndices &previous_indices) : tensor(tensor) , previous_indices(previous_indices) {} @@ -1216,7 +1389,10 @@ namespace internal template - DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE +# if defined(__clang__) && defined(__CUDA__) + __host__ +# endif + constexpr inline DEAL_II_ALWAYS_INLINE typename Accessor::reference Accessor::operator[]( const unsigned int i) @@ -1226,7 +1402,10 @@ namespace internal template - DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE +# if defined(__clang__) && defined(__CUDA__) + __host__ +# endif + constexpr DEAL_II_ALWAYS_INLINE typename Accessor::reference Accessor::operator[]( const unsigned int i) const @@ -1262,18 +1441,24 @@ SymmetricTensor::SymmetricTensor( template template -DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE -SymmetricTensor::SymmetricTensor( - const SymmetricTensor &initializer) +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr DEAL_II_ALWAYS_INLINE + SymmetricTensor::SymmetricTensor( + const SymmetricTensor &initializer) : data(initializer.data) {} template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE -SymmetricTensor::SymmetricTensor( - const Number (&array)[n_independent_components]) +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr inline DEAL_II_ALWAYS_INLINE + SymmetricTensor::SymmetricTensor( + const Number (&array)[n_independent_components]) : data( *reinterpret_cast(array)) { @@ -1286,8 +1471,10 @@ SymmetricTensor::SymmetricTensor( template template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE - SymmetricTensor & +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor & SymmetricTensor::operator=( const SymmetricTensor &t) { @@ -1298,8 +1485,10 @@ DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE - SymmetricTensor & +# 
if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor & SymmetricTensor::operator=(const Number &d) { Assert(numbers::value_is_zero(d), @@ -1661,9 +1850,12 @@ namespace internal template -DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE - SymmetricTensor::operator Tensor() - const +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr DEAL_II_ALWAYS_INLINE + SymmetricTensor::operator Tensor() + const { return internal::SymmetricTensorImplementation::convert_to_tensor(*this); } @@ -1671,9 +1863,12 @@ DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE template -DEAL_II_CONSTEXPR_FUNCTION bool -SymmetricTensor::operator==( - const SymmetricTensor &t) const +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr bool + SymmetricTensor::operator==( + const SymmetricTensor &t) const { return data == t.data; } @@ -1681,9 +1876,12 @@ SymmetricTensor::operator==( template -DEAL_II_CONSTEXPR_FUNCTION bool -SymmetricTensor::operator!=( - const SymmetricTensor &t) const +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr bool + SymmetricTensor::operator!=( + const SymmetricTensor &t) const { return data != t.data; } @@ -1692,8 +1890,10 @@ SymmetricTensor::operator!=( template template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE - SymmetricTensor & +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor & SymmetricTensor::operator+=( const SymmetricTensor &t) { @@ -1705,8 +1905,10 @@ DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE template template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE - SymmetricTensor & +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor & SymmetricTensor::operator-=( const SymmetricTensor &t) { @@ -1718,8 +1920,10 @@ DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE template template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE - SymmetricTensor & +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor & SymmetricTensor::operator*=(const OtherNumber &d) { data *= d; @@ -1730,8 +1934,10 @@ DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE template template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE - SymmetricTensor & +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor & SymmetricTensor::operator/=(const OtherNumber &d) { data /= d; @@ -1741,8 +1947,10 @@ DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE - SymmetricTensor +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor SymmetricTensor::operator-() const { SymmetricTensor tmp = *this; @@ -1753,8 +1961,11 @@ DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE void -SymmetricTensor::clear() +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr inline DEAL_II_ALWAYS_INLINE void + SymmetricTensor::clear() { data.clear(); } @@ -1762,8 +1973,11 @@ SymmetricTensor::clear() template -DEAL_II_CONSTEXPR_FUNCTION std::size_t -SymmetricTensor::memory_consumption() +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr 
std::size_t + SymmetricTensor::memory_consumption() { // all memory consists of statically allocated memory of the current // object, no pointers @@ -1775,7 +1989,10 @@ SymmetricTensor::memory_consumption() namespace internal { template - DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE +# if defined(__clang__) && defined(__CUDA__) + __host__ +# endif + DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE typename SymmetricTensorAccessors:: double_contraction_result<2, 2, dim, Number, OtherNumber>::type perform_double_contraction( @@ -1810,7 +2027,10 @@ namespace internal template - DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE +# if defined(__clang__) && defined(__CUDA__) + __host__ +# endif + DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE typename SymmetricTensorAccessors:: double_contraction_result<4, 2, dim, Number, OtherNumber>::type perform_double_contraction( @@ -1836,7 +2056,10 @@ namespace internal template - DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE +# if defined(__clang__) && defined(__CUDA__) + __host__ +# endif + DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE typename SymmetricTensorAccessors::StorageType< 2, dim, @@ -1874,7 +2097,10 @@ namespace internal template - DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE +# if defined(__clang__) && defined(__CUDA__) + __host__ +# endif + DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE typename SymmetricTensorAccessors::StorageType< 4, dim, @@ -1916,7 +2142,10 @@ namespace internal template template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE typename internal::SymmetricTensorAccessors:: double_contraction_result::type SymmetricTensor::operator*( @@ -1934,10 +2163,13 @@ DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE template template -DEAL_II_CONSTEXPR_FUNCTION inline typename internal::SymmetricTensorAccessors:: - double_contraction_result::type - SymmetricTensor::operator*( - const SymmetricTensor<4, dim, OtherNumber> &s) const +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + DEAL_II_CONSTEXPR inline typename internal::SymmetricTensorAccessors:: + double_contraction_result::type + SymmetricTensor::operator*( + const SymmetricTensor<4, dim, OtherNumber> &s) const { typename internal::SymmetricTensorAccessors:: double_contraction_result::type tmp; @@ -2153,9 +2385,12 @@ namespace internal template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE Number & -SymmetricTensor::operator()( - const TableIndices &indices) +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr inline DEAL_II_ALWAYS_INLINE Number & + SymmetricTensor::operator()( + const TableIndices &indices) { for (unsigned int r = 0; r < rank; ++r) AssertIndexRange(indices[r], dimension); @@ -2165,9 +2400,12 @@ SymmetricTensor::operator()( template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE const Number & -SymmetricTensor::operator()( - const TableIndices &indices) const +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr inline DEAL_II_ALWAYS_INLINE const Number & + SymmetricTensor::operator()( + const TableIndices &indices) const { for (unsigned int r = 0; r < rank; ++r) AssertIndexRange(indices[r], dimension); @@ -2204,9 +2442,13 @@ namespace internal template -DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE internal:: - SymmetricTensorAccessors::Accessor - SymmetricTensor::operator[](const unsigned int row) const +# if 
defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr DEAL_II_ALWAYS_INLINE internal::SymmetricTensorAccessors:: + Accessor + SymmetricTensor::operator[]( + const unsigned int row) const { return internal::SymmetricTensorAccessors:: Accessor( @@ -2218,9 +2460,12 @@ DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE internal:: template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE internal:: - SymmetricTensorAccessors::Accessor - SymmetricTensor::operator[](const unsigned int row) +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr inline DEAL_II_ALWAYS_INLINE internal::SymmetricTensorAccessors:: + Accessor + SymmetricTensor::operator[](const unsigned int row) { return internal::SymmetricTensorAccessors:: Accessor( @@ -2232,9 +2477,12 @@ DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE internal:: template -DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE const Number & -SymmetricTensor::operator[]( - const TableIndices &indices) const +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr DEAL_II_ALWAYS_INLINE const Number & + SymmetricTensor::operator[]( + const TableIndices &indices) const { return operator()(indices); } @@ -2242,9 +2490,12 @@ SymmetricTensor::operator[]( template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE Number & -SymmetricTensor::operator[]( - const TableIndices &indices) +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr inline DEAL_II_ALWAYS_INLINE Number & + SymmetricTensor::operator[]( + const TableIndices &indices) { return operator()(indices); } @@ -2315,9 +2566,12 @@ namespace internal template -DEAL_II_CONSTEXPR_FUNCTION inline const Number & -SymmetricTensor::access_raw_entry( - const unsigned int index) const +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr inline const Number & + SymmetricTensor::access_raw_entry( + const unsigned int index) const { AssertIndexRange(index, n_independent_components); return data[internal::SymmetricTensorImplementation::entry_to_indices(*this, @@ -2327,8 +2581,12 @@ SymmetricTensor::access_raw_entry( template -DEAL_II_CONSTEXPR_FUNCTION inline Number & -SymmetricTensor::access_raw_entry(const unsigned int index) +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr inline Number & + SymmetricTensor::access_raw_entry( + const unsigned int index) { AssertIndexRange(index, n_independent_components); return data[internal::SymmetricTensorImplementation::entry_to_indices(*this, @@ -2429,8 +2687,11 @@ namespace internal template -DEAL_II_CONSTEXPR_FUNCTION typename numbers::NumberTraits::real_type -SymmetricTensor::norm() const +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr typename numbers::NumberTraits::real_type + SymmetricTensor::norm() const { return internal::compute_norm(data); } @@ -2523,9 +2784,12 @@ namespace internal template -DEAL_II_CONSTEXPR_FUNCTION unsigned int -SymmetricTensor::component_to_unrolled_index( - const TableIndices &indices) +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr unsigned int + SymmetricTensor::component_to_unrolled_index( + const TableIndices &indices) { return internal::SymmetricTensorImplementation::component_to_unrolled_index< dim>(indices); @@ -2625,9 +2889,12 @@ namespace internal } // namespace internal template -DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE TableIndices -SymmetricTensor::unrolled_to_component_indices( - const unsigned int i) +# if 
defined(__clang__) && defined(__CUDA__) +__host__ +# endif + constexpr DEAL_II_ALWAYS_INLINE TableIndices + SymmetricTensor::unrolled_to_component_indices( + const unsigned int i) { return internal::SymmetricTensorImplementation::unrolled_to_component_indices< dim>(i, std::integral_constant()); @@ -2662,10 +2929,13 @@ SymmetricTensor::serialize(Archive &ar, const unsigned int) * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE - SymmetricTensor::type> - operator+(const SymmetricTensor & left, - const SymmetricTensor &right) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr inline DEAL_II_ALWAYS_INLINE + SymmetricTensor::type> + operator+(const SymmetricTensor & left, + const SymmetricTensor &right) { SymmetricTensor::type> tmp = left; @@ -2687,10 +2957,13 @@ DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE - SymmetricTensor::type> - operator-(const SymmetricTensor & left, - const SymmetricTensor &right) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr inline DEAL_II_ALWAYS_INLINE + SymmetricTensor::type> + operator-(const SymmetricTensor & left, + const SymmetricTensor &right) { SymmetricTensor::type> tmp = left; @@ -2707,10 +2980,13 @@ DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE - Tensor::type> - operator+(const SymmetricTensor &left, - const Tensor & right) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr DEAL_II_ALWAYS_INLINE + Tensor::type> + operator+(const SymmetricTensor &left, + const Tensor & right) { return Tensor(left) + right; } @@ -2724,10 +3000,13 @@ DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE - Tensor::type> - operator+(const Tensor & left, - const SymmetricTensor &right) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr DEAL_II_ALWAYS_INLINE + Tensor::type> + operator+(const Tensor & left, + const SymmetricTensor &right) { return left + Tensor(right); } @@ -2741,10 +3020,13 @@ DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE - Tensor::type> - operator-(const SymmetricTensor &left, - const Tensor & right) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr DEAL_II_ALWAYS_INLINE + Tensor::type> + operator-(const SymmetricTensor &left, + const Tensor & right) { return Tensor(left) - right; } @@ -2758,10 +3040,13 @@ DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE - Tensor::type> - operator-(const Tensor & left, - const SymmetricTensor &right) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr DEAL_II_ALWAYS_INLINE + Tensor::type> + operator-(const Tensor & left, + const SymmetricTensor &right) { return left - Tensor(right); } @@ -2769,8 +3054,11 @@ DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE Number -determinant(const SymmetricTensor<2, dim, Number> &t) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE Number + determinant(const SymmetricTensor<2, dim, Number> &t) { 
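  // Editorial note (not an added patch line): the switch below returns the
  // closed-form determinant for each supported dimension; e.g. for a symmetric
  // 2x2 tensor, det(t) = t[0][0] * t[1][1] - t[0][1] * t[0][1], using that
  // t[1][0] == t[0][1] for symmetric tensors.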
switch (dim) { @@ -2809,8 +3097,11 @@ determinant(const SymmetricTensor<2, dim, Number> &t) * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE Number -third_invariant(const SymmetricTensor<2, dim, Number> &t) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + DEAL_II_CONSTEXPR DEAL_II_ALWAYS_INLINE Number + third_invariant(const SymmetricTensor<2, dim, Number> &t) { return determinant(t); } @@ -2818,8 +3109,11 @@ third_invariant(const SymmetricTensor<2, dim, Number> &t) template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE Number -trace(const SymmetricTensor<2, dim, Number> &d) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr inline DEAL_II_ALWAYS_INLINE Number + trace(const SymmetricTensor<2, dim, Number> &d) { Number t = d.data[0]; for (unsigned int i = 1; i < dim; ++i) @@ -2840,8 +3134,11 @@ trace(const SymmetricTensor<2, dim, Number> &d) * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION Number -first_invariant(const SymmetricTensor<2, dim, Number> &t) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr Number + first_invariant(const SymmetricTensor<2, dim, Number> &t) { return trace(t); } @@ -2859,8 +3156,11 @@ first_invariant(const SymmetricTensor<2, dim, Number> &t) * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE Number -second_invariant(const SymmetricTensor<2, 1, Number> &) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr DEAL_II_ALWAYS_INLINE Number + second_invariant(const SymmetricTensor<2, 1, Number> &) { return internal::NumberType::value(0.0); } @@ -2886,8 +3186,11 @@ second_invariant(const SymmetricTensor<2, 1, Number> &) * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE Number -second_invariant(const SymmetricTensor<2, 2, Number> &t) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr DEAL_II_ALWAYS_INLINE Number + second_invariant(const SymmetricTensor<2, 2, Number> &t) { return t[0][0] * t[1][1] - t[0][1] * t[0][1]; } @@ -2903,8 +3206,11 @@ second_invariant(const SymmetricTensor<2, 2, Number> &t) * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE Number -second_invariant(const SymmetricTensor<2, 3, Number> &t) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr DEAL_II_ALWAYS_INLINE Number + second_invariant(const SymmetricTensor<2, 3, Number> &t) { return (t[0][0] * t[1][1] + t[1][1] * t[2][2] + t[2][2] * t[0][0] - t[0][1] * t[0][1] - t[0][2] * t[0][2] - t[1][2] * t[1][2]); @@ -3308,9 +3614,11 @@ eigenvectors(const SymmetricTensor<2, dim, Number> &T, * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION - DEAL_II_ALWAYS_INLINE SymmetricTensor - transpose(const SymmetricTensor &t) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr DEAL_II_ALWAYS_INLINE SymmetricTensor + transpose(const SymmetricTensor &t) { return t; } @@ -3318,8 +3626,10 @@ DEAL_II_CONSTEXPR_FUNCTION template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE - SymmetricTensor<2, dim, Number> +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim, Number> deviator(const SymmetricTensor<2, dim, Number> &t) { SymmetricTensor<2, dim, Number> tmp = t; @@ -3335,9 +3645,11 @@ DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE template 
-DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE - SymmetricTensor<2, dim, Number> - unit_symmetric_tensor() +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim, Number> + unit_symmetric_tensor() { // create a default constructed matrix filled with // zeros, then set the diagonal elements to one @@ -3364,8 +3676,11 @@ DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE template -DEAL_II_CONSTEXPR_FUNCTION inline SymmetricTensor<4, dim, Number> -deviator_tensor() +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + DEAL_II_CONSTEXPR inline SymmetricTensor<4, dim, Number> + deviator_tensor() { SymmetricTensor<4, dim, Number> tmp; @@ -3392,9 +3707,11 @@ deviator_tensor() template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE - SymmetricTensor<4, dim, Number> - identity_tensor() +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE SymmetricTensor<4, dim, Number> + identity_tensor() { SymmetricTensor<4, dim, Number> tmp; @@ -3428,8 +3745,11 @@ DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim, Number> -invert(const SymmetricTensor<2, dim, Number> &t) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim, Number> + invert(const SymmetricTensor<2, dim, Number> &t) { return internal::SymmetricTensorImplementation::Inverse<2, dim, Number>:: value(t); @@ -3448,8 +3768,11 @@ invert(const SymmetricTensor<2, dim, Number> &t) * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION SymmetricTensor<4, dim, Number> - invert(const SymmetricTensor<4, dim, Number> &t) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr SymmetricTensor<4, dim, Number> + invert(const SymmetricTensor<4, dim, Number> &t) { return internal::SymmetricTensorImplementation::Inverse<4, dim, Number>:: value(t); @@ -3479,9 +3802,12 @@ DEAL_II_CONSTEXPR_FUNCTION SymmetricTensor<4, dim, Number> * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline SymmetricTensor<4, dim, Number> -outer_product(const SymmetricTensor<2, dim, Number> &t1, - const SymmetricTensor<2, dim, Number> &t2) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr inline SymmetricTensor<4, dim, Number> + outer_product(const SymmetricTensor<2, dim, Number> &t1, + const SymmetricTensor<2, dim, Number> &t2) { SymmetricTensor<4, dim, Number> tmp; @@ -3505,8 +3831,10 @@ outer_product(const SymmetricTensor<2, dim, Number> &t1, * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE - SymmetricTensor<2, dim, Number> +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim, Number> symmetrize(const Tensor<2, dim, Number> &t) { SymmetricTensor<2, dim, Number> result; @@ -3530,8 +3858,10 @@ DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE - SymmetricTensor +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor operator*(const SymmetricTensor &t, const Number &factor) { SymmetricTensor tt = t; @@ -3549,8 +3879,10 @@ DEAL_II_CONSTEXPR_FUNCTION inline 
DEAL_II_ALWAYS_INLINE * @relatesalso SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION - DEAL_II_ALWAYS_INLINE SymmetricTensor +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr DEAL_II_ALWAYS_INLINE SymmetricTensor operator*(const Number &factor, const SymmetricTensor &t) { // simply forward to the other operator @@ -3584,13 +3916,16 @@ DEAL_II_CONSTEXPR_FUNCTION * @relates SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE SymmetricTensor< - rank_, - dim, - typename ProductType::type>::type> -operator*(const SymmetricTensor &t, - const OtherNumber & factor) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor< + rank_, + dim, + typename ProductType::type>::type> + operator*(const SymmetricTensor &t, + const OtherNumber & factor) { // form the product. we have to convert the two factors into the final // type via explicit casts because, for awkward reasons, the C++ @@ -3613,13 +3948,16 @@ operator*(const SymmetricTensor &t, * @relates SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE SymmetricTensor< - rank_, - dim, - typename ProductType::type>::type> -operator*(const Number & factor, - const SymmetricTensor &t) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor< + rank_, + dim, + typename ProductType::type>::type> + operator*(const Number & factor, + const SymmetricTensor &t) { // simply forward to the other operator with switched arguments return (t * factor); @@ -3633,13 +3971,16 @@ operator*(const Number & factor, * @relates SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline SymmetricTensor< - rank_, - dim, - typename ProductType::type>::type> -operator/(const SymmetricTensor &t, - const OtherNumber & factor) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr inline SymmetricTensor< + rank_, + dim, + typename ProductType::type>::type> + operator/(const SymmetricTensor &t, + const OtherNumber & factor) { using product_type = typename ProductType::type; SymmetricTensor tt(t); @@ -3656,8 +3997,10 @@ operator/(const SymmetricTensor &t, * @relates SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE - SymmetricTensor +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor operator*(const SymmetricTensor &t, const double factor) { SymmetricTensor tt(t); @@ -3674,8 +4017,10 @@ DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE * @relates SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE - SymmetricTensor +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor operator*(const double factor, const SymmetricTensor &t) { SymmetricTensor tt(t); @@ -3691,8 +4036,11 @@ DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE * @relates SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline SymmetricTensor -operator/(const SymmetricTensor &t, const double factor) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr inline SymmetricTensor + operator/(const SymmetricTensor &t, const double factor) { SymmetricTensor tt(t); tt /= factor; @@ -3709,7 +4057,10 @@ operator/(const SymmetricTensor &t, const double factor) * @relates SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE +#if 
defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr DEAL_II_ALWAYS_INLINE typename ProductType::type scalar_product(const SymmetricTensor<2, dim, Number> & t1, const SymmetricTensor<2, dim, OtherNumber> &t2) @@ -3731,7 +4082,10 @@ DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE * @relates SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr inline DEAL_II_ALWAYS_INLINE typename ProductType::type scalar_product(const SymmetricTensor<2, dim, Number> &t1, const Tensor<2, dim, OtherNumber> & t2) @@ -3758,7 +4112,10 @@ DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE * @relates SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr DEAL_II_ALWAYS_INLINE typename ProductType::type scalar_product(const Tensor<2, dim, Number> & t1, const SymmetricTensor<2, dim, OtherNumber> &t2) @@ -3782,11 +4139,14 @@ DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE * @relates SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline DEAL_II_ALWAYS_INLINE void -double_contract( - SymmetricTensor<2, 1, typename ProductType::type> &tmp, - const SymmetricTensor<4, 1, Number> & t, - const SymmetricTensor<2, 1, OtherNumber> & s) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr inline DEAL_II_ALWAYS_INLINE void + double_contract( + SymmetricTensor<2, 1, typename ProductType::type> &tmp, + const SymmetricTensor<4, 1, Number> & t, + const SymmetricTensor<2, 1, OtherNumber> & s) { tmp[0][0] = t[0][0][0][0] * s[0][0]; } @@ -3808,11 +4168,14 @@ double_contract( * @relates SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline void -double_contract( - SymmetricTensor<2, 1, typename ProductType::type> &tmp, - const SymmetricTensor<2, 1, Number> & s, - const SymmetricTensor<4, 1, OtherNumber> & t) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr inline void + double_contract( + SymmetricTensor<2, 1, typename ProductType::type> &tmp, + const SymmetricTensor<2, 1, Number> & s, + const SymmetricTensor<4, 1, OtherNumber> & t) { tmp[0][0] = t[0][0][0][0] * s[0][0]; } @@ -3834,11 +4197,14 @@ double_contract( * @relates SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline void -double_contract( - SymmetricTensor<2, 2, typename ProductType::type> &tmp, - const SymmetricTensor<4, 2, Number> & t, - const SymmetricTensor<2, 2, OtherNumber> & s) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr inline void + double_contract( + SymmetricTensor<2, 2, typename ProductType::type> &tmp, + const SymmetricTensor<4, 2, Number> & t, + const SymmetricTensor<2, 2, OtherNumber> & s) { const unsigned int dim = 2; @@ -3865,11 +4231,14 @@ double_contract( * @relates SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline void -double_contract( - SymmetricTensor<2, 2, typename ProductType::type> &tmp, - const SymmetricTensor<2, 2, Number> & s, - const SymmetricTensor<4, 2, OtherNumber> & t) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr inline void + double_contract( + SymmetricTensor<2, 2, typename ProductType::type> &tmp, + const SymmetricTensor<2, 2, Number> & s, + const SymmetricTensor<4, 2, OtherNumber> & t) { const unsigned int dim = 2; @@ -3896,11 +4265,14 @@ double_contract( * @relates SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline void -double_contract( - 
SymmetricTensor<2, 3, typename ProductType::type> &tmp, - const SymmetricTensor<4, 3, Number> & t, - const SymmetricTensor<2, 3, OtherNumber> & s) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr inline void + double_contract( + SymmetricTensor<2, 3, typename ProductType::type> &tmp, + const SymmetricTensor<4, 3, Number> & t, + const SymmetricTensor<2, 3, OtherNumber> & s) { const unsigned int dim = 3; @@ -3928,11 +4300,14 @@ double_contract( * @relates SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION inline void -double_contract( - SymmetricTensor<2, 3, typename ProductType::type> &tmp, - const SymmetricTensor<2, 3, Number> & s, - const SymmetricTensor<4, 3, OtherNumber> & t) +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr inline void + double_contract( + SymmetricTensor<2, 3, typename ProductType::type> &tmp, + const SymmetricTensor<2, 3, Number> & s, + const SymmetricTensor<4, 3, OtherNumber> & t) { const unsigned int dim = 3; @@ -3952,8 +4327,10 @@ double_contract( * @relates SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION - Tensor<1, dim, typename ProductType::type> +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr Tensor<1, dim, typename ProductType::type> operator*(const SymmetricTensor<2, dim, Number> &src1, const Tensor<1, dim, OtherNumber> & src2) { @@ -3972,8 +4349,10 @@ DEAL_II_CONSTEXPR_FUNCTION * @relates SymmetricTensor */ template -DEAL_II_CONSTEXPR_FUNCTION - Tensor<1, dim, typename ProductType::type> +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr Tensor<1, dim, typename ProductType::type> operator*(const Tensor<1, dim, Number> & src1, const SymmetricTensor<2, dim, OtherNumber> &src2) { @@ -4007,7 +4386,10 @@ template -DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr DEAL_II_ALWAYS_INLINE typename Tensor::type>::tensor_type @@ -4043,7 +4425,10 @@ template -DEAL_II_CONSTEXPR_FUNCTION DEAL_II_ALWAYS_INLINE +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + constexpr DEAL_II_ALWAYS_INLINE typename Tensor::type>::tensor_type diff --git a/include/deal.II/base/table_indices.h b/include/deal.II/base/table_indices.h index ae1ceff506..50d640d9bb 100644 --- a/include/deal.II/base/table_indices.h +++ b/include/deal.II/base/table_indices.h @@ -89,12 +89,15 @@ public: constexpr bool operator!=(const TableIndices &other) const; - /** - * Sort the indices in ascending order. While this operation is not very - * useful for Table objects, it is used for the SymmetricTensor class. - */ - DEAL_II_CONSTEXPR_FUNCTION void - sort(); +/** + * Sort the indices in ascending order. While this operation is not very + * useful for Table objects, it is used for the SymmetricTensor class. 
+ */ +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + DEAL_II_CONSTEXPR void + sort(); /** * Write or read the data of this object to or from a stream for the purpose @@ -165,8 +168,11 @@ TableIndices::operator!=(const TableIndices &other) const template -DEAL_II_CONSTEXPR_FUNCTION inline void -TableIndices::sort() +#if defined(__clang__) && defined(__CUDA__) +__host__ +#endif + DEAL_II_CONSTEXPR inline void + TableIndices::sort() { std::sort(std::begin(indices), std::end(indices)); } diff --git a/include/deal.II/physics/elasticity/standard_tensors.h b/include/deal.II/physics/elasticity/standard_tensors.h index 550bb84c35..069a1ad743 100644 --- a/include/deal.II/physics/elasticity/standard_tensors.h +++ b/include/deal.II/physics/elasticity/standard_tensors.h @@ -231,8 +231,12 @@ namespace Physics * @dealiiHolzapfelA{229,6.83} */ template - static DEAL_II_CONSTEXPR_FUNCTION SymmetricTensor<4, dim, Number> - Dev_P(const Tensor<2, dim, Number> &F); + static +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + DEAL_II_CONSTEXPR SymmetricTensor<4, dim, Number> + Dev_P(const Tensor<2, dim, Number> &F); /** * Return the transpose of the fourth-order referential deviatoric tensor, @@ -246,8 +250,12 @@ namespace Physics * @f] */ template - static DEAL_II_CONSTEXPR_FUNCTION SymmetricTensor<4, dim, Number> - Dev_P_T(const Tensor<2, dim, Number> &F); + static +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + DEAL_II_CONSTEXPR SymmetricTensor<4, dim, Number> + Dev_P_T(const Tensor<2, dim, Number> &F); /** @} */ @@ -273,8 +281,12 @@ namespace Physics * @dealiiHolzapfelA{228,6.82} */ template - static DEAL_II_CONSTEXPR_FUNCTION SymmetricTensor<2, dim, Number> - ddet_F_dC(const Tensor<2, dim, Number> &F); + static +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + DEAL_II_CONSTEXPR SymmetricTensor<2, dim, Number> + ddet_F_dC(const Tensor<2, dim, Number> &F); /** @} */ @@ -298,8 +310,12 @@ namespace Physics * @dealiiWriggersA{76,3.255} */ template - static DEAL_II_CONSTEXPR_FUNCTION SymmetricTensor<4, dim, Number> - dC_inv_dC(const Tensor<2, dim, Number> &F); + static +#if defined(__clang__) && defined(__CUDA__) + __host__ +#endif + DEAL_II_CONSTEXPR SymmetricTensor<4, dim, Number> + dC_inv_dC(const Tensor<2, dim, Number> &F); /** @} */ }; @@ -316,9 +332,12 @@ namespace Physics template template -DEAL_II_CONSTEXPR_FUNCTION inline SymmetricTensor<4, dim, Number> -Physics::Elasticity::StandardTensors::Dev_P( - const Tensor<2, dim, Number> &F) +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + DEAL_II_CONSTEXPR inline SymmetricTensor<4, dim, Number> + Physics::Elasticity::StandardTensors::Dev_P( + const Tensor<2, dim, Number> &F) { // Make things work with AD types using std::pow; @@ -343,9 +362,12 @@ Physics::Elasticity::StandardTensors::Dev_P( template template -DEAL_II_CONSTEXPR_FUNCTION inline SymmetricTensor<4, dim, Number> -Physics::Elasticity::StandardTensors::Dev_P_T( - const Tensor<2, dim, Number> &F) +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + DEAL_II_CONSTEXPR inline SymmetricTensor<4, dim, Number> + Physics::Elasticity::StandardTensors::Dev_P_T( + const Tensor<2, dim, Number> &F) { // Make things work with AD types using std::pow; @@ -371,9 +393,12 @@ Physics::Elasticity::StandardTensors::Dev_P_T( template template -DEAL_II_CONSTEXPR_FUNCTION SymmetricTensor<2, dim, Number> - Physics::Elasticity::StandardTensors::ddet_F_dC( - const Tensor<2, dim, Number> &F) +# if defined(__clang__) && 
defined(__CUDA__) +__host__ +# endif + DEAL_II_CONSTEXPR SymmetricTensor<2, dim, Number> + Physics::Elasticity::StandardTensors::ddet_F_dC( + const Tensor<2, dim, Number> &F) { return internal::NumberType::value(0.5 * determinant(F)) * symmetrize(invert(transpose(F) * F)); @@ -383,9 +408,12 @@ DEAL_II_CONSTEXPR_FUNCTION SymmetricTensor<2, dim, Number> template template -DEAL_II_CONSTEXPR_FUNCTION inline SymmetricTensor<4, dim, Number> -Physics::Elasticity::StandardTensors::dC_inv_dC( - const Tensor<2, dim, Number> &F) +# if defined(__clang__) && defined(__CUDA__) +__host__ +# endif + DEAL_II_CONSTEXPR inline SymmetricTensor<4, dim, Number> + Physics::Elasticity::StandardTensors::dC_inv_dC( + const Tensor<2, dim, Number> &F) { const SymmetricTensor<2, dim, Number> C_inv = symmetrize(invert(transpose(F) * F)); -- 2.39.5
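
A minimal, self-contained sketch of the annotation pattern that the hunks above spell out at each declaration (toy code, not part of deal.II): the #if guard and the placement of __host__ ahead of the constexpr specifier mirror the patch, while the function n_symmetric_entries and its use are illustrative assumptions only.

// Sketch of the clang/CUDA host-annotation pattern used throughout this patch.
// The #if block is active only when clang compiles CUDA sources; there the
// explicit __host__ restricts the constexpr function to host compilation.
// Everywhere else the declaration reduces to a plain constexpr function.
template <int dim>
#if defined(__clang__) && defined(__CUDA__)
__host__
#endif
constexpr int
n_symmetric_entries()
{
  // independent entries of a dim x dim symmetric matrix
  return dim * (dim + 1) / 2;
}

// compile-time usage on the host
static_assert(n_symmetric_entries<3>() == 6,
              "a 3x3 symmetric matrix has 6 independent entries");

int
main()
{
  return 0;
}

Writing the guard out at every declaration, instead of hiding it behind the removed DEAL_II_CONSTEXPR_FUNCTION macro, makes the clang+CUDA special case visible directly in each signature.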