DEAL_II_WITH_TASKFLOW=1 \
DEAL_II_WITH_COMPLEX_VALUES=1 \
DEAL_II_WITH_CUDA=1 \
- DEAL_II_COMPILER_CUDA_AWARE=1 \
DEAL_II_WITH_GINKGO=1 \
DEAL_II_WITH_GMSH=1 \
DEAL_II_GMSH_WITH_API=1 \
#cmakedefine DEAL_II_RESTRICT @DEAL_II_RESTRICT@
#cmakedefine DEAL_II_COMPILER_HAS_DIAGNOSTIC_PRAGMA
-/*
- * A variable to tell if the compiler used in the current compilation process
- * understands CUDA code.
- */
-#if defined(DEAL_II_WITH_CUDA) && defined(__CUDACC__)
-# define DEAL_II_COMPILER_CUDA_AWARE
-#endif
-
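/*
 * A minimal sketch, not part of the patch: with DEAL_II_COMPILER_CUDA_AWARE
 * removed, code that should only be compiled when deal.II was configured with
 * CUDA support is guarded by DEAL_II_WITH_CUDA alone; the old macro had
 * additionally required the current translation unit to be compiled by nvcc
 * (__CUDACC__). The function name below is hypothetical.
 */
#include <deal.II/base/config.h>

#ifdef DEAL_II_WITH_CUDA
void run_cuda_code(); // visible whenever CUDA support is configured
#endif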
/***********************************************************************
* CPU features:
*
#include <deal.II/base/array_view.h>
#include <deal.II/base/exceptions.h>
-#ifdef DEAL_II_COMPILER_CUDA_AWARE
+#ifdef DEAL_II_WITH_CUDA
# include <cusolverDn.h>
# include <cusolverSp.h>
# include <cusparse.h>
#include <deal.II/base/types.h>
-#ifdef DEAL_II_COMPILER_CUDA_AWARE
+#ifdef DEAL_II_WITH_CUDA
# include <cuComplex.h>
#endif
}
};
-#ifdef DEAL_II_COMPILER_CUDA_AWARE
+#ifdef DEAL_II_WITH_CUDA
template <>
struct NumberType<cuComplex>
{
}
Number *temp_array_ptr = temporary_storage.data();
-# if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
- defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+# if defined(DEAL_II_WITH_CUDA) && defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
// When using CUDA-aware MPI, the set of local indices that are ghost
// indices on other processors is expanded in arrays. This is for
// performance reasons as this can significantly decrease the number of
for (unsigned int i = 0; i < n_import_targets; ++i)
{
-# if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
- defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+# if defined(DEAL_II_WITH_CUDA) && defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
{
const auto chunk_size = import_indices_plain_dev[i].second;
}
else
{
-# ifdef DEAL_II_COMPILER_CUDA_AWARE
+# ifdef DEAL_II_WITH_CUDA
cudaError_t cuda_error =
cudaMemcpy(ghost_array.data() + ghost_range.first,
ghost_array.data() + offset,
}
else
{
-# ifdef DEAL_II_COMPILER_CUDA_AWARE
+# ifdef DEAL_II_WITH_CUDA
cudaError_t cuda_error =
cudaMemcpy(ghost_array_ptr + offset,
ghost_array.data() + my_ghosts->first,
ExcMessage("Index overflow: Maximum message size in MPI is 2GB. "
"The number of ghost entries times the size of 'Number' "
"exceeds this value. This is not supported."));
-# if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
- defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+# if defined(DEAL_II_WITH_CUDA) && defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
cudaDeviceSynchronize();
# endif
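/*
 * A minimal standalone sketch, not from the patch, of the ordering enforced
 * by the cudaDeviceSynchronize() above: with CUDA-aware MPI the send buffer
 * lives in device memory, so every kernel writing to it must have finished
 * before MPI reads it. All names below are hypothetical.
 */
#include <mpi.h>

#include <cuda_runtime.h>

__global__ void
fill_send_buffer(double *buffer, const int n)
{
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n)
    buffer[i] = static_cast<double>(i);
}

void
send_device_buffer(double *device_buffer, const int n, const int dest, MPI_Comm comm)
{
  fill_send_buffer<<<(n + 255) / 256, 256>>>(device_buffer, n);
  // Make sure the kernel has completed before MPI touches the device memory.
  cudaDeviceSynchronize();
  MPI_Send(device_buffer, n, MPI_DOUBLE, dest, /*tag=*/0, comm);
}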
"import_from_ghosted_array_start as is passed "
"to import_from_ghosted_array_finish."));
-# ifdef DEAL_II_COMPILER_CUDA_AWARE
+# ifdef DEAL_II_WITH_CUDA
if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
{
cudaMemset(ghost_array.data(),
const unsigned int n_import_targets = import_targets_data.size();
const unsigned int n_ghost_targets = ghost_targets_data.size();
-# if (defined(DEAL_II_COMPILER_CUDA_AWARE) && \
- defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
+# if (defined(DEAL_II_WITH_CUDA) && defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
// When using CUDA-aware MPI, the set of local indices that are ghost
// indices on other processors is expanded in arrays. This is for
// performance reasons as this can significantly decrease the number of
AssertThrowMPI(ierr);
const Number *read_position = temporary_storage.data();
-# if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
- defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
+# if !(defined(DEAL_II_WITH_CUDA) && defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
// If the operation is not an insertion, add the imported data to the
// local values. For insert, nothing is done here (but in debug mode
// we assert that the specified value is either zero or matches with
{
Assert(ghost_array.begin() != nullptr, ExcInternalError());
-# if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
- defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+# if defined(DEAL_II_WITH_CUDA) && defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
{
Assert(std::is_trivial<Number>::value, ExcNotImplemented());
DEAL_II_HOST_DEVICE const typename Tensor<rank_, dim, Number>::value_type &
Tensor<rank_, dim, Number>::operator[](const unsigned int i) const
{
-# ifndef DEAL_II_COMPILER_CUDA_AWARE
+# if KOKKOS_VERSION < 30700
+# ifdef KOKKOS_ACTIVE_MEMORY_SPACE_HOST
AssertIndexRange(i, dim);
+# endif
+# else
+ KOKKOS_IF_ON_HOST((AssertIndexRange(i, dim);))
# endif
return values[i];
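/*
 * A minimal sketch, assuming Kokkos is available, of the host-only check
 * pattern introduced above: the old guard disabled the assertion whenever the
 * file was compiled by nvcc, whereas the Kokkos macros only suppress it in
 * device code. 'element_of' and its arguments are hypothetical.
 */
#include <Kokkos_Macros.hpp>

#include <cassert>

template <int dim>
KOKKOS_INLINE_FUNCTION double
element_of(const double (&values)[dim], const unsigned int i)
{
#if KOKKOS_VERSION < 30700
#  ifdef KOKKOS_ACTIVE_MEMORY_SPACE_HOST
  assert(i < dim); // pre-3.7 Kokkos: check only in the host memory space
#  endif
#else
  // Kokkos 3.7+ emits this statement for host execution only.
  KOKKOS_IF_ON_HOST((assert(i < dim);))
#endif
  return values[i];
}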
constexpr inline DEAL_II_ALWAYS_INLINE const Number &
Tensor<rank_, dim, Number>::operator[](const TableIndices<rank_> &indices) const
{
-# ifndef DEAL_II_COMPILER_CUDA_AWARE
+# if KOKKOS_VERSION < 30700
+# ifdef KOKKOS_ACTIVE_MEMORY_SPACE_HOST
Assert(dim != 0,
ExcMessage("Cannot access an object of type Tensor<rank_,0,Number>"));
+# endif
+# else
+ KOKKOS_IF_ON_HOST(
+ (Assert(dim != 0,
+ ExcMessage(
+ "Cannot access an object of type Tensor<rank_,0,Number>"));))
# endif
return TensorAccessors::extract<rank_>(*this, indices);
constexpr inline DEAL_II_ALWAYS_INLINE Number &
Tensor<rank_, dim, Number>::operator[](const TableIndices<rank_> &indices)
{
-# ifndef DEAL_II_COMPILER_CUDA_AWARE
+# if KOKKOS_VERSION < 30700
+# ifdef KOKKOS_ACTIVE_MEMORY_SPACE_HOST
Assert(dim != 0,
ExcMessage("Cannot access an object of type Tensor<rank_,0,Number>"));
+# endif
+# else
+ KOKKOS_IF_ON_HOST(
+ (Assert(dim != 0,
+ ExcMessage(
+ "Cannot access an object of type Tensor<rank_,0,Number>"));))
# endif
return TensorAccessors::extract<rank_>(*this, indices);
vec.zero_out_ghost_values();
}
-#ifdef DEAL_II_COMPILER_CUDA_AWARE
+#ifdef DEAL_II_WITH_CUDA
template <typename Number>
__global__ void
set_zero_kernel(const size_type * constrained_dofs,
#include <deal.II/base/config.h>
-#ifdef DEAL_II_COMPILER_CUDA_AWARE
+#ifdef DEAL_II_WITH_CUDA
DEAL_II_NAMESPACE_OPEN
#include <deal.II/base/config.h>
-#ifdef DEAL_II_COMPILER_CUDA_AWARE
+#ifdef DEAL_II_WITH_CUDA
# include <deal.II/base/cuda_size.h>
#include <deal.II/lac/cuda_kernels.h>
-#ifdef DEAL_II_COMPILER_CUDA_AWARE
+#ifdef DEAL_II_WITH_CUDA
DEAL_II_NAMESPACE_OPEN
#include <memory>
-#ifdef DEAL_II_COMPILER_CUDA_AWARE
+#ifdef DEAL_II_WITH_CUDA
DEAL_II_NAMESPACE_OPEN
#include <deal.II/base/config.h>
-#ifdef DEAL_II_COMPILER_CUDA_AWARE
+#ifdef DEAL_II_WITH_CUDA
# include <deal.II/base/cuda.h>
# include <deal.II/lac/cuda_sparse_matrix.h>
#include <iomanip>
-#ifdef DEAL_II_COMPILER_CUDA_AWARE
+#ifdef DEAL_II_WITH_CUDA
# include <deal.II/base/cuda.h>
# include <deal.II/lac/cuda_vector.h>
}
-# ifdef DEAL_II_COMPILER_CUDA_AWARE
+# ifdef DEAL_II_WITH_CUDA
template <typename Number>
__global__ void
set_initial_guess_kernel(const types::global_dof_index offset,
const Number mean_value = vector.mean_value();
vector.add(-mean_value);
}
-# endif // DEAL_II_COMPILER_CUDA_AWARE
+# endif // DEAL_II_WITH_CUDA
struct EigenvalueTracker
{
#include <deal.II/base/config.h>
-#ifdef DEAL_II_COMPILER_CUDA_AWARE
+#ifdef DEAL_II_WITH_CUDA
# include <deal.II/base/tensor.h>
# include <deal.II/base/utilities.h>
#include <deal.II/base/config.h>
-#ifdef DEAL_II_COMPILER_CUDA_AWARE
+#ifdef DEAL_II_WITH_CUDA
# include <deal.II/base/cuda_size.h>
#include <deal.II/base/config.h>
-#ifdef DEAL_II_COMPILER_CUDA_AWARE
+#ifdef DEAL_II_WITH_CUDA
# include <deal.II/base/cuda_size.h>
# include <deal.II/base/mpi_stub.h>
#include <deal.II/matrix_free/cuda_matrix_free.h>
-#ifdef DEAL_II_COMPILER_CUDA_AWARE
+#ifdef DEAL_II_WITH_CUDA
# include <deal.II/base/cuda.h>
# include <deal.II/base/cuda_size.h>
#include <deal.II/matrix_free/cuda_matrix_free.templates.h>
-#ifdef DEAL_II_COMPILER_CUDA_AWARE
+#ifdef DEAL_II_WITH_CUDA
DEAL_II_NAMESPACE_OPEN
};
-#ifdef DEAL_II_COMPILER_CUDA_AWARE
+#ifdef DEAL_II_WITH_CUDA
// By default, all the ranks will try to access device 0.
// If we are running with MPI support, it is better to address different
// graphics cards for different processes even if only one node is used. The choice below