-D DEAL_II_WITH_KOKKOS="ON" \
-D KOKKOS_DIR=${GITHUB_WORKSPACE}/../kokkos-install \
-D DEAL_II_WITH_MPI="ON" \
+ -D DEAL_II_MPI_WITH_DEVICE_SUPPORT="ON" \
-D DEAL_II_WITH_P4EST="ON" \
-D DEAL_II_COMPONENT_EXAMPLES="ON" \
..
@DEAL_II_EXPAND_COMPLEX_SCALARS@;
}
+// complex types and long double are typically not directly supported on GPUs
+MPI_DEVICE_SCALARS := { int;
+ long int;
+ unsigned int;
+ unsigned long int;
+ unsigned long long int;
+ float;
+ double;
+ }
+
// template names for serial vectors that we can instantiate as T<S> where
// S=REAL_SCALARS for example
DEAL_II_VEC_TEMPLATES := { Vector; BlockVector }
# (in Modules/FindMPI.cmake) at some point. For the time being this is an
# advanced configuration option.
#
- option(DEAL_II_MPI_WITH_CUDA_SUPPORT "Enable MPI Cuda support" OFF)
- mark_as_advanced(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+ if(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+ option(DEAL_II_MPI_WITH_DEVICE_SUPPORT "Enable MPI Device support" ON)
+ else()
+ option(DEAL_II_MPI_WITH_DEVICE_SUPPORT "Enable MPI Device support" OFF)
+ endif()
+ mark_as_advanced(DEAL_II_MPI_WITH_DEVICE_SUPPORT)
endmacro()
macro(feature_mpi_error_message)
if(NOT DEAL_II_WITH_MPI)
#
- # Disable and hide the DEAL_II_MPI_WITH_CUDA_SUPPORT option
+ # Disable and hide the DEAL_II_MPI_WITH_DEVICE_SUPPORT option
#
- set(DEAL_II_MPI_WITH_CUDA_SUPPORT)
- unset(DEAL_II_MPI_WITH_CUDA_SUPPORT CACHE)
+ set(DEAL_II_MPI_WITH_DEVICE_SUPPORT)
+ unset(DEAL_II_MPI_WITH_DEVICE_SUPPORT CACHE)
endif()
DEAL_II_LAPACK_WITH_MKL=1 \
DEAL_II_WITH_METIS=1 \
DEAL_II_WITH_MPI=1 \
- DEAL_II_MPI_WITH_CUDA_SUPPORT=1 \
+ DEAL_II_MPI_WITH_DEVICE_SUPPORT=1 \
DEAL_II_MPI_VERSION_MAJOR=3 \
DEAL_II_MPI_VERSION_MINOR=0 \
DEAL_II_WITH_MUPARSER=1 \
-DDEAL_II_WITH_CUDA=ON
-DDEAL_II_WITH_MPI=ON
- -DDEAL_II_MPI_WITH_CUDA_SUPPORT=ON
+ -DDEAL_II_MPI_WITH_DEVICE_SUPPORT=ON
</pre>
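As noted below, deal.II cannot verify at configure time that the MPI library
really supports communication from device buffers. A rough,
implementation-specific way to query this at run time is the following sketch
(it assumes Open MPI; <code>MPIX_Query_cuda_support()</code> is a non-standard
extension and other MPI libraries may offer nothing comparable):
<pre>
#include &lt;mpi.h&gt;
#if defined(OPEN_MPI)
#  include &lt;mpi-ext.h&gt;  // declares MPIX_Query_cuda_support() for Open MPI
#endif

bool mpi_is_gpu_aware()
{
#if defined(MPIX_CUDA_AWARE_SUPPORT) &amp;&amp; MPIX_CUDA_AWARE_SUPPORT
  // Ask Open MPI at run time whether it was built with CUDA support.
  return MPIX_Query_cuda_support() == 1;
#else
  // No portable way to query this; assume it is not supported.
  return false;
#endif
}
</pre>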
Note that there is no check that detects whether the MPI implementation
really is CUDA-aware. Activating this flag for incompatible MPI libraries
// memory space to use. There is also LinearAlgebra::CUDAWrappers::Vector
// that always uses GPU memory storage but doesn't work with MPI. It might
// be worth noticing that the communication between different MPI processes
- // can be improved if the MPI implementation is CUDA-aware and the configure
- // flag `DEAL_II_MPI_WITH_CUDA_SUPPORT` is enabled. (The value of this
+ // can be improved if the MPI implementation is GPU-aware and the configure
+ // flag `DEAL_II_MPI_WITH_DEVICE_SUPPORT` is enabled. (The value of this
// flag needs to be set at the time you call `cmake` when installing
// deal.II.)
//
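+ // As a quick illustration (not something this program relies on), a
+ // host-memory and a device-memory variant of such a vector would be
+ // declared as, for example,
+ //   LinearAlgebra::distributed::Vector<double, MemorySpace::Host>    v_host;
+ //   LinearAlgebra::distributed::Vector<double, MemorySpace::Default> v_device;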
# define DEAL_II_MPI_VERSION_GTE(major,minor) false
#endif
+#cmakedefine DEAL_II_MPI_WITH_DEVICE_SUPPORT
+#ifdef DEAL_II_MPI_WITH_DEVICE_SUPPORT
#cmakedefine DEAL_II_MPI_WITH_CUDA_SUPPORT
+#endif
/***********************************************************************
* Two macro names that we put at the top and bottom of all deal.II files
private:
/**
* Initialize import_indices_plain_dev from import_indices_data. This
- * function is only used when using CUDA-aware MPI.
+ * function is only used when using device-aware MPI.
*/
void
initialize_import_indices_plain_dev() const;
/**
* The set of (local) indices that we are importing during compress(),
* i.e., others' ghosts that belong to the local range. The data stored is
- * the same than in import_indices_data but the data is expanded in plain
- * arrays. This variable is only used when using CUDA-aware MPI.
+ * the same as in import_indices_data but the data is expanded in plain
+ * arrays. This variable is only used when using device-aware MPI.
*/
// The variable is mutable to enable lazy initialization in
- // export_to_ghosted_array_start(). This way partitioner does not have to
- // be templated on the MemorySpaceType.
+ // export_to_ghosted_array_start().
mutable std::vector<
- std::pair<std::unique_ptr<unsigned int[], void (*)(unsigned int *)>,
- unsigned int>>
+ Kokkos::View<unsigned int *, MemorySpace::Default::kokkos_space>>
import_indices_plain_dev;
/**
}
Number *temp_array_ptr = temporary_storage.data();
-# if defined(DEAL_II_WITH_CUDA) && defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
- // When using CUDAs-aware MPI, the set of local indices that are ghosts
+# if defined(DEAL_II_MPI_WITH_DEVICE_SUPPORT)
+      // When using device-aware MPI, the set of local indices that are ghost
// indices on other processors is expanded in arrays. This is for
// performance reasons as this can significantly decrease the number of
      // kernels launched. The indices are expanded the first time the function
// is called.
- if ((std::is_same<MemorySpaceType, MemorySpace::CUDA>::value) &&
+ if ((std::is_same<MemorySpaceType, MemorySpace::Default>::value) &&
(import_indices_plain_dev.size() == 0))
initialize_import_indices_plain_dev();
# endif
for (unsigned int i = 0; i < n_import_targets; ++i)
{
-# if defined(DEAL_II_WITH_CUDA) && defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
- if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+# if defined(DEAL_II_MPI_WITH_DEVICE_SUPPORT)
+# ifdef DEAL_II_HAVE_CXX17
+ if constexpr (std::is_same<MemorySpaceType,
+ MemorySpace::Default>::value)
+# else
+ if (std::is_same<MemorySpaceType, MemorySpace::Default>::value)
+# endif
{
- const auto chunk_size = import_indices_plain_dev[i].second;
- const int n_blocks =
- 1 + chunk_size / (::dealii::CUDAWrappers::chunk_size *
- ::dealii::CUDAWrappers::block_size);
- ::dealii::LinearAlgebra::CUDAWrappers::kernel::
- gather<<<n_blocks, ::dealii::CUDAWrappers::block_size>>>(
- temp_array_ptr,
- import_indices_plain_dev[i].first.get(),
- locally_owned_array.data(),
- chunk_size);
- cudaDeviceSynchronize();
+ const auto chunk_size = import_indices_plain_dev[i].size();
+ using IndexType = decltype(chunk_size);
+
+ auto import_indices = import_indices_plain_dev[i];
+ auto locally_owned_array_data = locally_owned_array.data();
+ MemorySpace::Default::kokkos_space::execution_space exec;
+ Kokkos::parallel_for(
+ "fill temp_array_ptr",
+ Kokkos::RangePolicy<
+ MemorySpace::Default::kokkos_space::execution_space>(
+ exec, 0, chunk_size),
+ KOKKOS_LAMBDA(IndexType idx) {
+ temp_array_ptr[idx] =
+ locally_owned_array_data[import_indices[idx]];
+ });
+ exec.fence();
}
else
# endif
{
const unsigned int chunk_size =
ghost_range.second - ghost_range.first;
+# ifdef DEAL_II_HAVE_CXX17
+ if constexpr (std::is_same<MemorySpaceType,
+ MemorySpace::Host>::value)
+# else
if (std::is_same<MemorySpaceType, MemorySpace::Host>::value)
+# endif
{
- std::copy(ghost_array.data() + offset,
- ghost_array.data() + offset + chunk_size,
- ghost_array.data() + ghost_range.first);
+ // If source and destination are overlapping, we must be
+ // careful to use an appropriate copy function.
+ if (ghost_range.first > offset)
+ std::copy_backward(ghost_array.data() + offset,
+ ghost_array.data() + offset +
+ chunk_size,
+ ghost_array.data() +
+                                         ghost_range.second);
+ else
+ std::copy(ghost_array.data() + offset,
+ ghost_array.data() + offset + chunk_size,
+ ghost_array.data() + ghost_range.first);
std::fill(ghost_array.data() +
std::max(ghost_range.second, offset),
ghost_array.data() + offset + chunk_size,
}
else
{
-# ifdef DEAL_II_WITH_CUDA
- cudaError_t cuda_error =
- cudaMemcpy(ghost_array.data() + ghost_range.first,
- ghost_array.data() + offset,
- chunk_size * sizeof(Number),
- cudaMemcpyDeviceToDevice);
- AssertCuda(cuda_error);
- cuda_error =
- cudaMemset(ghost_array.data() +
- std::max(ghost_range.second, offset),
- 0,
- (offset + chunk_size -
- std::max(ghost_range.second, offset)) *
- sizeof(Number));
- AssertCuda(cuda_error);
-# else
- Assert(
- false,
- ExcMessage(
- "If the compiler doesn't understand CUDA code, only MemorySpace::Host is allowed!"));
-# endif
+ Kokkos::View<const Number *,
+ MemorySpace::Default::kokkos_space>
+ ghost_src_view(ghost_array.data() + offset, chunk_size);
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space>
+ ghost_dst_view(ghost_array.data() + ghost_range.first,
+ chunk_size);
+
+ // If source and destination are overlapping, we can't
+ // just call deep_copy but must copy the data to a buffer
+ // first.
+ if ((offset < ghost_range.first &&
+ ghost_range.first < offset + chunk_size) ||
+ (ghost_range.first < offset &&
+ offset < ghost_range.first + chunk_size))
+ {
+ Kokkos::View<Number *,
+ MemorySpace::Default::kokkos_space>
+ copy(Kokkos::view_alloc(
+ "copy", Kokkos::WithoutInitializing),
+ chunk_size);
+ Kokkos::deep_copy(copy, ghost_src_view);
+ Kokkos::deep_copy(ghost_dst_view, copy);
+ }
+ else
+ {
+ Kokkos::deep_copy(ghost_dst_view, ghost_src_view);
+ }
+ Kokkos::deep_copy(
+ Kokkos::View<Number *,
+ MemorySpace::Default::kokkos_space>(
+ ghost_array.data() +
+ std::max(ghost_range.second, offset),
+ (offset + chunk_size -
+ std::max(ghost_range.second, offset))),
+ 0);
}
offset += chunk_size;
}
if (ghost_array_ptr + offset !=
ghost_array.data() + my_ghosts->first)
{
+# ifdef DEAL_II_HAVE_CXX17
+ if constexpr (std::is_same<MemorySpaceType,
+ MemorySpace::Host>::value)
+# else
if (std::is_same<MemorySpaceType,
MemorySpace::Host>::value)
+# endif
{
- std::copy(ghost_array.data() + my_ghosts->first,
- ghost_array.data() + my_ghosts->second,
- ghost_array_ptr + offset);
+ if (offset > my_ghosts->first)
+ std::copy_backward(ghost_array.data() +
+ my_ghosts->first,
+                                               ghost_array.data() +
+                                                 my_ghosts->second,
+                                               ghost_array_ptr + offset +
+                                                 chunk_size);
+ else
+ std::copy(ghost_array.data() + my_ghosts->first,
+ ghost_array.data() + my_ghosts->second,
+ ghost_array_ptr + offset);
std::fill(
std::max(ghost_array.data() + my_ghosts->first,
ghost_array_ptr + offset + chunk_size),
}
else
{
-# ifdef DEAL_II_WITH_CUDA
- cudaError_t cuda_error =
- cudaMemcpy(ghost_array_ptr + offset,
- ghost_array.data() + my_ghosts->first,
- chunk_size * sizeof(Number),
- cudaMemcpyDeviceToDevice);
- AssertCuda(cuda_error);
- cuda_error = cudaMemset(
- std::max(ghost_array.data() + my_ghosts->first,
- ghost_array_ptr + offset + chunk_size),
- 0,
- (ghost_array.data() + my_ghosts->second -
- std::max(ghost_array.data() + my_ghosts->first,
- ghost_array_ptr + offset + chunk_size)) *
- sizeof(Number));
- AssertCuda(cuda_error);
-# else
- Assert(
- false,
- ExcMessage(
- "If the compiler doesn't understand CUDA code, only MemorySpace::Host is allowed!"));
-# endif
+ Kokkos::View<Number *,
+ MemorySpace::Default::kokkos_space>
+ copy("copy", chunk_size);
+ Kokkos::deep_copy(
+ copy,
+ Kokkos::View<Number *,
+ MemorySpace::Default::kokkos_space>(
+ ghost_array.data() + my_ghosts->first,
+ chunk_size));
+ Kokkos::deep_copy(
+ Kokkos::View<Number *,
+ MemorySpace::Default::kokkos_space>(
+ ghost_array_ptr + offset, chunk_size),
+ copy);
+ Kokkos::deep_copy(
+ Kokkos::View<Number *,
+ MemorySpace::Default::kokkos_space>(
+ std::max(ghost_array.data() + my_ghosts->first,
+ ghost_array_ptr + offset + chunk_size),
+ (ghost_array.data() + my_ghosts->second -
+ std::max(ghost_array.data() + my_ghosts->first,
+ ghost_array_ptr + offset +
+ chunk_size))),
+ 0);
}
}
offset += chunk_size;
ExcMessage("Index overflow: Maximum message size in MPI is 2GB. "
"The number of ghost entries times the size of 'Number' "
"exceeds this value. This is not supported."));
-# if defined(DEAL_II_WITH_CUDA) && defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
- if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
- cudaDeviceSynchronize();
-# endif
+ if (std::is_same<MemorySpaceType, MemorySpace::Default>::value)
+ Kokkos::fence();
const int ierr =
MPI_Isend(ghost_array_ptr,
ghost_targets_data[i].second * sizeof(Number),
  // straightforward for complex numbers. Therefore, comparison of complex
// numbers is prohibited and throws an exception.
template <typename Number>
- Number
+ DEAL_II_HOST_DEVICE Number
get_min(const Number a, const Number b)
{
return std::min(a, b);
}
template <typename Number>
- Number
+ DEAL_II_HOST_DEVICE Number
get_max(const Number a, const Number b)
{
return std::max(a, b);
"import_from_ghosted_array_start as is passed "
"to import_from_ghosted_array_finish."));
-# ifdef DEAL_II_WITH_CUDA
- if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
- {
- cudaMemset(ghost_array.data(),
- 0,
- sizeof(Number) * ghost_array.size());
- }
- else
-# endif
- {
-# ifdef DEAL_II_HAVE_CXX17
- if constexpr (std::is_trivial<Number>::value)
-# else
- if (std::is_trivial<Number>::value)
-# endif
- std::memset(ghost_array.data(),
- 0,
- sizeof(Number) * ghost_array.size());
- else
- std::fill(ghost_array.data(),
- ghost_array.data() + ghost_array.size(),
- 0);
- }
+ Kokkos::deep_copy(
+ Kokkos::View<Number *, typename MemorySpaceType::kokkos_space>(
+ ghost_array.data(), ghost_array.size()),
+ 0);
return;
}
# endif
const unsigned int n_import_targets = import_targets_data.size();
const unsigned int n_ghost_targets = ghost_targets_data.size();
-# if (defined(DEAL_II_WITH_CUDA) && defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
- // When using CUDAs-aware MPI, the set of local indices that are ghosts
+# if defined(DEAL_II_MPI_WITH_DEVICE_SUPPORT)
+      // When using device-aware MPI, the set of local indices that are ghost
// indices on other processors is expanded in arrays. This is for
// performance reasons as this can significantly decrease the number of
      // kernels launched. The indices are expanded the first time the function
// is called.
- if ((std::is_same<MemorySpaceType, MemorySpace::CUDA>::value) &&
+ if ((std::is_same<MemorySpaceType, MemorySpace::Default>::value) &&
(import_indices_plain_dev.size() == 0))
initialize_import_indices_plain_dev();
# endif
AssertThrowMPI(ierr);
const Number *read_position = temporary_storage.data();
-# if !(defined(DEAL_II_WITH_CUDA) && defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
- // If the operation is no insertion, add the imported data to the
- // local values. For insert, nothing is done here (but in debug mode
- // we assert that the specified value is either zero or matches with
- // the ones already present
- if (vector_operation == dealii::VectorOperation::add)
- for (const auto &import_range : import_indices_data)
- for (unsigned int j = import_range.first; j < import_range.second;
- j++)
- locally_owned_array[j] += *read_position++;
- else if (vector_operation == dealii::VectorOperation::min)
- for (const auto &import_range : import_indices_data)
- for (unsigned int j = import_range.first; j < import_range.second;
- j++)
- {
- locally_owned_array[j] =
- internal::get_min(*read_position, locally_owned_array[j]);
- read_position++;
- }
- else if (vector_operation == dealii::VectorOperation::max)
- for (const auto &import_range : import_indices_data)
- for (unsigned int j = import_range.first; j < import_range.second;
- j++)
+# if defined(DEAL_II_MPI_WITH_DEVICE_SUPPORT)
+# ifdef DEAL_II_HAVE_CXX17
+ if constexpr (std::is_same<MemorySpaceType,
+ MemorySpace::Default>::value)
+# else
+ if (std::is_same<MemorySpaceType, MemorySpace::Default>::value)
+# endif
+ {
+ if (vector_operation == dealii::VectorOperation::add)
{
- locally_owned_array[j] =
- internal::get_max(*read_position, locally_owned_array[j]);
- read_position++;
+ for (auto const &import_indices_plain :
+ import_indices_plain_dev)
+ {
+ const auto chunk_size = import_indices_plain.size();
+
+ using IndexType = decltype(chunk_size);
+ MemorySpace::Default::kokkos_space::execution_space exec;
+ Kokkos::parallel_for(
+ "fill locally_owned_array, add",
+ Kokkos::RangePolicy<
+ MemorySpace::Default::kokkos_space::execution_space>(
+ exec, 0, chunk_size),
+ KOKKOS_LAMBDA(IndexType idx) {
+ locally_owned_array
+ .data()[import_indices_plain(idx)] +=
+ read_position[idx];
+ });
+ exec.fence();
+
+ read_position += chunk_size;
+ }
}
- else
- for (const auto &import_range : import_indices_data)
- for (unsigned int j = import_range.first; j < import_range.second;
- j++, read_position++)
- // Below we use relatively large precision in units in the last
- // place (ULP) as this Assert can be easily triggered in
- // p::d::SolutionTransfer. The rationale is that during
- // interpolation on two elements sharing the face, values on
- // this face obtained from each side might be different due to
- // additions being done in different order. If the local
- // value is zero, it indicates that the local process has not
- // set the value during the cell loop and its value can be
- // safely overridden.
- Assert(*read_position == Number() ||
- internal::get_abs(locally_owned_array[j] -
- *read_position) <=
- internal::get_abs(locally_owned_array[j] +
- *read_position) *
- 100000. *
- std::numeric_limits<typename numbers::NumberTraits<
- Number>::real_type>::epsilon(),
- typename dealii::LinearAlgebra::distributed::Vector<
- Number>::ExcNonMatchingElements(*read_position,
- locally_owned_array[j],
- my_pid));
-# else
- if (vector_operation == dealii::VectorOperation::add)
- {
- for (auto const &import_indices_plain : import_indices_plain_dev)
+ else if (vector_operation == dealii::VectorOperation::min)
{
- const auto chunk_size = import_indices_plain.second;
- const int n_blocks =
- 1 + chunk_size / (::dealii::CUDAWrappers::chunk_size *
- ::dealii::CUDAWrappers::block_size);
- dealii::LinearAlgebra::CUDAWrappers::kernel::
- masked_vector_bin_op<Number,
- dealii::LinearAlgebra::CUDAWrappers::
- kernel::Binop_Addition>
- <<<n_blocks, dealii::CUDAWrappers::block_size>>>(
- import_indices_plain.first.get(),
- locally_owned_array.data(),
- read_position,
- chunk_size);
- read_position += chunk_size;
+ for (auto const &import_indices_plain :
+ import_indices_plain_dev)
+ {
+ const auto chunk_size = import_indices_plain.size();
+
+ using IndexType = decltype(chunk_size);
+ MemorySpace::Default::kokkos_space::execution_space exec;
+ Kokkos::parallel_for(
+ "fill locally_owned_array, min",
+ Kokkos::RangePolicy<
+ MemorySpace::Default::kokkos_space::execution_space>(
+ exec, 0, chunk_size),
+ KOKKOS_LAMBDA(IndexType idx) {
+ locally_owned_array
+ .data()[import_indices_plain(idx)] =
+ internal::get_min(
+ locally_owned_array
+ .data()[import_indices_plain(idx)],
+ read_position[idx]);
+ });
+ exec.fence();
+
+ read_position += chunk_size;
+ }
}
- }
- else if (vector_operation == dealii::VectorOperation::min)
- {
- for (auto const &import_indices_plain : import_indices_plain_dev)
+ else if (vector_operation == dealii::VectorOperation::max)
{
- const auto chunk_size = import_indices_plain.second;
- const int n_blocks =
- 1 + chunk_size / (::dealii::CUDAWrappers::chunk_size *
- ::dealii::CUDAWrappers::block_size);
- dealii::LinearAlgebra::CUDAWrappers::kernel::
- masked_vector_bin_op<
- Number,
- dealii::LinearAlgebra::CUDAWrappers::kernel::Binop_Min>
- <<<n_blocks, dealii::CUDAWrappers::block_size>>>(
- import_indices_plain.first.get(),
- locally_owned_array.data(),
- read_position,
- chunk_size);
- read_position += chunk_size;
+ for (auto const &import_indices_plain :
+ import_indices_plain_dev)
+ {
+ const auto chunk_size = import_indices_plain.size();
+
+ using IndexType = decltype(chunk_size);
+ MemorySpace::Default::kokkos_space::execution_space exec;
+ Kokkos::parallel_for(
+ "fill locally_owned_array, max",
+ Kokkos::RangePolicy<
+ MemorySpace::Default::kokkos_space::execution_space>(
+ exec, 0, chunk_size),
+ KOKKOS_LAMBDA(IndexType idx) {
+ locally_owned_array
+ .data()[import_indices_plain(idx)] =
+ internal::get_max(
+ locally_owned_array
+ .data()[import_indices_plain(idx)],
+ read_position[idx]);
+ });
+ exec.fence();
+
+ read_position += chunk_size;
+ }
}
- }
- else if (vector_operation == dealii::VectorOperation::max)
- {
- for (auto const &import_indices_plain : import_indices_plain_dev)
+ else
{
- const auto chunk_size = import_indices_plain.second;
- const int n_blocks =
- 1 + chunk_size / (::dealii::CUDAWrappers::chunk_size *
- ::dealii::CUDAWrappers::block_size);
- dealii::LinearAlgebra::CUDAWrappers::kernel::
- masked_vector_bin_op<
- Number,
- dealii::LinearAlgebra::CUDAWrappers::kernel::Binop_Max>
- <<<n_blocks, dealii::CUDAWrappers::block_size>>>(
- import_indices_plain.first.get(),
- locally_owned_array.data(),
- read_position,
- chunk_size);
- read_position += chunk_size;
+ for (auto const &import_indices_plain :
+ import_indices_plain_dev)
+ {
+                    // We can't easily assert here, so we just move the
+                    // pointer to match the host code.
+ const auto chunk_size = import_indices_plain.size();
+ read_position += chunk_size;
+ }
}
}
else
+# endif
{
- for (auto const &import_indices_plain : import_indices_plain_dev)
- {
- // We can't easily assert here, so we just move the pointer
- // matching the host code.
- const auto chunk_size = import_indices_plain.second;
- read_position += chunk_size;
- }
+          // If the operation is not an insertion, add the imported data to
+          // the local values. For insert, nothing is done here (but in debug
+          // mode we assert that the specified value is either zero or
+          // matches the ones already present).
+ if (vector_operation == dealii::VectorOperation::add)
+ for (const auto &import_range : import_indices_data)
+ for (unsigned int j = import_range.first;
+ j < import_range.second;
+ j++)
+ locally_owned_array[j] += *read_position++;
+ else if (vector_operation == dealii::VectorOperation::min)
+ for (const auto &import_range : import_indices_data)
+ for (unsigned int j = import_range.first;
+ j < import_range.second;
+ j++)
+ {
+ locally_owned_array[j] =
+ internal::get_min(*read_position,
+ locally_owned_array[j]);
+ read_position++;
+ }
+ else if (vector_operation == dealii::VectorOperation::max)
+ for (const auto &import_range : import_indices_data)
+ for (unsigned int j = import_range.first;
+ j < import_range.second;
+ j++)
+ {
+ locally_owned_array[j] =
+ internal::get_max(*read_position,
+ locally_owned_array[j]);
+ read_position++;
+ }
+ else
+ for (const auto &import_range : import_indices_data)
+ for (unsigned int j = import_range.first;
+ j < import_range.second;
+ j++, read_position++)
+ // Below we use relatively large precision in units in the
+ // last place (ULP) as this Assert can be easily triggered
+ // in p::d::SolutionTransfer. The rationale is that during
+ // interpolation on two elements sharing the face, values on
+ // this face obtained from each side might be different due
+ // to additions being done in different order. If the local
+ // value is zero, it indicates that the local process has
+ // not set the value during the cell loop and its value can
+ // be safely overridden.
+ Assert(
+ *read_position == Number() ||
+ internal::get_abs(locally_owned_array[j] -
+ *read_position) <=
+ internal::get_abs(locally_owned_array[j] +
+ *read_position) *
+ 100000. *
+ std::numeric_limits<typename numbers::NumberTraits<
+ Number>::real_type>::epsilon(),
+ typename dealii::LinearAlgebra::distributed::Vector<
+ Number>::ExcNonMatchingElements(*read_position,
+ locally_owned_array[j],
+ my_pid));
}
-# endif
+
AssertDimension(read_position - temporary_storage.data(),
n_import_indices());
}
{
Assert(ghost_array.begin() != nullptr, ExcInternalError());
-# if defined(DEAL_II_WITH_CUDA) && defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
- if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+# if defined(DEAL_II_MPI_WITH_DEVICE_SUPPORT)
+# ifdef DEAL_II_HAVE_CXX17
+ if constexpr (std::is_same<MemorySpaceType,
+ MemorySpace::Default>::value)
+# else
+ if (std::is_same<MemorySpaceType, MemorySpace::Default>::value)
+# endif
{
- Assert(std::is_trivial<Number>::value, ExcNotImplemented());
- cudaMemset(ghost_array.data(),
- 0,
- sizeof(Number) * n_ghost_indices());
+ Kokkos::deep_copy(
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space>(
+ ghost_array.data(), n_ghost_indices()),
+ 0);
}
else
# endif
// allocate import_data in case it is not set up yet
if (partitioner->n_import_indices() > 0)
{
-# if !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+# if !defined(DEAL_II_MPI_WITH_DEVICE_SUPPORT)
if (std::is_same<MemorySpaceType,
dealii::MemorySpace::Default>::value)
{
}
}
-# if !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+# if !defined(DEAL_II_MPI_WITH_DEVICE_SUPPORT)
if (std::is_same<MemorySpaceType, dealii::MemorySpace::Default>::value)
{
// Move the data to the host and then move it back to the
// make this function thread safe
std::lock_guard<std::mutex> lock(mutex);
-# if !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+# if !defined(DEAL_II_MPI_WITH_DEVICE_SUPPORT)
if (std::is_same<MemorySpaceType, MemorySpace::Default>::value)
{
Assert(partitioner->n_import_indices() == 0 ||
// allocate import_data in case it is not set up yet
if (partitioner->n_import_indices() > 0)
{
-# if !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+# if !defined(DEAL_II_MPI_WITH_DEVICE_SUPPORT)
if (std::is_same<MemorySpaceType, MemorySpace::Default>::value)
{
if (import_data.values_host_buffer.size() == 0)
}
}
-# if !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+# if !defined(DEAL_II_MPI_WITH_DEVICE_SUPPORT)
if (std::is_same<MemorySpaceType, MemorySpace::Default>::value)
{
// Move the data to the host and then move it back to the
// make this function thread safe
std::lock_guard<std::mutex> lock(mutex);
-# if !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+# if !defined(DEAL_II_MPI_WITH_DEVICE_SUPPORT)
if (std::is_same<MemorySpaceType, MemorySpace::Default>::value)
{
partitioner->export_to_ghosted_array_finish(
, use_coloring(use_coloring)
, overlap_communication_computation(overlap_communication_computation)
{
-# ifndef DEAL_II_MPI_WITH_CUDA_SUPPORT
+# ifndef DEAL_II_MPI_WITH_DEVICE_SUPPORT
AssertThrow(
overlap_communication_computation == false,
ExcMessage(
set(_separate_src
${_separate_src}
cuda.cc
- partitioner_cuda.cc
)
endif()
mpi_noncontiguous_partitioner.inst.in
mpi_remote_point_evaluation.inst.in
partitioner.inst.in
- partitioner_cuda.inst.in
polynomials_rannacher_turek.inst.in
symmetric_tensor.inst.in
symbolic_function.inst.in
return memory;
}
+
+
+ void
+ Partitioner::initialize_import_indices_plain_dev() const
+ {
+ const unsigned int n_import_targets = import_targets_data.size();
+ import_indices_plain_dev.reserve(n_import_targets);
+ for (unsigned int i = 0; i < n_import_targets; ++i)
+ {
+ // Expand the indices on the host
+ std::vector<std::pair<unsigned int, unsigned int>>::const_iterator
+ my_imports = import_indices_data.begin() +
+ import_indices_chunks_by_rank_data[i],
+ end_my_imports = import_indices_data.begin() +
+ import_indices_chunks_by_rank_data[i + 1];
+ std::vector<unsigned int> import_indices_plain_host;
+ for (; my_imports != end_my_imports; ++my_imports)
+ {
+ const unsigned int chunk_size =
+ my_imports->second - my_imports->first;
+ for (unsigned int j = 0; j < chunk_size; ++j)
+ import_indices_plain_host.push_back(my_imports->first + j);
+ }
+
+ // Move the indices to the device
+ const auto chunk_size = import_indices_plain_host.size();
+ import_indices_plain_dev.emplace_back("import_indices_plain_dev" +
+ std::to_string(i),
+ chunk_size);
+ Kokkos::deep_copy(import_indices_plain_dev.back(),
+ Kokkos::View<unsigned int *, Kokkos::HostSpace>(
+ import_indices_plain_host.data(), chunk_size));
+ }
+ }
+
} // namespace MPI
} // end of namespace Utilities
std::vector<MPI_Request> &) const;
#endif
}
+
+for (SCALAR : MPI_DEVICE_SCALARS)
+ {
+#ifdef DEAL_II_WITH_MPI
+ template void Utilities::MPI::Partitioner::
+ export_to_ghosted_array_start<SCALAR, dealii::MemorySpace::Default>(
+ const unsigned int,
+ const ArrayView<const SCALAR, MemorySpace::Default> &,
+ const ArrayView<SCALAR, MemorySpace::Default> &,
+ const ArrayView<SCALAR, MemorySpace::Default> &,
+ std::vector<MPI_Request> &) const;
+
+ template void Utilities::MPI::Partitioner::export_to_ghosted_array_finish<
+ SCALAR,
+ MemorySpace::Default>(const ArrayView<SCALAR, MemorySpace::Default> &,
+ std::vector<MPI_Request> &) const;
+
+ template void Utilities::MPI::Partitioner::import_from_ghosted_array_start<
+ SCALAR,
+ MemorySpace::Default>(const VectorOperation::values,
+ const unsigned int,
+ const ArrayView<SCALAR, MemorySpace::Default> &,
+ const ArrayView<SCALAR, MemorySpace::Default> &,
+ std::vector<MPI_Request> &) const;
+
+ template void Utilities::MPI::Partitioner::
+ import_from_ghosted_array_finish<SCALAR, MemorySpace::Default>(
+ const VectorOperation::values,
+ const ArrayView<const SCALAR, MemorySpace::Default> &,
+ const ArrayView<SCALAR, MemorySpace::Default> &,
+ const ArrayView<SCALAR, MemorySpace::Default> &,
+ std::vector<MPI_Request> &) const;
+#endif
+ }
+++ /dev/null
-// ---------------------------------------------------------------------
-//
-// Copyright (C) 1999 - 2021 by the deal.II authors
-//
-// This file is part of the deal.II library.
-//
-// The deal.II library is free software; you can use it, redistribute
-// it, and/or modify it under the terms of the GNU Lesser General
-// Public License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-// The full text of the license can be found in the file LICENSE.md at
-// the top level directory of deal.II.
-//
-// ---------------------------------------------------------------------
-
-#include <deal.II/base/partitioner.h>
-#include <deal.II/base/partitioner.templates.h>
-
-DEAL_II_NAMESPACE_OPEN
-
-
-
-namespace Utilities
-{
- namespace MPI
- {
- void
- Partitioner::initialize_import_indices_plain_dev() const
- {
- const unsigned int n_import_targets = import_targets_data.size();
- import_indices_plain_dev.reserve(n_import_targets);
- for (unsigned int i = 0; i < n_import_targets; ++i)
- {
- // Expand the indices on the host
- std::vector<std::pair<unsigned int, unsigned int>>::const_iterator
- my_imports = import_indices_data.begin() +
- import_indices_chunks_by_rank_data[i],
- end_my_imports = import_indices_data.begin() +
- import_indices_chunks_by_rank_data[i + 1];
- std::vector<unsigned int> import_indices_plain_host;
- for (; my_imports != end_my_imports; ++my_imports)
- {
- const unsigned int chunk_size =
- my_imports->second - my_imports->first;
- for (unsigned int j = 0; j < chunk_size; ++j)
- import_indices_plain_host.push_back(my_imports->first + j);
- }
-
- // Move the indices to the device
- import_indices_plain_dev.emplace_back(std::make_pair(
- std::unique_ptr<unsigned int[], void (*)(unsigned int *)>(
- nullptr, Utilities::CUDA::delete_device_data<unsigned int>),
- import_indices_plain_host.size()));
-
- import_indices_plain_dev[i].first.reset(
- Utilities::CUDA::allocate_device_data<unsigned int>(
- import_indices_plain_dev[i].second));
- Utilities::CUDA::copy_to_dev(import_indices_plain_host,
- import_indices_plain_dev[i].first.get());
- }
- }
- } // namespace MPI
-} // namespace Utilities
-
-// explicit instantiations from .templates.h file
-#include "partitioner_cuda.inst"
-
-DEAL_II_NAMESPACE_CLOSE
+++ /dev/null
-// ---------------------------------------------------------------------
-//
-// Copyright (C) 2017 - 2019 by the deal.II authors
-//
-// This file is part of the deal.II library.
-//
-// The deal.II library is free software; you can use it, redistribute
-// it, and/or modify it under the terms of the GNU Lesser General
-// Public License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-// The full text of the license can be found in the file LICENSE.md at
-// the top level directory of deal.II.
-//
-// ---------------------------------------------------------------------
-
-
-
-for (SCALAR : MPI_SCALARS)
- {
-#ifdef DEAL_II_WITH_MPI
- template void Utilities::MPI::Partitioner::
- export_to_ghosted_array_start<SCALAR, dealii::MemorySpace::CUDA>(
- const unsigned int,
- const ArrayView<const SCALAR, MemorySpace::CUDA> &,
- const ArrayView<SCALAR, MemorySpace::CUDA> &,
- const ArrayView<SCALAR, MemorySpace::CUDA> &,
- std::vector<MPI_Request> &) const;
-
- template void Utilities::MPI::Partitioner::export_to_ghosted_array_finish<
- SCALAR,
- MemorySpace::CUDA>(const ArrayView<SCALAR, MemorySpace::CUDA> &,
- std::vector<MPI_Request> &) const;
-
- template void Utilities::MPI::Partitioner::import_from_ghosted_array_start<
- SCALAR,
- MemorySpace::CUDA>(const VectorOperation::values,
- const unsigned int,
- const ArrayView<SCALAR, MemorySpace::CUDA> &,
- const ArrayView<SCALAR, MemorySpace::CUDA> &,
- std::vector<MPI_Request> &) const;
-
- template void Utilities::MPI::Partitioner::import_from_ghosted_array_finish<
- SCALAR,
- MemorySpace::CUDA>(const VectorOperation::values,
- const ArrayView<const SCALAR, MemorySpace::CUDA> &,
- const ArrayView<SCALAR, MemorySpace::CUDA> &,
- const ArrayView<SCALAR, MemorySpace::CUDA> &,
- std::vector<MPI_Request> &) const;
-#endif
- }
template <typename Number>
void
-print_cuda_view(const ArrayView<Number, MemorySpace::CUDA> cuda_view)
+print_device_view(
+ const Kokkos::View<Number *, MemorySpace::Default::kokkos_space> device_view)
{
- std::vector<Number> cpu_values(cuda_view.size());
- Utilities::CUDA::copy_to_host(cuda_view.data(), cpu_values);
+ std::vector<Number> cpu_values(device_view.size());
+ Kokkos::deep_copy(Kokkos::View<Number *, Kokkos::HostSpace>(
+ cpu_values.data(), cpu_values.size()),
+ device_view);
for (Number value : cpu_values)
deallog << value << " ";
deallog << std::endl;
std::vector<unsigned int> cpu_locally_owned_data(local_size);
for (unsigned int i = 0; i < local_size; ++i)
cpu_locally_owned_data[i] = my_start + i;
- std::unique_ptr<unsigned int[], void (*)(unsigned int *)> locally_owned_data(
- nullptr, Utilities::CUDA::delete_device_data<unsigned int>);
- locally_owned_data.reset(
- Utilities::CUDA::allocate_device_data<unsigned int>(local_size));
- ArrayView<unsigned int, MemorySpace::CUDA> locally_owned_data_view(
- locally_owned_data.get(), local_size);
- Utilities::CUDA::copy_to_dev(cpu_locally_owned_data,
- locally_owned_data.get());
+ Kokkos::View<unsigned *, MemorySpace::Default::kokkos_space>
+ locally_owned_data("locally_owned_data", local_size);
+ ArrayView<unsigned int, MemorySpace::Default> locally_owned_data_view(
+ locally_owned_data.data(), local_size);
+ Kokkos::deep_copy(
+ locally_owned_data,
+ Kokkos::View<unsigned *, Kokkos::HostSpace>(cpu_locally_owned_data.data(),
+ cpu_locally_owned_data.size()));
// set up a ghost array
- std::unique_ptr<unsigned int[], void (*)(unsigned int *)> ghosts(
- nullptr, Utilities::CUDA::delete_device_data<unsigned int>);
- ghosts.reset(
- Utilities::CUDA::allocate_device_data<unsigned int>(v.n_ghost_indices()));
- ArrayView<unsigned int, MemorySpace::CUDA> ghosts_view(ghosts.get(),
- v.n_ghost_indices());
-
- std::unique_ptr<unsigned int[], void (*)(unsigned int *)> temp_array(
- nullptr, Utilities::CUDA::delete_device_data<unsigned int>);
- temp_array.reset(
- Utilities::CUDA::allocate_device_data<unsigned int>(v.n_import_indices()));
- ArrayView<unsigned int, MemorySpace::CUDA> temp_array_view(
- temp_array.get(), v.n_import_indices());
+ Kokkos::View<unsigned *, MemorySpace::Default::kokkos_space> ghosts(
+ "ghosts", v.n_ghost_indices());
+ ArrayView<unsigned int, MemorySpace::Default> ghosts_view(ghosts.data(),
+ ghosts.size());
+ Kokkos::View<unsigned *, MemorySpace::Default::kokkos_space> temp_array(
+ "temp_array", v.n_import_indices());
+ ArrayView<unsigned int, MemorySpace::Default> temp_array_view(
+ temp_array.data(), temp_array.size());
std::vector<MPI_Request> requests;
// send the full array
- v.export_to_ghosted_array_start<unsigned int, MemorySpace::CUDA>(
+ v.export_to_ghosted_array_start<unsigned int, MemorySpace::Default>(
3, locally_owned_data_view, temp_array_view, ghosts_view, requests);
- v.export_to_ghosted_array_finish<unsigned int, MemorySpace::CUDA>(ghosts_view,
- requests);
+ v.export_to_ghosted_array_finish<unsigned int, MemorySpace::Default>(
+ ghosts_view, requests);
deallog << "All ghosts: ";
- print_cuda_view(ghosts_view);
+ print_device_view(ghosts);
// send only the array in w
- cudaError_t cuda_error =
- cudaMemset(ghosts_view.data(),
- 0,
- ghosts_view.size() * sizeof(unsigned int));
- AssertCuda(cuda_error);
+ Kokkos::deep_copy(ghosts, 0);
Assert(temp_array_view.size() >= w.n_import_indices(), ExcInternalError());
- ArrayView<unsigned int, MemorySpace::CUDA> temp_array_view_w(
+ ArrayView<unsigned int, MemorySpace::Default> temp_array_view_w(
temp_array_view.data(), w.n_import_indices());
- w.export_to_ghosted_array_start<unsigned int, MemorySpace::CUDA>(
+ w.export_to_ghosted_array_start<unsigned int, MemorySpace::Default>(
3, locally_owned_data_view, temp_array_view_w, ghosts_view, requests);
// start a second send operation for the x partitioner in parallel to make
// sure communication does not get messed up
- std::unique_ptr<unsigned int[], void (*)(unsigned int *)> temp_array2(
- nullptr, Utilities::CUDA::delete_device_data<unsigned int>);
- temp_array2.reset(
- Utilities::CUDA::allocate_device_data<unsigned int>(x.n_import_indices()));
- ArrayView<unsigned int, MemorySpace::CUDA> temp_array2_view(
- temp_array2.get(), x.n_import_indices());
-
- std::unique_ptr<unsigned int[], void (*)(unsigned int *)> ghosts2(
- nullptr, Utilities::CUDA::delete_device_data<unsigned int>);
- ghosts2.reset(
- Utilities::CUDA::allocate_device_data<unsigned int>(x.n_ghost_indices()));
- ArrayView<unsigned int, MemorySpace::CUDA> ghosts2_view(ghosts2.get(),
- x.n_ghost_indices());
+ Kokkos::View<unsigned *, MemorySpace::Default::kokkos_space> temp_array2(
+ "temp_array2", x.n_import_indices());
+ ArrayView<unsigned int, MemorySpace::Default> temp_array2_view(
+ temp_array2.data(), temp_array2.size());
+
+ Kokkos::View<unsigned *, MemorySpace::Default::kokkos_space> ghosts2(
+ "ghosts2", x.n_ghost_indices());
+ ArrayView<unsigned int, MemorySpace::Default> ghosts2_view(ghosts2.data(),
+ ghosts2.size());
std::vector<MPI_Request> requests2;
- x.export_to_ghosted_array_start<unsigned int, MemorySpace::CUDA>(
+ x.export_to_ghosted_array_start<unsigned int, MemorySpace::Default>(
4, locally_owned_data_view, temp_array2_view, ghosts2_view, requests2);
- w.export_to_ghosted_array_finish<unsigned int, MemorySpace::CUDA>(ghosts_view,
- requests);
+ w.export_to_ghosted_array_finish<unsigned int, MemorySpace::Default>(
+ ghosts_view, requests);
deallog << "Ghosts on reduced 1: ";
- print_cuda_view(ghosts_view);
+ print_device_view(ghosts);
- cuda_error = cudaMemset(ghosts_view.data(),
- 0,
- ghosts_view.size() * sizeof(unsigned int));
- AssertCuda(cuda_error);
+ Kokkos::deep_copy(ghosts, 0);
Assert(temp_array_view.size() >= x.n_import_indices(), ExcInternalError());
- ArrayView<unsigned int, MemorySpace::CUDA> temp_array_view_x(
+ ArrayView<unsigned int, MemorySpace::Default> temp_array_view_x(
temp_array_view.data(), x.n_import_indices());
- x.export_to_ghosted_array_start<unsigned int, MemorySpace::CUDA>(
+ x.export_to_ghosted_array_start<unsigned int, MemorySpace::Default>(
3, locally_owned_data_view, temp_array_view_x, ghosts_view, requests);
- x.export_to_ghosted_array_finish<unsigned int, MemorySpace::CUDA>(ghosts_view,
- requests);
+ x.export_to_ghosted_array_finish<unsigned int, MemorySpace::Default>(
+ ghosts_view, requests);
deallog << "Ghosts on reduced 2: ";
- print_cuda_view(ghosts_view);
+ print_device_view(ghosts);
- x.export_to_ghosted_array_finish<unsigned int, MemorySpace::CUDA>(
+ x.export_to_ghosted_array_finish<unsigned int, MemorySpace::Default>(
ghosts2_view, requests2);
deallog << "Ghosts on reduced 2 without excess entries: ";
- print_cuda_view(ghosts2_view);
+ print_device_view(ghosts2);
- x.export_to_ghosted_array_start<unsigned int, MemorySpace::CUDA>(
+ x.export_to_ghosted_array_start<unsigned int, MemorySpace::Default>(
3, locally_owned_data_view, temp_array_view_x, ghosts_view, requests);
- x.export_to_ghosted_array_finish<unsigned int, MemorySpace::CUDA>(ghosts_view,
- requests);
+ x.export_to_ghosted_array_finish<unsigned int, MemorySpace::Default>(
+ ghosts_view, requests);
deallog << "Ghosts on reduced 2: ";
- print_cuda_view(ghosts_view);
+ print_device_view(ghosts);
}
main(int argc, char **argv)
{
Utilities::MPI::MPI_InitFinalize mpi(argc, argv);
- MPILogInitAll log;
- init_cuda(true);
+ Kokkos::initialize();
+ MPILogInitAll log;
test();
+ Kokkos::finalize();
}
template <typename Number>
void
-print_cuda_view(const ArrayView<Number, MemorySpace::CUDA> cuda_view)
+print_device_view(
+ const Kokkos::View<Number *, MemorySpace::Default::kokkos_space> device_view)
{
- std::vector<Number> cpu_values(cuda_view.size());
- Utilities::CUDA::copy_to_host(cuda_view.data(), cpu_values);
+ std::vector<Number> cpu_values(device_view.size());
+ Kokkos::deep_copy(Kokkos::View<Number *, Kokkos::HostSpace>(
+ cpu_values.data(), cpu_values.size()),
+ device_view);
for (Number value : cpu_values)
deallog << value << " ";
deallog << std::endl;
x.set_ghost_indices(local_relevant_3, v.ghost_indices());
// set up a ghost array with some entries
- std::vector<unsigned int> cpu_ghost_array(v.n_ghost_indices(), 1);
- std::unique_ptr<unsigned int[], void (*)(unsigned int *)> ghost_array(
- Utilities::CUDA::allocate_device_data<unsigned int>(cpu_ghost_array.size()),
- Utilities::CUDA::delete_device_data<unsigned int>);
- ArrayView<unsigned int, MemorySpace::CUDA> ghost_array_view(
- ghost_array.get(), cpu_ghost_array.size());
- Utilities::CUDA::copy_to_dev(cpu_ghost_array, ghost_array.get());
+ Kokkos::View<unsigned *, MemorySpace::Default::kokkos_space> ghost_array(
+ "ghost_array", v.n_ghost_indices());
+ ArrayView<unsigned int, MemorySpace::Default> ghost_array_view(
+ ghost_array.data(), ghost_array.size());
+ Kokkos::deep_copy(ghost_array, 1);
// set up other arrays
- std::unique_ptr<unsigned int[], void (*)(unsigned int *)> locally_owned_array(
- Utilities::CUDA::allocate_device_data<unsigned int>(local_size),
- Utilities::CUDA::delete_device_data<unsigned int>);
- ArrayView<unsigned int, MemorySpace::CUDA> locally_owned_array_view(
- locally_owned_array.get(), local_size);
+ Kokkos::View<unsigned *, MemorySpace::Default::kokkos_space>
+ locally_owned_array("locally_owned_array", local_size);
+ ArrayView<unsigned int, MemorySpace::Default> locally_owned_array_view(
+ locally_owned_array.data(), locally_owned_array.size());
- std::unique_ptr<unsigned int[], void (*)(unsigned int *)> temp_array(
- Utilities::CUDA::allocate_device_data<unsigned int>(v.n_import_indices()),
- Utilities::CUDA::delete_device_data<unsigned int>);
- ArrayView<unsigned int, MemorySpace::CUDA> temp_array_view(
- temp_array.get(), v.n_import_indices());
+ Kokkos::View<unsigned *, MemorySpace::Default::kokkos_space> temp_array(
+ "temp_array", v.n_import_indices());
+ ArrayView<unsigned int, MemorySpace::Default> temp_array_view(
+ temp_array.data(), temp_array.size());
std::vector<MPI_Request> requests;
// send the full array
{
- std::unique_ptr<unsigned int[], void (*)(unsigned int *)> ghosts(
- Utilities::CUDA::allocate_device_data<unsigned int>(
- ghost_array_view.size()),
- Utilities::CUDA::delete_device_data<unsigned int>);
- ArrayView<unsigned int, MemorySpace::CUDA> ghosts_view(
- ghosts.get(), ghost_array_view.size());
- const cudaError_t cuda_error =
- cudaMemcpy(ghosts.get(),
- ghost_array_view.data(),
- ghost_array_view.size() * sizeof(unsigned int),
- cudaMemcpyDeviceToDevice);
- AssertCuda(cuda_error);
+ Kokkos::View<unsigned *, MemorySpace::Default::kokkos_space> ghosts(
+ "ghosts", ghost_array_view.size());
+ ArrayView<unsigned int, MemorySpace::Default> ghosts_view(ghosts.data(),
+ ghosts.size());
+ Kokkos::deep_copy(ghosts, ghost_array);
- v.import_from_ghosted_array_start<unsigned int, MemorySpace::CUDA>(
+ v.import_from_ghosted_array_start<unsigned int, MemorySpace::Default>(
VectorOperation::add, 3, ghosts_view, temp_array_view, requests);
- v.import_from_ghosted_array_finish<unsigned int, MemorySpace::CUDA>(
+ v.import_from_ghosted_array_finish<unsigned int, MemorySpace::Default>(
VectorOperation::add,
temp_array_view,
locally_owned_array_view,
// check that the ghost entries are zeroed out in these calls
deallog << "v ghost entries (should be zero up to index "
<< v.n_ghost_indices() - 1 << "):" << std::endl;
- print_cuda_view(ghosts_view);
+ print_device_view(ghosts);
}
deallog << "From all ghosts: ";
- print_cuda_view(locally_owned_array_view);
+ print_device_view(locally_owned_array);
// send only the array in w
- cudaError_t cuda_error =
- cudaMemset(locally_owned_array_view.data(),
- 0,
- locally_owned_array_view.size() * sizeof(unsigned int));
- AssertCuda(cuda_error);
+ Kokkos::deep_copy(locally_owned_array, 0);
Assert(temp_array_view.size() >= w.n_import_indices(), ExcInternalError());
- ArrayView<unsigned int, MemorySpace::CUDA> temp_array_view_w(
+ ArrayView<unsigned int, MemorySpace::Default> temp_array_view_w(
temp_array_view.data(), w.n_import_indices());
{
- std::unique_ptr<unsigned int[], void (*)(unsigned int *)> ghosts(
- Utilities::CUDA::allocate_device_data<unsigned int>(
- ghost_array_view.size()),
- Utilities::CUDA::delete_device_data<unsigned int>);
- ArrayView<unsigned int, MemorySpace::CUDA> ghosts_view(
- ghosts.get(), ghost_array_view.size());
- const cudaError_t cuda_error =
- cudaMemcpy(ghosts.get(),
- ghost_array_view.data(),
- ghost_array_view.size() * sizeof(unsigned int),
- cudaMemcpyDeviceToDevice);
- AssertCuda(cuda_error);
+ Kokkos::View<unsigned *, MemorySpace::Default::kokkos_space> ghosts(
+ "ghosts", ghost_array_view.size());
+ ArrayView<unsigned int, MemorySpace::Default> ghosts_view(ghosts.data(),
+ ghosts.size());
+ Kokkos::deep_copy(ghosts, ghost_array);
- w.import_from_ghosted_array_start<unsigned int, MemorySpace::CUDA>(
+ w.import_from_ghosted_array_start<unsigned int, MemorySpace::Default>(
VectorOperation::add, 3, ghosts_view, temp_array_view_w, requests);
- w.import_from_ghosted_array_finish<unsigned int, MemorySpace::CUDA>(
+ w.import_from_ghosted_array_finish<unsigned int, MemorySpace::Default>(
VectorOperation::add,
temp_array_view_w,
locally_owned_array_view,
// check that the ghost entries are zeroed out in these calls
deallog << "w ghost entries (should be zero up to index "
<< w.n_ghost_indices() - 1 << "):" << std::endl;
- print_cuda_view(ghosts_view);
+ print_device_view(ghosts);
}
deallog << "From reduced ghosts 1: ";
- print_cuda_view(locally_owned_array_view);
+ print_device_view(locally_owned_array);
// send only the array in x
- cuda_error =
- cudaMemset(locally_owned_array_view.data(),
- 0,
- locally_owned_array_view.size() * sizeof(unsigned int));
- AssertCuda(cuda_error);
+ Kokkos::deep_copy(locally_owned_array, 0);
Assert(temp_array_view.size() >= x.n_import_indices(), ExcInternalError());
- ArrayView<unsigned int, MemorySpace::CUDA> temp_array_view_x(
+ ArrayView<unsigned int, MemorySpace::Default> temp_array_view_x(
temp_array_view.data(), x.n_import_indices());
{
- std::unique_ptr<unsigned int[], void (*)(unsigned int *)> ghosts(
- Utilities::CUDA::allocate_device_data<unsigned int>(
- ghost_array_view.size()),
- Utilities::CUDA::delete_device_data<unsigned int>);
- ArrayView<unsigned int, MemorySpace::CUDA> ghosts_view(
- ghosts.get(), ghost_array_view.size());
- const cudaError_t cuda_error =
- cudaMemcpy(ghosts.get(),
- ghost_array_view.data(),
- ghost_array_view.size() * sizeof(unsigned int),
- cudaMemcpyDeviceToDevice);
- AssertCuda(cuda_error);
+ Kokkos::View<unsigned *, MemorySpace::Default::kokkos_space> ghosts(
+ "ghosts", ghost_array_view.size());
+ ArrayView<unsigned int, MemorySpace::Default> ghosts_view(ghosts.data(),
+ ghosts.size());
+ Kokkos::deep_copy(ghosts, ghost_array);
- x.import_from_ghosted_array_start<unsigned int, MemorySpace::CUDA>(
+ x.import_from_ghosted_array_start<unsigned int, MemorySpace::Default>(
VectorOperation::add, 3, ghosts_view, temp_array_view_x, requests);
- x.import_from_ghosted_array_finish<unsigned int, MemorySpace::CUDA>(
+ x.import_from_ghosted_array_finish<unsigned int, MemorySpace::Default>(
VectorOperation::add,
temp_array_view_x,
locally_owned_array_view,
// check that the ghost entries are zeroed out in these calls
deallog << "x ghost entries (should be zero up to index "
<< x.n_ghost_indices() << "):" << std::endl;
- print_cuda_view(ghosts_view);
+ print_device_view(ghosts);
}
deallog << "From reduced ghosts 2: ";
- print_cuda_view(locally_owned_array_view);
+ print_device_view(locally_owned_array);
// now send a tight array from x and add into the existing entries
- std::vector<unsigned int> cpu_ghosts(x.n_ghost_indices(), 1);
- std::unique_ptr<unsigned int[], void (*)(unsigned int *)> ghosts(
- Utilities::CUDA::allocate_device_data<unsigned int>(cpu_ghosts.size()),
- Utilities::CUDA::delete_device_data<unsigned int>);
- ArrayView<unsigned int, MemorySpace::CUDA> ghosts_view(ghosts.get(),
- cpu_ghosts.size());
- Utilities::CUDA::copy_to_dev(cpu_ghosts, ghosts.get());
+ Kokkos::View<unsigned *, MemorySpace::Default::kokkos_space> ghosts(
+ "ghosts", x.n_ghost_indices());
+ ArrayView<unsigned int, MemorySpace::Default> ghosts_view(ghosts.data(),
+ ghosts.size());
+ Kokkos::deep_copy(ghosts, 1);
- x.import_from_ghosted_array_start<unsigned int, MemorySpace::CUDA>(
+ x.import_from_ghosted_array_start<unsigned int, MemorySpace::Default>(
VectorOperation::add, 3, ghosts_view, temp_array_view_x, requests);
- x.import_from_ghosted_array_finish<unsigned int, MemorySpace::CUDA>(
+ x.import_from_ghosted_array_finish<unsigned int, MemorySpace::Default>(
VectorOperation::add,
temp_array_view_x,
locally_owned_array_view,
ghosts_view,
requests);
deallog << "From tight reduced ghosts 2: ";
- print_cuda_view(locally_owned_array_view);
+ print_device_view(locally_owned_array);
}
main(int argc, char **argv)
{
Utilities::MPI::MPI_InitFinalize mpi(argc, argv);
- MPILogInitAll log;
- init_cuda(true);
+ Kokkos::initialize();
+ MPILogInitAll log;
test();
+ Kokkos::finalize();
}
template <typename Number>
void
-print_cuda_view(const ArrayView<Number, MemorySpace::CUDA> cuda_view)
+print_device_view(
+ const Kokkos::View<Number *, MemorySpace::Default::kokkos_space> device_view)
{
- std::vector<Number> cpu_values(cuda_view.size());
- Utilities::CUDA::copy_to_host(cuda_view.data(), cpu_values);
+ std::vector<Number> cpu_values(device_view.size());
+ Kokkos::deep_copy(Kokkos::View<Number *, Kokkos::HostSpace>(
+ cpu_values.data(), cpu_values.size()),
+ device_view);
for (Number value : cpu_values)
deallog << value << " ";
deallog << std::endl;
}
-__global__ void
-set_value(double *values_dev, unsigned int index, double val)
-{
- values_dev[index] = val;
-}
-
-
template <typename Number = double>
void
test()
std::vector<Number> cpu_owned(rank == 0 ? 8 : 0);
for (unsigned int i = 0; i < cpu_owned.size(); ++i)
cpu_owned[i] = i;
- std::unique_ptr<Number[], void (*)(Number *)> owned(
- Utilities::CUDA::allocate_device_data<Number>(cpu_owned.size()),
- Utilities::CUDA::delete_device_data<Number>);
- ArrayView<Number, MemorySpace::CUDA> owned_view(owned.get(),
- cpu_owned.size());
- Utilities::CUDA::copy_to_dev(cpu_owned, owned.get());
-
- std::vector<Number> cpu_ghost(4, 0);
- std::unique_ptr<Number[], void (*)(Number *)> ghost(
- Utilities::CUDA::allocate_device_data<Number>(cpu_ghost.size()),
- Utilities::CUDA::delete_device_data<Number>);
- ArrayView<Number, MemorySpace::CUDA> ghost_view(ghost.get(),
- cpu_ghost.size());
- Utilities::CUDA::copy_to_dev(cpu_ghost, ghost.get());
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space> owned(
+ "owned", rank == 0 ? 8 : 0);
+ ArrayView<Number, MemorySpace::Default> owned_view(owned.data(),
+ owned.size());
+ MemorySpace::Default::kokkos_space::execution_space exec;
+ Kokkos::parallel_for(
+ Kokkos::RangePolicy<decltype(exec)>(exec, 0, owned.size()),
+ KOKKOS_LAMBDA(int i) { owned(i) = i; });
+ exec.fence();
+
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space> ghost("ghost", 4);
+ ArrayView<Number, MemorySpace::Default> ghost_view(ghost.data(),
+ ghost.size());
+ Kokkos::deep_copy(ghost, 0);
// update ghost values
// vector of requests
std::vector<MPI_Request> compress_requests;
// allocate temporal array
- std::unique_ptr<Number[], void (*)(Number *)> tmp_data(
- Utilities::CUDA::allocate_device_data<Number>(
- tight_partitioner->n_import_indices()),
- Utilities::CUDA::delete_device_data<Number>);
- ArrayView<Number, MemorySpace::CUDA> tmp_data_view(
- tmp_data.get(), tight_partitioner->n_import_indices());
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space> tmp_data(
+ "tmp_data", tight_partitioner->n_import_indices());
+ ArrayView<Number, MemorySpace::Default> tmp_data_view(tmp_data.data(),
+ tmp_data.size());
// begin exchange, and ...
- tight_partitioner->export_to_ghosted_array_start<Number, MemorySpace::CUDA>(
- 0, owned_view, tmp_data_view, ghost_view, requests);
+ tight_partitioner
+ ->export_to_ghosted_array_start<Number, MemorySpace::Default>(
+ 0, owned_view, tmp_data_view, ghost_view, requests);
// ... finish exchange
- tight_partitioner->export_to_ghosted_array_finish<Number, MemorySpace::CUDA>(
- ghost_view, requests);
+ tight_partitioner
+ ->export_to_ghosted_array_finish<Number, MemorySpace::Default>(ghost_view,
+ requests);
auto print = [&]() {
deallog << "owned:" << std::endl;
- print_cuda_view(owned_view);
+ print_device_view(owned);
deallog << "ghost:" << std::endl;
- print_cuda_view(ghost_view);
+ print_device_view(ghost);
};
deallog << "update ghosts()" << std::endl;
print();
- std::unique_ptr<Number[], void (*)(Number *)> import_data(
- Utilities::CUDA::allocate_device_data<Number>(
- tight_partitioner->n_import_indices()),
- Utilities::CUDA::delete_device_data<Number>);
- ArrayView<Number, MemorySpace::CUDA> import_data_view(
- tmp_data.get(), tight_partitioner->n_import_indices());
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space> import_data(
+ "import_data", tight_partitioner->n_import_indices());
+ ArrayView<Number, MemorySpace::Default> import_data_view(tmp_data.data(),
+ import_data.size());
// now do insert:
auto compress = [&](VectorOperation::values operation) {
const unsigned int counter = 0;
tight_partitioner
- ->import_from_ghosted_array_start<Number, MemorySpace::CUDA>(
+ ->import_from_ghosted_array_start<Number, MemorySpace::Default>(
operation, counter, ghost_view, import_data_view, compress_requests);
tight_partitioner
- ->import_from_ghosted_array_finish<Number, MemorySpace::CUDA>(
+ ->import_from_ghosted_array_finish<Number, MemorySpace::Default>(
operation, import_data_view, owned_view, ghost_view, compress_requests);
};
if (rank == 1)
{
- set_value<<<1, 1>>>(ghost.get(), 1, 10);
- set_value<<<1, 1>>>(ghost.get(), 2, 20);
+ Kokkos::deep_copy(Kokkos::subview(ghost, 1), 10);
+ Kokkos::deep_copy(Kokkos::subview(ghost, 2), 20);
}
deallog << "compress(add)" << std::endl;
using namespace dealii;
Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
-
+ Kokkos::initialize();
MPILogInitAll log;
- init_cuda(true);
-
test();
+ Kokkos::finalize();
return 0;
}