#include <deal.II/base/config.h>
+#include <deal.II/base/cuda_size.h>
#include <deal.II/base/partitioner.h>
+#include <deal.II/lac/cuda_kernels.templates.h>
#include <deal.II/lac/la_parallel_vector.h>
+#include <algorithm>
+#include <cstring>
#include <type_traits>
# ifdef DEAL_II_WITH_MPI
- template <typename Number>
+ template <typename Number, typename MemorySpaceType>
void
Partitioner::export_to_ghosted_array_start(
const unsigned int communication_channel,
communicator,
&requests[i]);
AssertThrowMPI(ierr);
- ghost_array_ptr += ghost_targets()[i].second;
+ ghost_array_ptr += ghost_targets_data[i].second;
}
Number *temp_array_ptr = temporary_storage.data();
import_indices_chunks_by_rank_data[i + 1];
unsigned int index = 0;
for (; my_imports != end_my_imports; ++my_imports)
- for (unsigned int j = my_imports->first; j < my_imports->second;
- j++)
- temp_array_ptr[index++] = locally_owned_array[j];
+ {
+ const unsigned int chunk_size =
+ my_imports->second - my_imports->first;
+# if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+ defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+ if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+ {
+ const cudaError_t cuda_error_code =
+ cudaMemcpy(temp_array_ptr + index,
+ locally_owned_array.data() + my_imports->first,
+ chunk_size * sizeof(Number),
+ cudaMemcpyDeviceToDevice);
+ AssertCuda(cuda_error_code);
+ }
+ else
+# endif
+ {
+ std::memcpy(temp_array_ptr + index,
+ locally_owned_array.data() + my_imports->first,
+ chunk_size * sizeof(Number));
+ }
+ index += chunk_size;
+ }
+
AssertDimension(index, import_targets_data[i].second);
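(As an aside, a minimal stand-alone sketch of the dispatch pattern used in the
chunked copy above: a plain `if` on std::is_same, rather than `if constexpr`,
suffices because both branches type-check for either memory space. The names
DeviceSpace, device_copy, and HAVE_DEVICE_COPY are hypothetical stand-ins for
MemorySpace::CUDA, cudaMemcpy, and the CUDA-aware-MPI guard.)

    #include <cstring>
    #include <type_traits>

    struct HostSpace {};
    struct DeviceSpace {};  // hypothetical stand-in for MemorySpace::CUDA

    template <typename MemorySpaceType, typename Number>
    void copy_chunk(Number *dst, const Number *src, const std::size_t n)
    {
    #ifdef HAVE_DEVICE_COPY  // hypothetical stand-in for the CUDA-aware guard
      if (std::is_same<MemorySpaceType, DeviceSpace>::value)
        device_copy(dst, src, n * sizeof(Number));  // e.g. cudaMemcpy
      else
    #endif
        std::memcpy(dst, src, n * sizeof(Number));
    }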
// start the send operations
- template <typename Number>
+ template <typename Number, typename MemorySpaceType>
void
Partitioner::import_from_ghosted_array_finish(
const VectorOperation::values vector_operation,
if (vector_operation != dealii::VectorOperation::insert)
AssertDimension(n_ghost_targets + n_import_targets, requests.size());
-
// first wait for the receive to complete
if (requests.size() > 0 && n_import_targets > 0)
{
AssertThrowMPI(ierr);
const Number *read_position = temporary_storage.data();
- std::vector<std::pair<unsigned int, unsigned int>>::const_iterator
- my_imports = import_indices_data.begin();
-
+# if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+ defined(DEAL_II_WITH_CUDA_AWARE_MPI))
// If the operation is not an insertion, add the imported data to the
// local values. For insert, nothing is done here (but in debug mode
// we assert that the specified value is either zero or matches the
// ones already present).
if (vector_operation == dealii::VectorOperation::add)
- for (; my_imports != import_indices_data.end(); ++my_imports)
- for (unsigned int j = my_imports->first; j < my_imports->second;
+ for (const auto &import_range : import_indices_data)
+ for (unsigned int j = import_range.first; j < import_range.second;
j++)
locally_owned_array[j] += *read_position++;
else if (vector_operation == dealii::VectorOperation::min)
- for (; my_imports != import_indices_data.end(); ++my_imports)
- for (unsigned int j = my_imports->first; j < my_imports->second;
+ for (const auto &import_range : import_indices_data)
+ for (unsigned int j = import_range.first; j < import_range.second;
j++)
{
                locally_owned_array[j] =
                  std::min(*read_position, locally_owned_array[j]);
                read_position++;
}
else if (vector_operation == dealii::VectorOperation::max)
- for (; my_imports != import_indices_data.end(); ++my_imports)
- for (unsigned int j = my_imports->first; j < my_imports->second;
+ for (const auto &import_range : import_indices_data)
+ for (unsigned int j = import_range.first; j < import_range.second;
j++)
{
                locally_owned_array[j] =
                  std::max(*read_position, locally_owned_array[j]);
                read_position++;
}
else
- for (; my_imports != import_indices_data.end(); ++my_imports)
- for (unsigned int j = my_imports->first; j < my_imports->second;
+ for (const auto &import_range : import_indices_data)
+ for (unsigned int j = import_range.first; j < import_range.second;
j++, read_position++)
// Below we use relatively large precision in units in the last
// place (ULP) when comparing the incoming value against the one
// already present, as an exact comparison can easily be triggered
// by harmless roundoff in otherwise correct computations.
Number>::ExcNonMatchingElements(*read_position,
locally_owned_array[j],
my_pid));
+# else
+          // Only VectorOperation::insert and VectorOperation::add are
+          // implemented for the CUDA-aware MPI path; check this before
+          // doing any work.
+          Assert(vector_operation == dealii::VectorOperation::insert ||
+                   vector_operation == dealii::VectorOperation::add,
+                 ExcNotImplemented());
+
+          if (vector_operation == dealii::VectorOperation::add)
+ {
+ for (const auto &import_range : import_indices_data)
+ {
+ const auto chunk_size =
+ import_range.second - import_range.first;
+ const int n_blocks =
+ 1 + (chunk_size - 1) / (::dealii::CUDAWrappers::chunk_size *
+ ::dealii::CUDAWrappers::block_size);
+ dealii::LinearAlgebra::CUDAWrappers::kernel::vector_bin_op<
+ Number,
+ dealii::LinearAlgebra::CUDAWrappers::kernel::Binop_Addition>
+ <<<n_blocks, dealii::CUDAWrappers::block_size>>>(
+ locally_owned_array.data() + import_range.first,
+ read_position,
+ chunk_size);
+ read_position += chunk_size;
+ }
+ }
+ else
+ for (const auto &import_range : import_indices_data)
+ {
+ const auto chunk_size =
+ import_range.second - import_range.first;
+ const cudaError_t cuda_error_code =
+ cudaMemcpy(locally_owned_array.data() + import_range.first,
+ read_position,
+ chunk_size * sizeof(Number),
+ cudaMemcpyDeviceToDevice);
+ AssertCuda(cuda_error_code);
+ read_position += chunk_size;
+ }
+
+# endif
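(The launch configuration above is a ceiling division: each block runs
block_size threads and each thread processes chunk_size elements, so
n_blocks = 1 + (n - 1) / (chunk_size * block_size). A small compile-time
check, with illustrative constants rather than deal.II's configured values:)

    constexpr int block_size = 512; // threads per block (illustrative)
    constexpr int chunk_size = 8;   // elements per thread (illustrative)

    constexpr int n_blocks(const int n)
    {
      return 1 + (n - 1) / (chunk_size * block_size);
    }

    static_assert(n_blocks(1) == 1, "one element still needs one block");
    static_assert(n_blocks(4096) == 1, "512 * 8 elements fit in one block");
    static_assert(n_blocks(4097) == 2, "one extra element adds a block");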
AssertDimension(read_position - temporary_storage.data(),
n_import_indices());
}
if (ghost_array.size() > 0)
{
Assert(ghost_array.begin() != nullptr, ExcInternalError());
+
+# if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+ defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+ if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+ {
+ Assert(std::is_trivial<Number>::value, ExcNotImplemented());
+            const cudaError_t cuda_error_code =
+              cudaMemset(ghost_array.data(),
+                         0,
+                         sizeof(Number) * n_ghost_indices());
+            AssertCuda(cuda_error_code);
+ }
+ else
+# endif
+ {
# ifdef DEAL_II_WITH_CXX17
- if constexpr (std::is_trivial<Number>::value)
+ if constexpr (std::is_trivial<Number>::value)
# else
- if (std::is_trivial<Number>::value)
+ if (std::is_trivial<Number>::value)
# endif
- std::memset(ghost_array.data(),
- 0,
- sizeof(Number) * n_ghost_indices());
- else
- std::fill(ghost_array.data(),
- ghost_array.data() + n_ghost_indices(),
- 0);
+ {
+ std::memset(ghost_array.data(),
+ 0,
+ sizeof(Number) * n_ghost_indices());
+ }
+ else
+ std::fill(ghost_array.data(),
+ ghost_array.data() + n_ghost_indices(),
+ 0);
+ }
}
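(The zero-initialization above is the usual trivial-type dispatch: std::memset
is only meaningful for trivial types, while std::fill works for any Number
that can be value-initialized. In stand-alone form, with zero_range as a
hypothetical helper rather than a deal.II function, the logic is:)

    #include <algorithm>
    #include <cstring>
    #include <type_traits>

    template <typename Number>
    void zero_range(Number *const p, const std::size_t n)
    {
      if (std::is_trivial<Number>::value)
        std::memset(static_cast<void *>(p), 0, n * sizeof(Number));
      else
        std::fill(p, p + n, Number());
    }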
// clear the compress requests
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+for (SCALAR : MPI_SCALARS)
+ {
+#ifdef DEAL_II_WITH_MPI
+  template void Utilities::MPI::Partitioner::export_to_ghosted_array_start<
+    SCALAR,
+    MemorySpace::CUDA>(const unsigned int,
+                       const ArrayView<const SCALAR> &,
+                       const ArrayView<SCALAR> &,
+                       const ArrayView<SCALAR> &,
+                       std::vector<MPI_Request> &) const;
+ template void Utilities::MPI::Partitioner::import_from_ghosted_array_finish<
+ SCALAR,
+ MemorySpace::CUDA>(const VectorOperation::values,
+ const ArrayView<const SCALAR> &,
+ const ArrayView<SCALAR> &,
+ const ArrayView<SCALAR> &,
+ std::vector<MPI_Request> &) const;
+#endif
+ }
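(The for (SCALAR : MPI_SCALARS) loop above is not C++: .inst.in files are
processed by deal.II's expand_instantiations tool at build time, which stamps
out one copy of the body per scalar type in the configured list. For
SCALAR = double, the first declaration above expands to the following sketch:)

    template void Utilities::MPI::Partitioner::export_to_ghosted_array_start<
      double,
      MemorySpace::CUDA>(const unsigned int,
                         const ArrayView<const double> &,
                         const ArrayView<double> &,
                         const ArrayView<double> &,
                         std::vector<MPI_Request> &) const;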
{
#ifdef DEAL_II_WITH_MPI
template void Utilities::MPI::Partitioner::export_to_ghosted_array_start<
- SCALAR>(const unsigned int,
- const ArrayView<const SCALAR> &,
- const ArrayView<SCALAR> &,
- const ArrayView<SCALAR> &,
- std::vector<MPI_Request> &) const;
+ SCALAR,
+ MemorySpace::Host>(const unsigned int,
+ const ArrayView<const SCALAR> &,
+ const ArrayView<SCALAR> &,
+ const ArrayView<SCALAR> &,
+ std::vector<MPI_Request> &) const;
template void Utilities::MPI::Partitioner::export_to_ghosted_array_finish<
SCALAR>(const ArrayView<SCALAR> &, std::vector<MPI_Request> &) const;
template void Utilities::MPI::Partitioner::import_from_ghosted_array_start<
const ArrayView<SCALAR> &,
std::vector<MPI_Request> &) const;
template void Utilities::MPI::Partitioner::import_from_ghosted_array_finish<
- SCALAR>(const VectorOperation::values,
- const ArrayView<const SCALAR> &,
- const ArrayView<SCALAR> &,
- const ArrayView<SCALAR> &,
- std::vector<MPI_Request> &) const;
+ SCALAR,
+ MemorySpace::Host>(const VectorOperation::values,
+ const ArrayView<const SCALAR> &,
+ const ArrayView<SCALAR> &,
+ const ArrayView<SCALAR> &,
+ std::vector<MPI_Request> &) const;
#endif
}