ENDIF()
ENDMACRO()
+MACRO(FEATURE_MPI_CONFIGURE_EXTERNAL)
+
+ #
+ # TODO: We might consider refactoring this option into an automatic check
+ # (in Modules/FindMPI.cmake) at some point. For the time being this is an
+ # advanced configuration option.
+ #
+ OPTION(DEAL_II_MPI_WITH_CUDA_SUPPORT "Enable MPI CUDA support" OFF)
+ MARK_AS_ADVANCED(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+ENDMACRO()
+
MACRO(FEATURE_MPI_ERROR_MESSAGE)
MESSAGE(FATAL_ERROR "\n"
"Could not find any suitable mpi library!\n"
CONFIGURE_FEATURE(MPI)
+
+
+IF(NOT DEAL_II_WITH_MPI)
+ #
+ # Disable and hide the DEAL_II_MPI_WITH_CUDA_SUPPORT option
+ #
+ SET(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+ UNSET(DEAL_II_MPI_WITH_CUDA_SUPPORT CACHE)
+ENDIF()
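
For orientation, here is a hedged sketch (not part of the patch) of how the renamed configuration macro is meant to be consumed once it is exported through config.h: library code combines DEAL_II_MPI_WITH_CUDA_SUPPORT with DEAL_II_COMPILER_CUDA_AWARE to decide at compile time whether device pointers can be handed to MPI directly or whether data has to be staged through host buffers. The function name, buffer names, and parameters below are illustrative placeholders; only the two macros and MPI_Irecv are taken from the patch or standard MPI.

    #include <deal.II/base/config.h>
    #include <mpi.h>

    // Illustrative only: start receiving ghost data into either a device or a
    // host buffer, depending on how deal.II was configured.
    void start_ghost_receive(double *     device_buffer,
                             double *     host_buffer,
                             int          count,
                             int          source_rank,
                             MPI_Comm     comm,
                             MPI_Request *request)
    {
    #if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
      defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
      // CUDA-aware MPI: the device pointer is passed straight to MPI.
      MPI_Irecv(device_buffer, count, MPI_DOUBLE, source_rank, 0, comm, request);
    #else
      // Plain MPI: receive into host memory and copy to the device afterwards.
      MPI_Irecv(host_buffer, count, MPI_DOUBLE, source_rank, 0, comm, request);
    #endif
    }
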
+++ /dev/null
-## ---------------------------------------------------------------------
-##
-## Copyright (C) 2018 by the deal.II authors
-##
-## This file is part of the deal.II library.
-##
-## The deal.II library is free software; you can use it, redistribute
-## it, and/or modify it under the terms of the GNU Lesser General
-## Public License as published by the Free Software Foundation; either
-## version 2.1 of the License, or (at your option) any later version.
-## The full text of the license can be found in the file LICENSE.md at
-## the top level directory of deal.II.
-##
-## ---------------------------------------------------------------------
-
-FOREACH(_dependency MPI CUDA)
- IF(NOT DEAL_II_WITH_${_dependency})
- IF(DEAL_II_WITH_CUDA_AWARE_MPI)
- MESSAGE(FATAL_ERROR "\n"
- "DEAL_II_WITH_CUDA_AWARE_MPI has unmet configuration requirements: "
- "DEAL_II_WITH_${_dependency} has to be set to \"ON\".\n\n"
- )
- ELSE()
- MESSAGE(STATUS
- "DEAL_II_WITH_CUDA_AWARE_MPI has unmet configuration requirements: "
- "DEAL_II_WITH_${_dependency} has to be set to \"ON\"."
- )
- SET(DEAL_II_WITH_CUDA_AWARE_MPI OFF)
- ENDIF()
- ENDIF()
-ENDFOREACH()
# #
########################################################################
- OPTION(DEAL_II_WITH_CUDA_AWARE_MPI
- "If set to ON, then we assume that the MPI implementation used is CUDA-aware."
- OFF
- )
- LIST(APPEND DEAL_II_FEATURES CUDA_AWARE_MPI)
-
OPTION(DEAL_II_WITH_64BIT_INDICES
"If set to ON, then use 64-bit data types to represent global degree of freedom indices. The default is to OFF. You only want to set this to ON if you will solve problems with more than 2^31 (approximately 2 billion) unknowns. If set to ON, you also need to ensure that both Trilinos and/or PETSc support 64-bit indices."
New: LinearAlgebra::distributed::Vector with CUDA memory space uses CUDA-aware
-MPI in case deal.II is configured with DEAL_II_WITH_CUDA_AWARE_MPI=ON.
+MPI if deal.II is configured with DEAL_II_MPI_WITH_CUDA_SUPPORT=ON.
<br>
(Daniel Arndt, Bruno Turcksin, 2019/01/04)
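
As a usage note for this changelog entry, the following is a minimal sketch, assuming deal.II was built with DEAL_II_WITH_MPI=ON, DEAL_II_WITH_CUDA=ON, and DEAL_II_MPI_WITH_CUDA_SUPPORT=ON. LinearAlgebra::distributed::Vector, MemorySpace::CUDA, and update_ghost_values() are existing deal.II names; the index sets and the communicator are assumed to be set up by the caller.

    #include <deal.II/base/index_set.h>
    #include <deal.II/base/mpi.h>
    #include <deal.II/lac/la_parallel_vector.h>

    using namespace dealii;

    // Sketch: a distributed vector whose data lives in CUDA device memory.
    // With DEAL_II_MPI_WITH_CUDA_SUPPORT=ON the ghost exchange below passes
    // device buffers to MPI directly; otherwise the data is staged on the host.
    void exchange_ghosts(const IndexSet &locally_owned,
                         const IndexSet &ghost_indices,
                         const MPI_Comm  communicator)
    {
      LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v(
        locally_owned, ghost_indices, communicator);

      // ... fill locally owned entries on the device ...

      v.update_ghost_values();
    }
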
#cmakedefine DEAL_II_WITH_ASSIMP
#cmakedefine DEAL_II_WITH_COMPLEX_VALUES
#cmakedefine DEAL_II_WITH_CUDA
-#cmakedefine DEAL_II_WITH_CUDA_AWARE_MPI
#cmakedefine DEAL_II_WITH_CXX14
#cmakedefine DEAL_II_WITH_CXX17
#cmakedefine DEAL_II_WITH_GINKGO
(major)*100 + (minor))
#endif
+#cmakedefine DEAL_II_MPI_WITH_CUDA_SUPPORT
+
/***********************************************************************
* Two macro names that we put at the top and bottom of all deal.II files
* and that will be expanded to "namespace dealii {" and "}".
Number *temp_array_ptr = temporary_storage.data();
# if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
- defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+ defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
      // When using CUDA-aware MPI, the set of local indices that are ghost
// indices on other processors is expanded in arrays. This is for
// performance reasons as this can significantly decrease the number of
for (unsigned int i = 0; i < n_import_targets; i++)
{
# if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
- defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+ defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
{
const auto chunk_size = import_indices_plain_dev[i].second;
const unsigned int n_ghost_targets = ghost_targets_data.size();
# if (defined(DEAL_II_COMPILER_CUDA_AWARE) && \
- defined(DEAL_II_WITH_CUDA_AWARE_MPI))
+ defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
      // When using CUDA-aware MPI, the set of local indices that are ghost
// indices on other processors is expanded in arrays. This is for
// performance reasons as this can significantly decrease the number of
const Number *read_position = temporary_storage.data();
# if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
- defined(DEAL_II_WITH_CUDA_AWARE_MPI))
+ defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
// If the operation is no insertion, add the imported data to the
// local values. For insert, nothing is done here (but in debug mode
// we assert that the specified value is either zero or matches with
Assert(ghost_array.begin() != nullptr, ExcInternalError());
# if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
- defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+ defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
{
Assert(std::is_trivial<Number>::value, ExcNotImplemented());
if (partitioner->n_import_indices() > 0)
{
# if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
- defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+ defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
Assert(
(std::is_same<MemorySpaceType, dealii::MemorySpace::CUDA>::value),
ExcMessage(
Utilities::CUDA::allocate_device_data<Number>(
partitioner->n_import_indices()));
# else
-# ifdef DEAL_II_WITH_CUDA_AWARE_MPI
+# ifdef DEAL_II_MPI_WITH_CUDA_SUPPORT
static_assert(
std::is_same<MemorySpaceType, dealii::MemorySpace::Host>::value,
"This code path should only be compiled for CUDA-aware-MPI for MemorySpace::Host!");
}
# if defined DEAL_II_COMPILER_CUDA_AWARE && \
- !defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+ !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
      // Move the data to the host and then move it back to the
      // device. We use values to store the elements because the function
// uses a view of the array and thus we need the data on the host to
# endif
# if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
- defined(DEAL_II_WITH_CUDA_AWARE_MPI))
+ defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
partitioner->import_from_ghosted_array_start(
operation,
counter,
// make this function thread safe
std::lock_guard<std::mutex> lock(mutex);
# if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
- defined(DEAL_II_WITH_CUDA_AWARE_MPI))
+ defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
Assert(partitioner->n_import_indices() == 0 ||
import_data.values != nullptr,
ExcNotInitialized());
# endif
# if defined DEAL_II_COMPILER_CUDA_AWARE && \
- !defined DEAL_II_WITH_CUDA_AWARE_MPI
+ !defined DEAL_II_MPI_WITH_CUDA_SUPPORT
// The communication is done on the host, so we need to
// move the data back to the device.
if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
if (partitioner->n_import_indices() > 0)
{
# if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
- defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+ defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
Assert(
(std::is_same<MemorySpaceType, dealii::MemorySpace::CUDA>::value),
ExcMessage(
Utilities::CUDA::allocate_device_data<Number>(
partitioner->n_import_indices()));
# else
-# ifdef DEAL_II_WITH_CUDA_AWARE_MPI
+# ifdef DEAL_II_MPI_WITH_CUDA_SUPPORT
static_assert(
std::is_same<MemorySpaceType, dealii::MemorySpace::Host>::value,
"This code path should only be compiled for CUDA-aware-MPI for MemorySpace::Host!");
}
# if defined DEAL_II_COMPILER_CUDA_AWARE && \
- !defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+ !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
      // Move the data to the host and then move it back to the
      // device. We use values to store the elements because the function
// uses a view of the array and thus we need the data on the host to
# endif
# if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
- defined(DEAL_II_WITH_CUDA_AWARE_MPI))
+ defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
partitioner->export_to_ghosted_array_start<Number, MemorySpace::Host>(
counter,
ArrayView<const Number, MemorySpace::Host>(data.values.get(),
std::lock_guard<std::mutex> lock(mutex);
# if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
- defined(DEAL_II_WITH_CUDA_AWARE_MPI))
+ defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
partitioner->export_to_ghosted_array_finish(
ArrayView<Number, MemorySpace::Host>(
data.values.get() + partitioner->local_size(),
}
# if defined DEAL_II_COMPILER_CUDA_AWARE && \
- !defined DEAL_II_WITH_CUDA_AWARE_MPI
+ !defined DEAL_II_MPI_WITH_CUDA_SUPPORT
// The communication is done on the host, so we need to
// move the data back to the device.
if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)