https://gitweb.dealii.org/ - dealii.git/commitdiff
Refactor configuration option into DEAL_II_MPI_WITH_CUDA_SUPPORT
author    Matthias Maier <tamiko@43-1.org>
          Mon, 29 Apr 2019 18:18:28 +0000 (13:18 -0500)
committer Matthias Maier <tamiko@43-1.org>
          Fri, 10 May 2019 15:10:30 +0000 (10:10 -0500)
cmake/configure/configure_1_mpi.cmake
cmake/configure/configure_2_cuda_aware_mpi.cmake [deleted file]
cmake/setup_cached_variables.cmake
doc/news/changes/minor/20190104ArndtTurcksin
include/deal.II/base/config.h.in
include/deal.II/base/partitioner.templates.h
include/deal.II/lac/la_parallel_vector.templates.h
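With this change, CUDA-aware MPI support is requested via the advanced cache
variable DEAL_II_MPI_WITH_CUDA_SUPPORT instead of the former feature toggle
DEAL_II_WITH_CUDA_AWARE_MPI. A rough sketch of a configure call (the source
path is a placeholder, and DEAL_II_WITH_MPI and DEAL_II_WITH_CUDA must also be
enabled for the option to take effect):

    cmake -DDEAL_II_WITH_MPI=ON \
          -DDEAL_II_WITH_CUDA=ON \
          -DDEAL_II_MPI_WITH_CUDA_SUPPORT=ON \
          /path/to/dealii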

diff --git a/cmake/configure/configure_1_mpi.cmake b/cmake/configure/configure_1_mpi.cmake
index a5929c59083855c038d6bd96ac679562d65a1477..c9c7767895d541ba0283521721a1a74949e353e0 100644 (file)
@@ -42,6 +42,17 @@ MACRO(FEATURE_MPI_FIND_EXTERNAL var)
   ENDIF()
 ENDMACRO()
 
+MACRO(FEATURE_MPI_CONFIGURE_EXTERNAL)
+
+  #
+  # TODO: We might consider refactoring this option into an automatic check
+  # (in Modules/FindMPI.cmake) at some point. For the time being this is an
+  # advanced configuration option.
+  #
+  OPTION(DEAL_II_MPI_WITH_CUDA_SUPPORT "Enable MPI Cuda support" OFF)
+  MARK_AS_ADVANCED(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+ENDMACRO()
+
 MACRO(FEATURE_MPI_ERROR_MESSAGE)
   MESSAGE(FATAL_ERROR "\n"
     "Could not find any suitable mpi library!\n"
@@ -61,3 +72,12 @@ ENDMACRO()
 
 
 CONFIGURE_FEATURE(MPI)
+
+
+IF(NOT DEAL_II_WITH_MPI)
+  #
+  # Disable and hide the DEAL_II_MPI_WITH_CUDA_SUPPORT option
+  #
+  SET(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+  UNSET(DEAL_II_MPI_WITH_CUDA_SUPPORT CACHE)
+ENDIF()
diff --git a/cmake/configure/configure_2_cuda_aware_mpi.cmake b/cmake/configure/configure_2_cuda_aware_mpi.cmake
deleted file mode 100644 (file)
index 6022ea6..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-## ---------------------------------------------------------------------
-##
-## Copyright (C) 2018 by the deal.II authors
-##
-## This file is part of the deal.II library.
-##
-## The deal.II library is free software; you can use it, redistribute
-## it, and/or modify it under the terms of the GNU Lesser General
-## Public License as published by the Free Software Foundation; either
-## version 2.1 of the License, or (at your option) any later version.
-## The full text of the license can be found in the file LICENSE.md at
-## the top level directory of deal.II.
-##
-## ---------------------------------------------------------------------
-
-FOREACH(_dependency MPI CUDA)
-  IF(NOT DEAL_II_WITH_${_dependency})
-    IF(DEAL_II_WITH_CUDA_AWARE_MPI)
-      MESSAGE(FATAL_ERROR "\n"
-        "DEAL_II_WITH_CUDA_AWARE_MPI has unmet configuration requirements: "
-        "DEAL_II_WITH_${_dependency} has to be set to \"ON\".\n\n"
-        )
-    ELSE()
-      MESSAGE(STATUS
-        "DEAL_II_WITH_CUDA_AWARE_MPI has unmet configuration requirements: "
-        "DEAL_II_WITH_${_dependency} has to be set to \"ON\"."
-        )
-      SET(DEAL_II_WITH_CUDA_AWARE_MPI OFF)
-    ENDIF()
-  ENDIF()
-ENDFOREACH()
diff --git a/cmake/setup_cached_variables.cmake b/cmake/setup_cached_variables.cmake
index a769604252b95e51e93ccb09ac38ac21a75c5360..0f9051e9a9e31d4e521859b5f7497d6b3e57bf87 100644 (file)
@@ -365,12 +365,6 @@ UNSET(ENV{NVCCFLAGS})
 #                                                                      #
 ########################################################################
 
- OPTION(DEAL_II_WITH_CUDA_AWARE_MPI
-   "If set to ON, then we assume that the MPI implementation used is CUDA-aware."
-   OFF
-   )
- LIST(APPEND DEAL_II_FEATURES CUDA_AWARE_MPI)
-
 
 OPTION(DEAL_II_WITH_64BIT_INDICES
   "If set to ON, then use 64-bit data types to represent global degree of freedom indices. The default is to OFF. You only want to set this to ON if you will solve problems with more than 2^31 (approximately 2 billion) unknowns. If set to ON, you also need to ensure that both Trilinos and/or PETSc support 64-bit indices."
diff --git a/doc/news/changes/minor/20190104ArndtTurcksin b/doc/news/changes/minor/20190104ArndtTurcksin
index 9496d40629368a6f0dbf01d837bbcdd078dcd207..9c74f08b9b870e2426cf2231a15d02c46cb749fa 100644 (file)
@@ -1,4 +1,4 @@
 New: LinearAlgebra::distributed::Vector with CUDA memory space uses CUDA-aware
-MPI in case deal.II is configured with DEAL_II_WITH_CUDA_AWARE_MPI=ON.
+MPI in case deal.II is configured with DEAL_II_MPI_WITH_CUDA_SUPPORT=ON.
 <br>
 (Daniel Arndt, Bruno Turcksin, 2019/01/04)
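The changelog entry above concerns the ghost-value exchange of
LinearAlgebra::distributed::Vector in CUDA memory space. A minimal,
hypothetical usage sketch (it assumes a build with DEAL_II_WITH_MPI=ON and
DEAL_II_WITH_CUDA=ON, compiled as CUDA code; the index layout is made up for
illustration):

    #include <deal.II/base/index_set.h>
    #include <deal.II/base/memory_space.h>
    #include <deal.II/base/mpi.h>
    #include <deal.II/lac/la_parallel_vector.h>

    int main(int argc, char *argv[])
    {
      using namespace dealii;
      Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);

      const unsigned int n_procs =
        Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
      const unsigned int rank =
        Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);

      // 100 entries, split evenly; every rank also sees entry 0 as a ghost.
      const types::global_dof_index size = 100;
      IndexSet locally_owned(size);
      locally_owned.add_range(rank * size / n_procs,
                              (rank + 1) * size / n_procs);
      IndexSet ghost_indices(size);
      ghost_indices.add_index(0);

      LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v(
        locally_owned, ghost_indices, MPI_COMM_WORLD);

      // With DEAL_II_MPI_WITH_CUDA_SUPPORT=ON the messages behind this call
      // are posted directly from device memory; otherwise the data is staged
      // through the host, as the hunks in la_parallel_vector.templates.h
      // below show.
      v.update_ghost_values();
    }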
diff --git a/include/deal.II/base/config.h.in b/include/deal.II/base/config.h.in
index edf13bc8030b2b7635ba81094fa3137f1db2195a..346a759ce61d551c4868f9456a817c29b62e2f30 100644 (file)
@@ -40,7 +40,6 @@
 #cmakedefine DEAL_II_WITH_ASSIMP
 #cmakedefine DEAL_II_WITH_COMPLEX_VALUES
 #cmakedefine DEAL_II_WITH_CUDA
-#cmakedefine DEAL_II_WITH_CUDA_AWARE_MPI
 #cmakedefine DEAL_II_WITH_CXX14
 #cmakedefine DEAL_II_WITH_CXX17
 #cmakedefine DEAL_II_WITH_GINKGO
     (major)*100 + (minor))
 #endif
 
+#cmakedefine DEAL_II_MPI_WITH_CUDA_SUPPORT
+
 /***********************************************************************
  * Two macro names that we put at the top and bottom of all deal.II files
  * and that will be expanded to "namespace dealii {" and "}".
diff --git a/include/deal.II/base/partitioner.templates.h b/include/deal.II/base/partitioner.templates.h
index 62072f1c020e3a99bfe80285ea3dff6739f79cee..35e1bdce689eaa704e6fd2d2833cce639ff8c0a3 100644 (file)
@@ -99,7 +99,7 @@ namespace Utilities
 
       Number *temp_array_ptr = temporary_storage.data();
 #    if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-      defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+      defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
       // When using CUDAs-aware MPI, the set of local indices that are ghosts
       // indices on other processors is expanded in arrays. This is for
       // performance reasons as this can significantly decrease the number of
@@ -113,7 +113,7 @@ namespace Utilities
       for (unsigned int i = 0; i < n_import_targets; i++)
         {
 #    if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-      defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+      defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
           if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
             {
               const auto chunk_size = import_indices_plain_dev[i].second;
@@ -531,7 +531,7 @@ namespace Utilities
       const unsigned int n_ghost_targets  = ghost_targets_data.size();
 
 #    if (defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-         defined(DEAL_II_WITH_CUDA_AWARE_MPI))
+         defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
       // When using CUDAs-aware MPI, the set of local indices that are ghosts
       // indices on other processors is expanded in arrays. This is for
       // performance reasons as this can significantly decrease the number of
@@ -554,7 +554,7 @@ namespace Utilities
 
           const Number *read_position = temporary_storage.data();
 #    if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-          defined(DEAL_II_WITH_CUDA_AWARE_MPI))
+          defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
           // If the operation is no insertion, add the imported data to the
           // local values. For insert, nothing is done here (but in debug mode
           // we assert that the specified value is either zero or matches with
@@ -706,7 +706,7 @@ namespace Utilities
           Assert(ghost_array.begin() != nullptr, ExcInternalError());
 
 #    if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-      defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+      defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
           if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
             {
               Assert(std::is_trivial<Number>::value, ExcNotImplemented());
diff --git a/include/deal.II/lac/la_parallel_vector.templates.h b/include/deal.II/lac/la_parallel_vector.templates.h
index ff2ce72f83c37e5c7a86cfc60b0a563a37f9dc6b..fe8181f2e28351c0674c5f0d7c920eeb26babb0d 100644 (file)
@@ -891,7 +891,7 @@ namespace LinearAlgebra
       if (partitioner->n_import_indices() > 0)
         {
 #  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-    defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+    defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
           Assert(
             (std::is_same<MemorySpaceType, dealii::MemorySpace::CUDA>::value),
             ExcMessage(
@@ -901,7 +901,7 @@ namespace LinearAlgebra
               Utilities::CUDA::allocate_device_data<Number>(
                 partitioner->n_import_indices()));
 #  else
-#    ifdef DEAL_II_WITH_CUDA_AWARE_MPI
+#    ifdef DEAL_II_MPI_WITH_CUDA_SUPPORT
           static_assert(
             std::is_same<MemorySpaceType, dealii::MemorySpace::Host>::value,
             "This code path should only be compiled for CUDA-aware-MPI for MemorySpace::Host!");
@@ -919,7 +919,7 @@ namespace LinearAlgebra
         }
 
 #  if defined DEAL_II_COMPILER_CUDA_AWARE && \
-    !defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+    !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
       // Move the data to the host and then move it back to the
       // the device. We use values to store the elements because the function
       // uses a view of the array and thus we need the data on the host to
@@ -939,7 +939,7 @@ namespace LinearAlgebra
 #  endif
 
 #  if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-        defined(DEAL_II_WITH_CUDA_AWARE_MPI))
+        defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
       partitioner->import_from_ghosted_array_start(
         operation,
         counter,
@@ -980,7 +980,7 @@ namespace LinearAlgebra
       // make this function thread safe
       std::lock_guard<std::mutex> lock(mutex);
 #  if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-        defined(DEAL_II_WITH_CUDA_AWARE_MPI))
+        defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
       Assert(partitioner->n_import_indices() == 0 ||
                import_data.values != nullptr,
              ExcNotInitialized());
@@ -1011,7 +1011,7 @@ namespace LinearAlgebra
 #  endif
 
 #  if defined DEAL_II_COMPILER_CUDA_AWARE && \
-    !defined  DEAL_II_WITH_CUDA_AWARE_MPI
+    !defined  DEAL_II_MPI_WITH_CUDA_SUPPORT
       // The communication is done on the host, so we need to
       // move the data back to the device.
       if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
@@ -1051,7 +1051,7 @@ namespace LinearAlgebra
       if (partitioner->n_import_indices() > 0)
         {
 #  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-    defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+    defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
           Assert(
             (std::is_same<MemorySpaceType, dealii::MemorySpace::CUDA>::value),
             ExcMessage(
@@ -1061,7 +1061,7 @@ namespace LinearAlgebra
               Utilities::CUDA::allocate_device_data<Number>(
                 partitioner->n_import_indices()));
 #  else
-#    ifdef DEAL_II_WITH_CUDA_AWARE_MPI
+#    ifdef DEAL_II_MPI_WITH_CUDA_SUPPORT
           static_assert(
             std::is_same<MemorySpaceType, dealii::MemorySpace::Host>::value,
             "This code path should only be compiled for CUDA-aware-MPI for MemorySpace::Host!");
@@ -1079,7 +1079,7 @@ namespace LinearAlgebra
         }
 
 #  if defined DEAL_II_COMPILER_CUDA_AWARE && \
-    !defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+    !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
       // Move the data to the host and then move it back to the
       // the device. We use values to store the elements because the function
       // uses a view of the array and thus we need the data on the host to
@@ -1099,7 +1099,7 @@ namespace LinearAlgebra
 #  endif
 
 #  if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-        defined(DEAL_II_WITH_CUDA_AWARE_MPI))
+        defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
       partitioner->export_to_ghosted_array_start<Number, MemorySpace::Host>(
         counter,
         ArrayView<const Number, MemorySpace::Host>(data.values.get(),
@@ -1146,7 +1146,7 @@ namespace LinearAlgebra
           std::lock_guard<std::mutex> lock(mutex);
 
 #  if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-        defined(DEAL_II_WITH_CUDA_AWARE_MPI))
+        defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
           partitioner->export_to_ghosted_array_finish(
             ArrayView<Number, MemorySpace::Host>(
               data.values.get() + partitioner->local_size(),
@@ -1162,7 +1162,7 @@ namespace LinearAlgebra
         }
 
 #  if defined DEAL_II_COMPILER_CUDA_AWARE && \
-    !defined  DEAL_II_WITH_CUDA_AWARE_MPI
+    !defined  DEAL_II_MPI_WITH_CUDA_SUPPORT
       // The communication is done on the host, so we need to
       // move the data back to the device.
       if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
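Taken together, the renamed macro takes part in a three-way preprocessor guard
that selects between handing device pointers to MPI directly, staging data
through the host, and a plain host build. The sketch below merely illustrates
that pattern; send_to_peer() and the buffer names are hypothetical and not
deal.II API:

    #include <deal.II/base/config.h>

    #include <mpi.h>

    #ifdef DEAL_II_COMPILER_CUDA_AWARE
    #  include <cuda_runtime.h>
    #endif

    void send_to_peer(const double *device_data, double *host_data,
                      const int count, const int peer, MPI_Comm comm)
    {
    #if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
      defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
      // CUDA build with a CUDA-aware MPI: pass the device pointer to MPI.
      MPI_Send(device_data, count, MPI_DOUBLE, peer, 0, comm);
    #elif defined(DEAL_II_COMPILER_CUDA_AWARE)
      // CUDA build, but the MPI library only accepts host memory:
      // stage the data through a host buffer first.
      cudaMemcpy(host_data, device_data, count * sizeof(double),
                 cudaMemcpyDeviceToHost);
      MPI_Send(host_data, count, MPI_DOUBLE, peer, 0, comm);
    #else
      // Host-only build: the data already lives in host_data.
      (void)device_data; // unused without CUDA
      MPI_Send(host_data, count, MPI_DOUBLE, peer, 0, comm);
    #endif
    }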
