From: Matthias Maier
Date: Mon, 29 Apr 2019 18:18:28 +0000 (-0500)
Subject: Refactor configuration option into DEAL_II_MPI_WITH_CUDA_SUPPORT
X-Git-Tag: v9.1.0-rc1~78^2~2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=5da0fe1c63d2c583e6969d2efe9df0bfb400097b;p=dealii.git

Refactor configuration option into DEAL_II_MPI_WITH_CUDA_SUPPORT
---

diff --git a/cmake/configure/configure_1_mpi.cmake b/cmake/configure/configure_1_mpi.cmake
index a5929c5908..c9c7767895 100644
--- a/cmake/configure/configure_1_mpi.cmake
+++ b/cmake/configure/configure_1_mpi.cmake
@@ -42,6 +42,17 @@ MACRO(FEATURE_MPI_FIND_EXTERNAL var)
   ENDIF()
 ENDMACRO()
 
+MACRO(FEATURE_MPI_CONFIGURE_EXTERNAL)
+
+  #
+  # TODO: We might consider refactoring this option into an automatic check
+  # (in Modules/FindMPI.cmake) at some point. For the time being this is an
+  # advanced configuration option.
+  #
+  OPTION(DEAL_II_MPI_WITH_CUDA_SUPPORT "Enable MPI Cuda support" OFF)
+  MARK_AS_ADVANCED(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+ENDMACRO()
+
 MACRO(FEATURE_MPI_ERROR_MESSAGE)
   MESSAGE(FATAL_ERROR "\n"
     "Could not find any suitable mpi library!\n"
@@ -61,3 +72,12 @@ ENDMACRO()
 
 
 CONFIGURE_FEATURE(MPI)
+
+
+IF(NOT DEAL_II_WITH_MPI)
+  #
+  # Disable and hide the DEAL_II_MPI_WITH_CUDA_SUPPORT option
+  #
+  SET(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+  UNSET(DEAL_II_MPI_WITH_CUDA_SUPPORT CACHE)
+ENDIF()
diff --git a/cmake/configure/configure_2_cuda_aware_mpi.cmake b/cmake/configure/configure_2_cuda_aware_mpi.cmake
deleted file mode 100644
index 6022ea69eb..0000000000
--- a/cmake/configure/configure_2_cuda_aware_mpi.cmake
+++ /dev/null
@@ -1,31 +0,0 @@
-## ---------------------------------------------------------------------
-##
-## Copyright (C) 2018 by the deal.II authors
-##
-## This file is part of the deal.II library.
-##
-## The deal.II library is free software; you can use it, redistribute
-## it, and/or modify it under the terms of the GNU Lesser General
-## Public License as published by the Free Software Foundation; either
-## version 2.1 of the License, or (at your option) any later version.
-## The full text of the license can be found in the file LICENSE.md at
-## the top level directory of deal.II.
-##
-## ---------------------------------------------------------------------
-
-FOREACH(_dependency MPI CUDA)
-  IF(NOT DEAL_II_WITH_${_dependency})
-    IF(DEAL_II_WITH_CUDA_AWARE_MPI)
-      MESSAGE(FATAL_ERROR "\n"
-        "DEAL_II_WITH_CUDA_AWARE_MPI has unmet configuration requirements: "
-        "DEAL_II_WITH_${_dependency} has to be set to \"ON\".\n\n"
-        )
-    ELSE()
-      MESSAGE(STATUS
-        "DEAL_II_WITH_CUDA_AWARE_MPI has unmet configuration requirements: "
-        "DEAL_II_WITH_${_dependency} has to be set to \"ON\"."
-        )
-      SET(DEAL_II_WITH_CUDA_AWARE_MPI OFF)
-    ENDIF()
-  ENDIF()
-ENDFOREACH()
diff --git a/cmake/setup_cached_variables.cmake b/cmake/setup_cached_variables.cmake
index a769604252..0f9051e9a9 100644
--- a/cmake/setup_cached_variables.cmake
+++ b/cmake/setup_cached_variables.cmake
@@ -365,12 +365,6 @@ UNSET(ENV{NVCCFLAGS})
 #
 #
 ########################################################################
 
-OPTION(DEAL_II_WITH_CUDA_AWARE_MPI
-  "If set to ON, then we assume that the MPI implementation used is CUDA-aware."
-  OFF
-  )
-LIST(APPEND DEAL_II_FEATURES CUDA_AWARE_MPI)
-
 OPTION(DEAL_II_WITH_64BIT_INDICES
   "If set to ON, then use 64-bit data types to represent global degree of freedom indices. The default is to OFF. You only want to set this to ON if you will solve problems with more than 2^31 (approximately 2 billion) unknowns. If set to ON, you also need to ensure that both Trilinos and/or PETSc support 64-bit indices."
diff --git a/doc/news/changes/minor/20190104ArndtTurcksin b/doc/news/changes/minor/20190104ArndtTurcksin
index 9496d40629..9c74f08b9b 100644
--- a/doc/news/changes/minor/20190104ArndtTurcksin
+++ b/doc/news/changes/minor/20190104ArndtTurcksin
@@ -1,4 +1,4 @@
 New: LinearAlgebra::distributed::Vector with CUDA memory space uses CUDA-aware
-MPI in case deal.II is configured with DEAL_II_WITH_CUDA_AWARE_MPI=ON.
+MPI in case deal.II is configured with DEAL_II_MPI_WITH_CUDA_SUPPORT=ON.
 
 (Daniel Arndt, Bruno Turcksin, 2019/01/04)
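The news entry above is the user-visible side of the rename. A minimal
sketch of the code path it refers to (untested; assumes a deal.II 9.1
build configured with DEAL_II_WITH_MPI=ON, DEAL_II_WITH_CUDA=ON, and
DEAL_II_MPI_WITH_CUDA_SUPPORT=ON; the index layout is made up for the
example):

    #include <deal.II/base/index_set.h>
    #include <deal.II/base/mpi.h>
    #include <deal.II/lac/la_parallel_vector.h>

    using namespace dealii;

    int main(int argc, char *argv[])
    {
      Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);

      const MPI_Comm     comm = MPI_COMM_WORLD;
      const unsigned int rank = Utilities::MPI::this_mpi_process(comm);
      const unsigned int size = Utilities::MPI::n_mpi_processes(comm);

      // Two locally owned entries per process; each process additionally
      // ghosts the first entry of its right neighbor, so the ghost
      // exchange below actually communicates.
      IndexSet owned(2 * size);
      owned.add_range(2 * rank, 2 * rank + 2);
      IndexSet ghosted(2 * size);
      ghosted.add_index((2 * rank + 2) % (2 * size));

      // With MemorySpace::CUDA, update_ghost_values() hands device
      // pointers directly to MPI when DEAL_II_MPI_WITH_CUDA_SUPPORT is
      // defined; otherwise the data is staged through a host buffer.
      LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> vec(
        owned, ghosted, comm);
      vec = 1.;
      vec.update_ghost_values();

      return 0;
    }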
diff --git a/include/deal.II/base/config.h.in b/include/deal.II/base/config.h.in
index edf13bc803..346a759ce6 100644
--- a/include/deal.II/base/config.h.in
+++ b/include/deal.II/base/config.h.in
@@ -40,7 +40,6 @@
 #cmakedefine DEAL_II_WITH_ASSIMP
 #cmakedefine DEAL_II_WITH_COMPLEX_VALUES
 #cmakedefine DEAL_II_WITH_CUDA
-#cmakedefine DEAL_II_WITH_CUDA_AWARE_MPI
 #cmakedefine DEAL_II_WITH_CXX14
 #cmakedefine DEAL_II_WITH_CXX17
 #cmakedefine DEAL_II_WITH_GINKGO
@@ -346,6 +345,8 @@
      (major)*100 + (minor))
 #endif
 
+#cmakedefine DEAL_II_MPI_WITH_CUDA_SUPPORT
+
 /***********************************************************************
  * Two macro names that we put at the top and bottom of all deal.II files
  * and that will be expanded to "namespace dealii {" and "}".
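With the #cmakedefine above, the renamed CMake option materializes as a
preprocessor symbol in <deal.II/base/config.h>. The guard pattern used
throughout the rest of this patch then reads as follows (a sketch of the
idiom only, not a literal excerpt):

    #include <deal.II/base/config.h>

    // CUDA-aware MPI transfers are compiled in only when the compiler can
    // build CUDA code *and* the MPI library was declared CUDA-aware at
    // configure time; either condition alone is not sufficient.
    #if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
      defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
    // ... pass device pointers straight to MPI ...
    #else
    // ... stage the data through host buffers before communicating ...
    #endif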
diff --git a/include/deal.II/base/partitioner.templates.h b/include/deal.II/base/partitioner.templates.h
index 62072f1c02..35e1bdce68 100644
--- a/include/deal.II/base/partitioner.templates.h
+++ b/include/deal.II/base/partitioner.templates.h
@@ -99,7 +99,7 @@ namespace Utilities
 
       Number *temp_array_ptr = temporary_storage.data();
 #  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-      defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+      defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
       // When using CUDAs-aware MPI, the set of local indices that are ghosts
       // indices on other processors is expanded in arrays. This is for
       // performance reasons as this can significantly decrease the number of
@@ -113,7 +113,7 @@ namespace Utilities
       for (unsigned int i = 0; i < n_import_targets; i++)
         {
 #  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-      defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+      defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
           if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
             {
               const auto chunk_size = import_indices_plain_dev[i].second;
@@ -531,7 +531,7 @@ namespace Utilities
       const unsigned int n_ghost_targets = ghost_targets_data.size();
 
 #  if (defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-      defined(DEAL_II_WITH_CUDA_AWARE_MPI))
+      defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
       // When using CUDAs-aware MPI, the set of local indices that are ghosts
       // indices on other processors is expanded in arrays. This is for
       // performance reasons as this can significantly decrease the number of
@@ -554,7 +554,7 @@ namespace Utilities
 
       const Number *read_position = temporary_storage.data();
 #  if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-        defined(DEAL_II_WITH_CUDA_AWARE_MPI))
+        defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
       // If the operation is no insertion, add the imported data to the
       // local values. For insert, nothing is done here (but in debug mode
       // we assert that the specified value is either zero or matches with
@@ -706,7 +706,7 @@ namespace Utilities
       Assert(ghost_array.begin() != nullptr, ExcInternalError());
 
 #  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-      defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+      defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
       if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
         {
           Assert(std::is_trivial<Number>::value, ExcNotImplemented());
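The comments in the hunks above note that, on the CUDA-aware path, the
partitioner expands the ghost/import index sets into plain arrays
(import_indices_plain_dev). The point is batching: one bulk gather per
communication target instead of many small strided copies. A host-side
illustration of that gather (in the library itself this runs as a CUDA
kernel on device memory; pack_for_target is a made-up name):

    #include <cstddef>
    #include <vector>

    // Gather scattered vector entries into a contiguous send buffer in
    // one pass, so the subsequent MPI send can take the buffer directly.
    template <typename Number>
    void pack_for_target(const Number                   *local_values,
                         const std::vector<std::size_t> &plain_indices,
                         Number                         *send_buffer)
    {
      for (std::size_t i = 0; i < plain_indices.size(); ++i)
        send_buffer[i] = local_values[plain_indices[i]];
    }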
diff --git a/include/deal.II/lac/la_parallel_vector.templates.h b/include/deal.II/lac/la_parallel_vector.templates.h
index ff2ce72f83..fe8181f2e2 100644
--- a/include/deal.II/lac/la_parallel_vector.templates.h
+++ b/include/deal.II/lac/la_parallel_vector.templates.h
@@ -891,7 +891,7 @@ namespace LinearAlgebra
       if (partitioner->n_import_indices() > 0)
         {
 #  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-      defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+      defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
           Assert(
             (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value),
             ExcMessage(
@@ -901,7 +901,7 @@ namespace LinearAlgebra
               Utilities::CUDA::allocate_device_data<Number>(
                 partitioner->n_import_indices()));
 #  else
-#    ifdef DEAL_II_WITH_CUDA_AWARE_MPI
+#    ifdef DEAL_II_MPI_WITH_CUDA_SUPPORT
           static_assert(
             std::is_same<MemorySpaceType, MemorySpace::Host>::value,
             "This code path should only be compiled for CUDA-aware-MPI for MemorySpace::Host!");
@@ -919,7 +919,7 @@ namespace LinearAlgebra
         }
 
 #  if defined DEAL_II_COMPILER_CUDA_AWARE && \
-      !defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+      !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
       // Move the data to the host and then move it back to the
       // the device. We use values to store the elements because the function
       // uses a view of the array and thus we need the data on the host to
@@ -939,7 +939,7 @@ namespace LinearAlgebra
 #  endif
 
 #  if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-        defined(DEAL_II_WITH_CUDA_AWARE_MPI))
+        defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
       partitioner->import_from_ghosted_array_start(
         operation,
         counter,
@@ -980,7 +980,7 @@ namespace LinearAlgebra
       // make this function thread safe
       std::lock_guard<std::mutex> lock(mutex);
 #  if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-        defined(DEAL_II_WITH_CUDA_AWARE_MPI))
+        defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
       Assert(partitioner->n_import_indices() == 0 ||
                import_data.values != nullptr,
              ExcNotInitialized());
@@ -1011,7 +1011,7 @@ namespace LinearAlgebra
 #  endif
 
 #  if defined DEAL_II_COMPILER_CUDA_AWARE && \
-      !defined DEAL_II_WITH_CUDA_AWARE_MPI
+      !defined DEAL_II_MPI_WITH_CUDA_SUPPORT
       // The communication is done on the host, so we need to
       // move the data back to the device.
       if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
@@ -1051,7 +1051,7 @@ namespace LinearAlgebra
       if (partitioner->n_import_indices() > 0)
         {
 #  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-      defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+      defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
           Assert(
             (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value),
             ExcMessage(
@@ -1061,7 +1061,7 @@ namespace LinearAlgebra
               Utilities::CUDA::allocate_device_data<Number>(
                 partitioner->n_import_indices()));
 #  else
-#    ifdef DEAL_II_WITH_CUDA_AWARE_MPI
+#    ifdef DEAL_II_MPI_WITH_CUDA_SUPPORT
           static_assert(
             std::is_same<MemorySpaceType, MemorySpace::Host>::value,
             "This code path should only be compiled for CUDA-aware-MPI for MemorySpace::Host!");
@@ -1079,7 +1079,7 @@ namespace LinearAlgebra
         }
 
 #  if defined DEAL_II_COMPILER_CUDA_AWARE && \
-      !defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+      !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
       // Move the data to the host and then move it back to the
       // the device. We use values to store the elements because the function
       // uses a view of the array and thus we need the data on the host to
@@ -1099,7 +1099,7 @@ namespace LinearAlgebra
 #  endif
 
 #  if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-        defined(DEAL_II_WITH_CUDA_AWARE_MPI))
+        defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
       partitioner->export_to_ghosted_array_start(
         counter,
         ArrayView(data.values.get(),
@@ -1146,7 +1146,7 @@ namespace LinearAlgebra
       std::lock_guard<std::mutex> lock(mutex);
 
 #  if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-        defined(DEAL_II_WITH_CUDA_AWARE_MPI))
+        defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
       partitioner->export_to_ghosted_array_finish(
         ArrayView(
           data.values.get() + partitioner->local_size(),
@@ -1162,7 +1162,7 @@ namespace LinearAlgebra
         }
 
 #  if defined DEAL_II_COMPILER_CUDA_AWARE && \
-      !defined DEAL_II_WITH_CUDA_AWARE_MPI
+      !defined DEAL_II_MPI_WITH_CUDA_SUPPORT
       // The communication is done on the host, so we need to
       // move the data back to the device.
       if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
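For completeness, the import_from_ghosted_array_start()/_finish() pair
modified above is driven by the usual assemble-then-compress pattern; a
companion sketch to the earlier one, on the host memory space where
element access is available (untested; standard deal.II 9.1 API):

    #include <deal.II/base/index_set.h>
    #include <deal.II/base/mpi.h>
    #include <deal.II/lac/la_parallel_vector.h>
    #include <deal.II/lac/vector_operation.h>

    using namespace dealii;

    int main(int argc, char *argv[])
    {
      Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);

      const MPI_Comm     comm = MPI_COMM_WORLD;
      const unsigned int rank = Utilities::MPI::this_mpi_process(comm);
      const unsigned int size = Utilities::MPI::n_mpi_processes(comm);

      // Same layout as the earlier sketch.
      IndexSet owned(2 * size);
      owned.add_range(2 * rank, 2 * rank + 2);
      IndexSet ghosted(2 * size);
      const types::global_dof_index ghost_index =
        (2 * rank + 2) % (2 * size);
      ghosted.add_index(ghost_index);

      LinearAlgebra::distributed::Vector<double, MemorySpace::Host> vec(
        owned, ghosted, comm);

      // Contribute to an entry owned by the neighboring process, then let
      // compress() accumulate the contribution on the owner. This drives
      // the import_data buffers whose allocation the hunks above touch.
      vec(ghost_index) += 1.;
      vec.compress(VectorOperation::add);

      return 0;
    }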