From: Daniel Arndt Date: Wed, 25 Jan 2023 15:16:41 +0000 (-0500) Subject: Convert device-aware MPI implementation for LinearAlgebra::distributed::Vector X-Git-Tag: v9.5.0-rc1~595^2~2 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=ac60c6e1f98599c6665b61c275381a2510485bb4;p=dealii.git Convert device-aware MPI implementation for LinearAlgebra::distributed::Vector --- diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index b7c45fde84..c4aea44c86 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -194,6 +194,7 @@ jobs: -D DEAL_II_WITH_KOKKOS="ON" \ -D KOKKOS_DIR=${GITHUB_WORKSPACE}/../kokkos-install \ -D DEAL_II_WITH_MPI="ON" \ + -D DEAL_II_MPI_WITH_DEVICE_SUPPORT="ON" \ -D DEAL_II_WITH_P4EST="ON" \ -D DEAL_II_COMPONENT_EXAMPLES="ON" \ .. diff --git a/cmake/config/template-arguments.in b/cmake/config/template-arguments.in index ce0698f876..96654f2d32 100644 --- a/cmake/config/template-arguments.in +++ b/cmake/config/template-arguments.in @@ -73,6 +73,16 @@ MPI_SCALARS := { int; @DEAL_II_EXPAND_COMPLEX_SCALARS@; } +// complex types and long double are typically not directly supported on GPUs +MPI_DEVICE_SCALARS := { int; + long int; + unsigned int; + unsigned long int; + unsigned long long int; + float; + double; + } + // template names for serial vectors that we can instantiate as T where // S=REAL_SCALARS for example DEAL_II_VEC_TEMPLATES := { Vector; BlockVector } diff --git a/cmake/configure/configure_10_mpi.cmake b/cmake/configure/configure_10_mpi.cmake index b31a85cffb..3b2c1df1cf 100644 --- a/cmake/configure/configure_10_mpi.cmake +++ b/cmake/configure/configure_10_mpi.cmake @@ -65,8 +65,12 @@ macro(feature_mpi_configure_external) # (in Modules/FindMPI.cmake) at some point. For the time being this is an # advanced configuration option. # - option(DEAL_II_MPI_WITH_CUDA_SUPPORT "Enable MPI Cuda support" OFF) - mark_as_advanced(DEAL_II_MPI_WITH_CUDA_SUPPORT) + if(DEAL_II_MPI_WITH_CUDA_SUPPORT) + option(DEAL_II_MPI_WITH_DEVICE_SUPPORT "Enable MPI Device support" ON) + else() + option(DEAL_II_MPI_WITH_DEVICE_SUPPORT "Enable MPI Device support" OFF) + endif() + mark_as_advanced(DEAL_II_MPI_WITH_DEVICE_SUPPORT) endmacro() macro(feature_mpi_error_message) @@ -90,8 +94,8 @@ configure_feature(MPI) if(NOT DEAL_II_WITH_MPI) # - # Disable and hide the DEAL_II_MPI_WITH_CUDA_SUPPORT option + # Disable and hide the DEAL_II_MPI_WITH_DEVICE_SUPPORT option # - set(DEAL_II_MPI_WITH_CUDA_SUPPORT) - unset(DEAL_II_MPI_WITH_CUDA_SUPPORT CACHE) + set(DEAL_II_MPI_WITH_DEVICE_SUPPORT) + unset(DEAL_II_MPI_WITH_DEVICE_SUPPORT CACHE) endif() diff --git a/doc/doxygen/options.dox.in b/doc/doxygen/options.dox.in index 431603172e..b95298ffa2 100644 --- a/doc/doxygen/options.dox.in +++ b/doc/doxygen/options.dox.in @@ -209,7 +209,7 @@ PREDEFINED = DOXYGEN=1 \ DEAL_II_LAPACK_WITH_MKL=1 \ DEAL_II_WITH_METIS=1 \ DEAL_II_WITH_MPI=1 \ - DEAL_II_MPI_WITH_CUDA_SUPPORT=1 \ + DEAL_II_MPI_WITH_DEVICE_SUPPORT=1 \ DEAL_II_MPI_VERSION_MAJOR=3 \ DEAL_II_MPI_VERSION_MINOR=0 \ DEAL_II_WITH_MUPARSER=1 \ diff --git a/doc/external-libs/cuda.html b/doc/external-libs/cuda.html index 854169daee..36cd7f766b 100644 --- a/doc/external-libs/cuda.html +++ b/doc/external-libs/cuda.html @@ -46,7 +46,7 @@ -DDEAL_II_WITH_CUDA=ON -DDEAL_II_WITH_MPI=ON - -DDEAL_II_MPI_WITH_CUDA_SUPPORT=ON + -DDEAL_II_MPI_WITH_DEVICE_SUPPORT=ON Note that there is no check that detects if the MPI implementation really is CUDA-aware.
Activating this flag for incompatible MPI libraries diff --git a/examples/step-64/step-64.cc b/examples/step-64/step-64.cc index e0507a6be0..d59e3feebc 100644 --- a/examples/step-64/step-64.cc +++ b/examples/step-64/step-64.cc @@ -358,8 +358,8 @@ namespace Step64 // memory space to use. There is also LinearAlgebra::CUDAWrappers::Vector // that always uses GPU memory storage but doesn't work with MPI. It might // be worth noticing that the communication between different MPI processes - // can be improved if the MPI implementation is CUDA-aware and the configure - // flag `DEAL_II_MPI_WITH_CUDA_SUPPORT` is enabled. (The value of this + // can be improved if the MPI implementation is GPU-aware and the configure + // flag `DEAL_II_MPI_WITH_DEVICE_SUPPORT` is enabled. (The value of this // flag needs to be set at the time you call `cmake` when installing // deal.II.) // diff --git a/include/deal.II/base/config.h.in b/include/deal.II/base/config.h.in index b391ed6388..6a52d0ac41 100644 --- a/include/deal.II/base/config.h.in +++ b/include/deal.II/base/config.h.in @@ -448,7 +448,10 @@ # define DEAL_II_MPI_VERSION_GTE(major,minor) false #endif +#cmakedefine DEAL_II_MPI_WITH_DEVICE_SUPPORT +#ifdef DEAL_II_MPI_WITH_DEVICE_SUPPORT #cmakedefine DEAL_II_MPI_WITH_CUDA_SUPPORT +#endif /*********************************************************************** * Two macro names that we put at the top and bottom of all deal.II files diff --git a/include/deal.II/base/partitioner.h b/include/deal.II/base/partitioner.h index c1ec508ff7..330fc654f2 100644 --- a/include/deal.II/base/partitioner.h +++ b/include/deal.II/base/partitioner.h @@ -671,7 +671,7 @@ namespace Utilities private: /** * Initialize import_indices_plain_dev from import_indices_data. This - * function is only used when using CUDA-aware MPI. + * function is only used when using device-aware MPI. */ void initialize_import_indices_plain_dev() const; @@ -722,15 +722,13 @@ namespace Utilities /** * The set of (local) indices that we are importing during compress(), * i.e., others' ghosts that belong to the local range. The data stored is - * the same than in import_indices_data but the data is expanded in plain - * arrays. This variable is only used when using CUDA-aware MPI. + * the same as in import_indices_data but the data is expanded in plain + * arrays. This variable is only used when using device-aware MPI. */ // The variable is mutable to enable lazy initialization in - // export_to_ghosted_array_start(). This way partitioner does not have to - // be templated on the MemorySpaceType. + // export_to_ghosted_array_start(). mutable std::vector< - std::pair, - unsigned int>> + Kokkos::View> import_indices_plain_dev; /** diff --git a/include/deal.II/base/partitioner.templates.h b/include/deal.II/base/partitioner.templates.h index c1d7a6856e..3b51ab2f1a 100644 --- a/include/deal.II/base/partitioner.templates.h +++ b/include/deal.II/base/partitioner.templates.h @@ -107,33 +107,43 @@ namespace Utilities } Number *temp_array_ptr = temporary_storage.data(); -# if defined(DEAL_II_WITH_CUDA) && defined(DEAL_II_MPI_WITH_CUDA_SUPPORT) - // When using CUDAs-aware MPI, the set of local indices that are ghosts +# if defined(DEAL_II_MPI_WITH_DEVICE_SUPPORT) + // When using device-aware MPI, the set of local indices that are ghost // indices on other processors is expanded in arrays. This is for // performance reasons as this can significantly decrease the number of // kernels launched. The indices are expanded the first time the function // is called.
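// As a standalone illustration of the expansion just described (a hedged
// sketch assuming plain Kokkos; expand_ranges and plain_indices are
// illustrative names, not deal.II API): half-open ranges [first, second)
// are flattened into one device array, so later gathers need a single
// kernel launch instead of one launch per range.
#include <Kokkos_Core.hpp>
#include <utility>
#include <vector>

inline Kokkos::View<unsigned int *>
expand_ranges(const std::vector<std::pair<unsigned int, unsigned int>> &ranges)
{
  // Flatten the half-open ranges on the host.
  std::vector<unsigned int> plain;
  for (const auto &r : ranges)
    for (unsigned int j = r.first; j < r.second; ++j)
      plain.push_back(j);

  // Allocate a device view and copy the flat list over in one shot by
  // wrapping the host buffer in an unmanaged host-space view.
  Kokkos::View<unsigned int *> dev("plain_indices", plain.size());
  Kokkos::deep_copy(dev,
                    Kokkos::View<const unsigned int *, Kokkos::HostSpace>(
                      plain.data(), plain.size()));
  return dev;
}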
- if ((std::is_same::value) && + if ((std::is_same::value) && (import_indices_plain_dev.size() == 0)) initialize_import_indices_plain_dev(); # endif for (unsigned int i = 0; i < n_import_targets; ++i) { -# if defined(DEAL_II_WITH_CUDA) && defined(DEAL_II_MPI_WITH_CUDA_SUPPORT) - if (std::is_same::value) +# if defined(DEAL_II_MPI_WITH_DEVICE_SUPPORT) +# ifdef DEAL_II_HAVE_CXX17 + if constexpr (std::is_same::value) +# else + if (std::is_same::value) +# endif { - const auto chunk_size = import_indices_plain_dev[i].second; - const int n_blocks = - 1 + chunk_size / (::dealii::CUDAWrappers::chunk_size * - ::dealii::CUDAWrappers::block_size); - ::dealii::LinearAlgebra::CUDAWrappers::kernel:: - gather<<>>( - temp_array_ptr, - import_indices_plain_dev[i].first.get(), - locally_owned_array.data(), - chunk_size); - cudaDeviceSynchronize(); + const auto chunk_size = import_indices_plain_dev[i].size(); + using IndexType = decltype(chunk_size); + + auto import_indices = import_indices_plain_dev[i]; + auto locally_owned_array_data = locally_owned_array.data(); + MemorySpace::Default::kokkos_space::execution_space exec; + Kokkos::parallel_for( + "fill temp_array_ptr", + Kokkos::RangePolicy< + MemorySpace::Default::kokkos_space::execution_space>( + exec, 0, chunk_size), + KOKKOS_LAMBDA(IndexType idx) { + temp_array_ptr[idx] = + locally_owned_array_data[import_indices[idx]]; + }); + exec.fence(); } else # endif @@ -214,11 +224,25 @@ namespace Utilities { const unsigned int chunk_size = ghost_range.second - ghost_range.first; +# ifdef DEAL_II_HAVE_CXX17 + if constexpr (std::is_same::value) +# else if (std::is_same::value) +# endif { - std::copy(ghost_array.data() + offset, - ghost_array.data() + offset + chunk_size, - ghost_array.data() + ghost_range.first); + // If source and destination are overlapping, we must be + // careful to use an appropriate copy function. + if (ghost_range.first > offset) + std::copy_backward(ghost_array.data() + offset, + ghost_array.data() + offset + + chunk_size, + ghost_array.data() + + ghost_range.first); + else + std::copy(ghost_array.data() + offset, + ghost_array.data() + offset + chunk_size, + ghost_array.data() + ghost_range.first); std::fill(ghost_array.data() + std::max(ghost_range.second, offset), ghost_array.data() + offset + chunk_size, @@ -226,27 +250,41 @@ namespace Utilities } else { -# ifdef DEAL_II_WITH_CUDA - cudaError_t cuda_error = - cudaMemcpy(ghost_array.data() + ghost_range.first, - ghost_array.data() + offset, - chunk_size * sizeof(Number), - cudaMemcpyDeviceToDevice); - AssertCuda(cuda_error); - cuda_error = - cudaMemset(ghost_array.data() + - std::max(ghost_range.second, offset), - 0, - (offset + chunk_size - - std::max(ghost_range.second, offset)) * - sizeof(Number)); - AssertCuda(cuda_error); -# else - Assert( - false, - ExcMessage( - "If the compiler doesn't understand CUDA code, only MemorySpace::Host is allowed!")); -# endif + Kokkos::View + ghost_src_view(ghost_array.data() + offset, chunk_size); + Kokkos::View + ghost_dst_view(ghost_array.data() + ghost_range.first, + chunk_size); + + // If source and destination are overlapping, we can't + // just call deep_copy but must copy the data to a buffer + // first. 
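// The hunk that follows implements this for the ghost range; as an
// isolated, hedged sketch of the same pattern (overlap_safe_copy and buffer
// are illustrative names, not deal.II API): Kokkos::deep_copy requires
// non-overlapping source and destination, so overlapping ranges are staged
// through a scratch buffer.
#include <Kokkos_Core.hpp>
#include <cstddef>

template <typename Number>
void
overlap_safe_copy(Number *dst, const Number *src, const std::size_t n)
{
  // Wrap the raw device pointers in unmanaged views of the default space.
  Kokkos::View<const Number *> src_view(src, n);
  Kokkos::View<Number *>       dst_view(dst, n);

  // The ranges [src, src+n) and [dst, dst+n) overlap if either start lies
  // strictly inside the other range.
  if ((src < dst && dst < src + n) || (dst < src && src < dst + n))
    {
      // WithoutInitializing skips the default zero-fill of the scratch
      // buffer since every element is overwritten immediately.
      Kokkos::View<Number *> buffer(
        Kokkos::view_alloc("buffer", Kokkos::WithoutInitializing), n);
      Kokkos::deep_copy(buffer, src_view);
      Kokkos::deep_copy(dst_view, buffer);
    }
  else
    Kokkos::deep_copy(dst_view, src_view);
}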
+ if ((offset < ghost_range.first && + ghost_range.first < offset + chunk_size) || + (ghost_range.first < offset && + offset < ghost_range.first + chunk_size)) + { + Kokkos::View + copy(Kokkos::view_alloc( + "copy", Kokkos::WithoutInitializing), + chunk_size); + Kokkos::deep_copy(copy, ghost_src_view); + Kokkos::deep_copy(ghost_dst_view, copy); + } + else + { + Kokkos::deep_copy(ghost_dst_view, ghost_src_view); + } + Kokkos::deep_copy( + Kokkos::View( + ghost_array.data() + + std::max(ghost_range.second, offset), + (offset + chunk_size - + std::max(ghost_range.second, offset))), + 0); } offset += chunk_size; } @@ -362,12 +400,24 @@ namespace Utilities if (ghost_array_ptr + offset != ghost_array.data() + my_ghosts->first) { +# ifdef DEAL_II_HAVE_CXX17 + if constexpr (std::is_same::value) +# else if (std::is_same::value) +# endif { - std::copy(ghost_array.data() + my_ghosts->first, - ghost_array.data() + my_ghosts->second, - ghost_array_ptr + offset); + if (offset > my_ghosts->first) + std::copy_backward(ghost_array.data() + + my_ghosts->first, + ghost_array_ptr + + my_ghosts->second, + ghost_array.data() + offset); + else + std::copy(ghost_array.data() + my_ghosts->first, + ghost_array.data() + my_ghosts->second, + ghost_array_ptr + offset); std::fill( std::max(ghost_array.data() + my_ghosts->first, ghost_array_ptr + offset + chunk_size), @@ -376,28 +426,30 @@ namespace Utilities } else { -# ifdef DEAL_II_WITH_CUDA - cudaError_t cuda_error = - cudaMemcpy(ghost_array_ptr + offset, - ghost_array.data() + my_ghosts->first, - chunk_size * sizeof(Number), - cudaMemcpyDeviceToDevice); - AssertCuda(cuda_error); - cuda_error = cudaMemset( - std::max(ghost_array.data() + my_ghosts->first, - ghost_array_ptr + offset + chunk_size), - 0, - (ghost_array.data() + my_ghosts->second - - std::max(ghost_array.data() + my_ghosts->first, - ghost_array_ptr + offset + chunk_size)) * - sizeof(Number)); - AssertCuda(cuda_error); -# else - Assert( - false, - ExcMessage( - "If the compiler doesn't understand CUDA code, only MemorySpace::Host is allowed!")); -# endif + Kokkos::View + copy("copy", chunk_size); + Kokkos::deep_copy( + copy, + Kokkos::View( + ghost_array.data() + my_ghosts->first, + chunk_size)); + Kokkos::deep_copy( + Kokkos::View( + ghost_array_ptr + offset, chunk_size), + copy); + Kokkos::deep_copy( + Kokkos::View( + std::max(ghost_array.data() + my_ghosts->first, + ghost_array_ptr + offset + chunk_size), + (ghost_array.data() + my_ghosts->second - + std::max(ghost_array.data() + my_ghosts->first, + ghost_array_ptr + offset + + chunk_size))), + 0); } } offset += chunk_size; @@ -412,10 +464,8 @@ namespace Utilities ExcMessage("Index overflow: Maximum message size in MPI is 2GB. " "The number of ghost entries times the size of 'Number' " "exceeds this value. This is not supported.")); -# if defined(DEAL_II_WITH_CUDA) && defined(DEAL_II_MPI_WITH_CUDA_SUPPORT) - if (std::is_same::value) - cudaDeviceSynchronize(); -# endif + if (std::is_same::value) + Kokkos::fence(); const int ierr = MPI_Isend(ghost_array_ptr, ghost_targets_data[i].second * sizeof(Number), @@ -458,7 +508,7 @@ namespace Utilities // straight forward for complex numbers. Therefore, comparison of complex // numbers is prohibited and throws an exception. 
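// A hedged sketch of the overload pattern used next (device_min is an
// illustrative name, not deal.II API): the helper gets a
// host/device-callable definition for ordered types, while a host-only
// std::complex overload only raises an error, since complex numbers carry
// no ordering to minimize over.
#include <Kokkos_Core.hpp>
#include <complex>
#include <stdexcept>

template <typename Number>
KOKKOS_INLINE_FUNCTION Number
device_min(const Number a, const Number b)
{
  // Spelled as a ternary so that no host-only library call runs on device.
  return a < b ? a : b;
}

template <typename T>
std::complex<T>
device_min(const std::complex<T> &, const std::complex<T> &)
{
  throw std::runtime_error("min() is not defined for complex numbers");
}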
template - Number + DEAL_II_HOST_DEVICE Number get_min(const Number a, const Number b) { return std::min(a, b); } @@ -475,7 +525,7 @@ } template - Number + DEAL_II_HOST_DEVICE Number get_max(const Number a, const Number b) { return std::max(a, b); } @@ -523,29 +573,10 @@ "import_from_ghosted_array_start as is passed " "to import_from_ghosted_array_finish.")); -# ifdef DEAL_II_WITH_CUDA - if (std::is_same::value) - { - cudaMemset(ghost_array.data(), - 0, - sizeof(Number) * ghost_array.size()); - } - else -# endif - { -# ifdef DEAL_II_HAVE_CXX17 - if constexpr (std::is_trivial::value) -# else - if (std::is_trivial::value) -# endif - std::memset(ghost_array.data(), - 0, - sizeof(Number) * ghost_array.size()); - else - std::fill(ghost_array.data(), - ghost_array.data() + ghost_array.size(), - 0); - } + Kokkos::deep_copy( + Kokkos::View( + ghost_array.data(), ghost_array.size()), + 0); return; } # endif @@ -557,13 +588,13 @@ namespace Utilities const unsigned int n_import_targets = import_targets_data.size(); const unsigned int n_ghost_targets = ghost_targets_data.size(); -# if (defined(DEAL_II_WITH_CUDA) && defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)) - // When using CUDAs-aware MPI, the set of local indices that are ghosts +# if defined(DEAL_II_MPI_WITH_DEVICE_SUPPORT) + // When using device-aware MPI, the set of local indices that are ghost // indices on other processors is expanded in arrays. This is for // performance reasons as this can significantly decrease the number of // kernels launched. The indices are expanded the first time the function // is called. - if ((std::is_same::value) && + if ((std::is_same::value) && (import_indices_plain_dev.size() == 0)) initialize_import_indices_plain_dev(); # endif @@ -579,131 +610,168 @@ AssertThrowMPI(ierr); const Number *read_position = temporary_storage.data(); -# if !(defined(DEAL_II_WITH_CUDA) && defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)) - // If the operation is no insertion, add the imported data to the - // local values.
For insert, nothing is done here (but in debug mode - // we assert that the specified value is either zero or matches with - // the ones already present - if (vector_operation == dealii::VectorOperation::add) - for (const auto &import_range : import_indices_data) - for (unsigned int j = import_range.first; j < import_range.second; - j++) - locally_owned_array[j] += *read_position++; - else if (vector_operation == dealii::VectorOperation::min) - for (const auto &import_range : import_indices_data) - for (unsigned int j = import_range.first; j < import_range.second; - j++) - { - locally_owned_array[j] = - internal::get_min(*read_position, locally_owned_array[j]); - read_position++; - } - else if (vector_operation == dealii::VectorOperation::max) - for (const auto &import_range : import_indices_data) - for (unsigned int j = import_range.first; j < import_range.second; - j++) +# if defined(DEAL_II_MPI_WITH_DEVICE_SUPPORT) +# ifdef DEAL_II_HAVE_CXX17 + if constexpr (std::is_same::value) +# else + if (std::is_same::value) +# endif + { + if (vector_operation == dealii::VectorOperation::add) { - locally_owned_array[j] = - internal::get_max(*read_position, locally_owned_array[j]); - read_position++; + for (auto const &import_indices_plain : + import_indices_plain_dev) + { + const auto chunk_size = import_indices_plain.size(); + + using IndexType = decltype(chunk_size); + MemorySpace::Default::kokkos_space::execution_space exec; + Kokkos::parallel_for( + "fill locally_owned_array, add", + Kokkos::RangePolicy< + MemorySpace::Default::kokkos_space::execution_space>( + exec, 0, chunk_size), + KOKKOS_LAMBDA(IndexType idx) { + locally_owned_array + .data()[import_indices_plain(idx)] += + read_position[idx]; + }); + exec.fence(); + + read_position += chunk_size; + } } - else - for (const auto &import_range : import_indices_data) - for (unsigned int j = import_range.first; j < import_range.second; - j++, read_position++) - // Below we use relatively large precision in units in the last - // place (ULP) as this Assert can be easily triggered in - // p::d::SolutionTransfer. The rationale is that during - // interpolation on two elements sharing the face, values on - // this face obtained from each side might be different due to - // additions being done in different order. If the local - // value is zero, it indicates that the local process has not - // set the value during the cell loop and its value can be - // safely overridden. - Assert(*read_position == Number() || - internal::get_abs(locally_owned_array[j] - - *read_position) <= - internal::get_abs(locally_owned_array[j] + - *read_position) * - 100000. 
* - std::numeric_limits::real_type>::epsilon(), - typename dealii::LinearAlgebra::distributed::Vector< - Number>::ExcNonMatchingElements(*read_position, - locally_owned_array[j], - my_pid)); -# else - if (vector_operation == dealii::VectorOperation::add) - { - for (auto const &import_indices_plain : import_indices_plain_dev) + else if (vector_operation == dealii::VectorOperation::min) { - const auto chunk_size = import_indices_plain.second; - const int n_blocks = - 1 + chunk_size / (::dealii::CUDAWrappers::chunk_size * - ::dealii::CUDAWrappers::block_size); - dealii::LinearAlgebra::CUDAWrappers::kernel:: - masked_vector_bin_op - <<>>( - import_indices_plain.first.get(), - locally_owned_array.data(), - read_position, - chunk_size); - read_position += chunk_size; + for (auto const &import_indices_plain : + import_indices_plain_dev) + { + const auto chunk_size = import_indices_plain.size(); + + using IndexType = decltype(chunk_size); + MemorySpace::Default::kokkos_space::execution_space exec; + Kokkos::parallel_for( + "fill locally_owned_array, min", + Kokkos::RangePolicy< + MemorySpace::Default::kokkos_space::execution_space>( + exec, 0, chunk_size), + KOKKOS_LAMBDA(IndexType idx) { + locally_owned_array + .data()[import_indices_plain(idx)] = + internal::get_min( + locally_owned_array + .data()[import_indices_plain(idx)], + read_position[idx]); + }); + exec.fence(); + + read_position += chunk_size; + } } - } - else if (vector_operation == dealii::VectorOperation::min) - { - for (auto const &import_indices_plain : import_indices_plain_dev) + else if (vector_operation == dealii::VectorOperation::max) { - const auto chunk_size = import_indices_plain.second; - const int n_blocks = - 1 + chunk_size / (::dealii::CUDAWrappers::chunk_size * - ::dealii::CUDAWrappers::block_size); - dealii::LinearAlgebra::CUDAWrappers::kernel:: - masked_vector_bin_op< - Number, - dealii::LinearAlgebra::CUDAWrappers::kernel::Binop_Min> - <<>>( - import_indices_plain.first.get(), - locally_owned_array.data(), - read_position, - chunk_size); - read_position += chunk_size; + for (auto const &import_indices_plain : + import_indices_plain_dev) + { + const auto chunk_size = import_indices_plain.size(); + + using IndexType = decltype(chunk_size); + MemorySpace::Default::kokkos_space::execution_space exec; + Kokkos::parallel_for( + "fill locally_owned_array, max", + Kokkos::RangePolicy< + MemorySpace::Default::kokkos_space::execution_space>( + exec, 0, chunk_size), + KOKKOS_LAMBDA(IndexType idx) { + locally_owned_array + .data()[import_indices_plain(idx)] = + internal::get_max( + locally_owned_array + .data()[import_indices_plain(idx)], + read_position[idx]); + }); + exec.fence(); + + read_position += chunk_size; + } } - } - else if (vector_operation == dealii::VectorOperation::max) - { - for (auto const &import_indices_plain : import_indices_plain_dev) + else { - const auto chunk_size = import_indices_plain.second; - const int n_blocks = - 1 + chunk_size / (::dealii::CUDAWrappers::chunk_size * - ::dealii::CUDAWrappers::block_size); - dealii::LinearAlgebra::CUDAWrappers::kernel:: - masked_vector_bin_op< - Number, - dealii::LinearAlgebra::CUDAWrappers::kernel::Binop_Max> - <<>>( - import_indices_plain.first.get(), - locally_owned_array.data(), - read_position, - chunk_size); - read_position += chunk_size; + for (auto const &import_indices_plain : + import_indices_plain_dev) + { + // We can't easily assert here, so we just move the + // pointer matching the host code. 
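// The host-side branch further below can Assert on every element; in
// device code one would instead reduce first and check the result on the
// host. A hedged sketch of that idea (count_mismatches is an illustrative
// name, and the ULP tolerance of the host check is omitted for brevity):
#include <Kokkos_Core.hpp>

template <typename Number>
unsigned int
count_mismatches(const Kokkos::View<const Number *> imported,
                 const Kokkos::View<const Number *> local)
{
  unsigned int n_mismatches = 0;
  // Count entries whose imported value is neither zero nor equal to the
  // locally stored one; the caller can then assert that the sum is zero.
  Kokkos::parallel_reduce(
    "count_mismatches",
    imported.extent(0),
    KOKKOS_LAMBDA(const int i, unsigned int &partial) {
      if (imported(i) != Number() && imported(i) != local(i))
        ++partial;
    },
    n_mismatches);
  return n_mismatches;
}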
+ const auto chunk_size = import_indices_plain.size(); + read_position += chunk_size; + } + } } else +# endif { + // If the operation is not an insertion, add the imported data to + // the local values. For insert, nothing is done here (but in debug + // mode we assert that the specified value is either zero or + // matches the ones already present). + if (vector_operation == dealii::VectorOperation::add) + for (const auto &import_range : import_indices_data) + for (unsigned int j = import_range.first; + j < import_range.second; + j++) + locally_owned_array[j] += *read_position++; + else if (vector_operation == dealii::VectorOperation::min) + for (const auto &import_range : import_indices_data) + for (unsigned int j = import_range.first; + j < import_range.second; + j++) + { + locally_owned_array[j] = + internal::get_min(*read_position, + locally_owned_array[j]); + read_position++; + } + else if (vector_operation == dealii::VectorOperation::max) + for (const auto &import_range : import_indices_data) + for (unsigned int j = import_range.first; + j < import_range.second; + j++) + { + locally_owned_array[j] = + internal::get_max(*read_position, + locally_owned_array[j]); + read_position++; + } + else + for (const auto &import_range : import_indices_data) + for (unsigned int j = import_range.first; + j < import_range.second; + j++, read_position++) + // Below we use relatively large precision in units in the + // last place (ULP) as this Assert can be easily triggered + // in p::d::SolutionTransfer. The rationale is that during + // interpolation on two elements sharing the face, values on + // this face obtained from each side might be different due + // to additions being done in different order. If the local + // value is zero, it indicates that the local process has + // not set the value during the cell loop and its value can + // be safely overridden. + Assert( + *read_position == Number() || + internal::get_abs(locally_owned_array[j] - + *read_position) <= + internal::get_abs(locally_owned_array[j] + + *read_position) * + 100000.
* + std::numeric_limits::real_type>::epsilon(), + typename dealii::LinearAlgebra::distributed::Vector< + Number>::ExcNonMatchingElements(*read_position, + locally_owned_array[j], + my_pid)); } -# endif + AssertDimension(read_position - temporary_storage.data(), n_import_indices()); } @@ -725,13 +793,18 @@ namespace Utilities { Assert(ghost_array.begin() != nullptr, ExcInternalError()); -# if defined(DEAL_II_WITH_CUDA) && defined(DEAL_II_MPI_WITH_CUDA_SUPPORT) - if (std::is_same::value) +# if defined(DEAL_II_MPI_WITH_DEVICE_SUPPORT) +# ifdef DEAL_II_HAVE_CXX17 + if constexpr (std::is_same::value) +# else + if (std::is_same::value) +# endif { - Assert(std::is_trivial::value, ExcNotImplemented()); - cudaMemset(ghost_array.data(), - 0, - sizeof(Number) * n_ghost_indices()); + Kokkos::deep_copy( + Kokkos::View( + ghost_array.data(), n_ghost_indices()), + 0); } else # endif diff --git a/include/deal.II/lac/la_parallel_vector.templates.h b/include/deal.II/lac/la_parallel_vector.templates.h index eab0616c3d..21c9b8e530 100644 --- a/include/deal.II/lac/la_parallel_vector.templates.h +++ b/include/deal.II/lac/la_parallel_vector.templates.h @@ -981,7 +981,7 @@ namespace LinearAlgebra // allocate import_data in case it is not set up yet if (partitioner->n_import_indices() > 0) { -# if !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT) +# if !defined(DEAL_II_MPI_WITH_DEVICE_SUPPORT) if (std::is_same::value) { @@ -998,7 +998,7 @@ namespace LinearAlgebra } } -# if !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT) +# if !defined(DEAL_II_MPI_WITH_DEVICE_SUPPORT) if (std::is_same::value) { // Move the data to the host and then move it back to the @@ -1055,7 +1055,7 @@ namespace LinearAlgebra // make this function thread safe std::lock_guard lock(mutex); -# if !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT) +# if !defined(DEAL_II_MPI_WITH_DEVICE_SUPPORT) if (std::is_same::value) { Assert(partitioner->n_import_indices() == 0 || @@ -1125,7 +1125,7 @@ namespace LinearAlgebra // allocate import_data in case it is not set up yet if (partitioner->n_import_indices() > 0) { -# if !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT) +# if !defined(DEAL_II_MPI_WITH_DEVICE_SUPPORT) if (std::is_same::value) { if (import_data.values_host_buffer.size() == 0) @@ -1141,7 +1141,7 @@ namespace LinearAlgebra } } -# if !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT) +# if !defined(DEAL_II_MPI_WITH_DEVICE_SUPPORT) if (std::is_same::value) { // Move the data to the host and then move it back to the @@ -1203,7 +1203,7 @@ namespace LinearAlgebra // make this function thread safe std::lock_guard lock(mutex); -# if !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT) +# if !defined(DEAL_II_MPI_WITH_DEVICE_SUPPORT) if (std::is_same::value) { partitioner->export_to_ghosted_array_finish( diff --git a/include/deal.II/matrix_free/cuda_matrix_free.h b/include/deal.II/matrix_free/cuda_matrix_free.h index d5bee6a61f..9c0e50d44b 100644 --- a/include/deal.II/matrix_free/cuda_matrix_free.h +++ b/include/deal.II/matrix_free/cuda_matrix_free.h @@ -128,7 +128,7 @@ namespace CUDAWrappers , use_coloring(use_coloring) , overlap_communication_computation(overlap_communication_computation) { -# ifndef DEAL_II_MPI_WITH_CUDA_SUPPORT +# ifndef DEAL_II_MPI_WITH_DEVICE_SUPPORT AssertThrow( overlap_communication_computation == false, ExcMessage( diff --git a/source/base/CMakeLists.txt b/source/base/CMakeLists.txt index 260c2bc0d0..1768b520be 100644 --- a/source/base/CMakeLists.txt +++ b/source/base/CMakeLists.txt @@ -116,7 +116,6 @@ if(DEAL_II_WITH_CUDA) set(_separate_src ${_separate_src} cuda.cc - partitioner_cuda.cc ) 
endif() @@ -143,7 +142,6 @@ set(_inst mpi_noncontiguous_partitioner.inst.in mpi_remote_point_evaluation.inst.in partitioner.inst.in - partitioner_cuda.inst.in polynomials_rannacher_turek.inst.in symmetric_tensor.inst.in symbolic_function.inst.in diff --git a/source/base/partitioner.cc b/source/base/partitioner.cc index 693042d549..9b895d616f 100644 --- a/source/base/partitioner.cc +++ b/source/base/partitioner.cc @@ -537,6 +537,41 @@ namespace Utilities return memory; } + + + void + Partitioner::initialize_import_indices_plain_dev() const + { + const unsigned int n_import_targets = import_targets_data.size(); + import_indices_plain_dev.reserve(n_import_targets); + for (unsigned int i = 0; i < n_import_targets; ++i) + { + // Expand the indices on the host + std::vector>::const_iterator + my_imports = import_indices_data.begin() + + import_indices_chunks_by_rank_data[i], + end_my_imports = import_indices_data.begin() + + import_indices_chunks_by_rank_data[i + 1]; + std::vector import_indices_plain_host; + for (; my_imports != end_my_imports; ++my_imports) + { + const unsigned int chunk_size = + my_imports->second - my_imports->first; + for (unsigned int j = 0; j < chunk_size; ++j) + import_indices_plain_host.push_back(my_imports->first + j); + } + + // Move the indices to the device + const auto chunk_size = import_indices_plain_host.size(); + import_indices_plain_dev.emplace_back("import_indices_plain_dev" + + std::to_string(i), + chunk_size); + Kokkos::deep_copy(import_indices_plain_dev.back(), + Kokkos::View( + import_indices_plain_host.data(), chunk_size)); + } + } + } // namespace MPI } // end of namespace Utilities diff --git a/source/base/partitioner.inst.in b/source/base/partitioner.inst.in index 4e554c6267..55bf9be898 100644 --- a/source/base/partitioner.inst.in +++ b/source/base/partitioner.inst.in @@ -45,3 +45,37 @@ for (SCALAR : MPI_SCALARS) std::vector &) const; #endif } + +for (SCALAR : MPI_DEVICE_SCALARS) + { +#ifdef DEAL_II_WITH_MPI + template void Utilities::MPI::Partitioner:: + export_to_ghosted_array_start( + const unsigned int, + const ArrayView &, + const ArrayView &, + const ArrayView &, + std::vector &) const; + + template void Utilities::MPI::Partitioner::export_to_ghosted_array_finish< + SCALAR, + MemorySpace::Default>(const ArrayView &, + std::vector &) const; + + template void Utilities::MPI::Partitioner::import_from_ghosted_array_start< + SCALAR, + MemorySpace::Default>(const VectorOperation::values, + const unsigned int, + const ArrayView &, + const ArrayView &, + std::vector &) const; + + template void Utilities::MPI::Partitioner:: + import_from_ghosted_array_finish( + const VectorOperation::values, + const ArrayView &, + const ArrayView &, + const ArrayView &, + std::vector &) const; +#endif + } diff --git a/source/base/partitioner_cuda.cc b/source/base/partitioner_cuda.cc deleted file mode 100644 index a68c79e4a1..0000000000 --- a/source/base/partitioner_cuda.cc +++ /dev/null @@ -1,68 +0,0 @@ -// --------------------------------------------------------------------- -// -// Copyright (C) 1999 - 2021 by the deal.II authors -// -// This file is part of the deal.II library. -// -// The deal.II library is free software; you can use it, redistribute -// it, and/or modify it under the terms of the GNU Lesser General -// Public License as published by the Free Software Foundation; either -// version 2.1 of the License, or (at your option) any later version. -// The full text of the license can be found in the file LICENSE.md at -// the top level directory of deal.II. 
-// -// --------------------------------------------------------------------- - -#include -#include - -DEAL_II_NAMESPACE_OPEN - - - -namespace Utilities -{ - namespace MPI - { - void - Partitioner::initialize_import_indices_plain_dev() const - { - const unsigned int n_import_targets = import_targets_data.size(); - import_indices_plain_dev.reserve(n_import_targets); - for (unsigned int i = 0; i < n_import_targets; ++i) - { - // Expand the indices on the host - std::vector>::const_iterator - my_imports = import_indices_data.begin() + - import_indices_chunks_by_rank_data[i], - end_my_imports = import_indices_data.begin() + - import_indices_chunks_by_rank_data[i + 1]; - std::vector import_indices_plain_host; - for (; my_imports != end_my_imports; ++my_imports) - { - const unsigned int chunk_size = - my_imports->second - my_imports->first; - for (unsigned int j = 0; j < chunk_size; ++j) - import_indices_plain_host.push_back(my_imports->first + j); - } - - // Move the indices to the device - import_indices_plain_dev.emplace_back(std::make_pair( - std::unique_ptr( - nullptr, Utilities::CUDA::delete_device_data), - import_indices_plain_host.size())); - - import_indices_plain_dev[i].first.reset( - Utilities::CUDA::allocate_device_data( - import_indices_plain_dev[i].second)); - Utilities::CUDA::copy_to_dev(import_indices_plain_host, - import_indices_plain_dev[i].first.get()); - } - } - } // namespace MPI -} // namespace Utilities - -// explicit instantiations from .templates.h file -#include "partitioner_cuda.inst" - -DEAL_II_NAMESPACE_CLOSE diff --git a/source/base/partitioner_cuda.inst.in b/source/base/partitioner_cuda.inst.in deleted file mode 100644 index f31d7d8e2b..0000000000 --- a/source/base/partitioner_cuda.inst.in +++ /dev/null @@ -1,50 +0,0 @@ -// --------------------------------------------------------------------- -// -// Copyright (C) 2017 - 2019 by the deal.II authors -// -// This file is part of the deal.II library. -// -// The deal.II library is free software; you can use it, redistribute -// it, and/or modify it under the terms of the GNU Lesser General -// Public License as published by the Free Software Foundation; either -// version 2.1 of the License, or (at your option) any later version. -// The full text of the license can be found in the file LICENSE.md at -// the top level directory of deal.II. 
-// -// --------------------------------------------------------------------- - - - -for (SCALAR : MPI_SCALARS) - { -#ifdef DEAL_II_WITH_MPI - template void Utilities::MPI::Partitioner:: - export_to_ghosted_array_start( - const unsigned int, - const ArrayView &, - const ArrayView &, - const ArrayView &, - std::vector &) const; - - template void Utilities::MPI::Partitioner::export_to_ghosted_array_finish< - SCALAR, - MemorySpace::CUDA>(const ArrayView &, - std::vector &) const; - - template void Utilities::MPI::Partitioner::import_from_ghosted_array_start< - SCALAR, - MemorySpace::CUDA>(const VectorOperation::values, - const unsigned int, - const ArrayView &, - const ArrayView &, - std::vector &) const; - - template void Utilities::MPI::Partitioner::import_from_ghosted_array_finish< - SCALAR, - MemorySpace::CUDA>(const VectorOperation::values, - const ArrayView &, - const ArrayView &, - const ArrayView &, - std::vector &) const; -#endif - } diff --git a/tests/cuda/parallel_partitioner_06.cc b/tests/mpi/parallel_partitioner_device_06.cc similarity index 56% rename from tests/cuda/parallel_partitioner_06.cc rename to tests/mpi/parallel_partitioner_device_06.cc index c787091f57..67da810cd6 100644 --- a/tests/cuda/parallel_partitioner_06.cc +++ b/tests/mpi/parallel_partitioner_device_06.cc @@ -28,10 +28,13 @@ template void -print_cuda_view(const ArrayView cuda_view) +print_device_view( + const Kokkos::View device_view) { - std::vector cpu_values(cuda_view.size()); - Utilities::CUDA::copy_to_host(cuda_view.data(), cpu_values); + std::vector cpu_values(device_view.size()); + Kokkos::deep_copy(Kokkos::View( + cpu_values.data(), cpu_values.size()), + device_view); for (Number value : cpu_values) deallog << value << " "; deallog << std::endl; @@ -106,104 +109,88 @@ test() std::vector cpu_locally_owned_data(local_size); for (unsigned int i = 0; i < local_size; ++i) cpu_locally_owned_data[i] = my_start + i; - std::unique_ptr locally_owned_data( - nullptr, Utilities::CUDA::delete_device_data); - locally_owned_data.reset( - Utilities::CUDA::allocate_device_data(local_size)); - ArrayView locally_owned_data_view( - locally_owned_data.get(), local_size); - Utilities::CUDA::copy_to_dev(cpu_locally_owned_data, - locally_owned_data.get()); + Kokkos::View + locally_owned_data("locally_owned_data", local_size); + ArrayView locally_owned_data_view( + locally_owned_data.data(), local_size); + Kokkos::deep_copy( + locally_owned_data, + Kokkos::View(cpu_locally_owned_data.data(), + cpu_locally_owned_data.size())); // set up a ghost array - std::unique_ptr ghosts( - nullptr, Utilities::CUDA::delete_device_data); - ghosts.reset( - Utilities::CUDA::allocate_device_data(v.n_ghost_indices())); - ArrayView ghosts_view(ghosts.get(), - v.n_ghost_indices()); - - std::unique_ptr temp_array( - nullptr, Utilities::CUDA::delete_device_data); - temp_array.reset( - Utilities::CUDA::allocate_device_data(v.n_import_indices())); - ArrayView temp_array_view( - temp_array.get(), v.n_import_indices()); + Kokkos::View ghosts( + "ghosts", v.n_ghost_indices()); + ArrayView ghosts_view(ghosts.data(), + ghosts.size()); + Kokkos::View temp_array( + "temp_array", v.n_import_indices()); + ArrayView temp_array_view( + temp_array.data(), temp_array.size()); std::vector requests; // send the full array - v.export_to_ghosted_array_start( + v.export_to_ghosted_array_start( 3, locally_owned_data_view, temp_array_view, ghosts_view, requests); - v.export_to_ghosted_array_finish(ghosts_view, - requests); + v.export_to_ghosted_array_finish( + 
ghosts_view, requests); deallog << "All ghosts: "; - print_cuda_view(ghosts_view); + print_device_view(ghosts); // send only the array in w - cudaError_t cuda_error = - cudaMemset(ghosts_view.data(), - 0, - ghosts_view.size() * sizeof(unsigned int)); - AssertCuda(cuda_error); + Kokkos::deep_copy(ghosts, 0); Assert(temp_array_view.size() >= w.n_import_indices(), ExcInternalError()); - ArrayView temp_array_view_w( + ArrayView temp_array_view_w( temp_array_view.data(), w.n_import_indices()); - w.export_to_ghosted_array_start( + w.export_to_ghosted_array_start( 3, locally_owned_data_view, temp_array_view_w, ghosts_view, requests); // start a second send operation for the x partitioner in parallel to make // sure communication does not get messed up - std::unique_ptr temp_array2( - nullptr, Utilities::CUDA::delete_device_data); - temp_array2.reset( - Utilities::CUDA::allocate_device_data(x.n_import_indices())); - ArrayView temp_array2_view( - temp_array2.get(), x.n_import_indices()); - - std::unique_ptr ghosts2( - nullptr, Utilities::CUDA::delete_device_data); - ghosts2.reset( - Utilities::CUDA::allocate_device_data(x.n_ghost_indices())); - ArrayView ghosts2_view(ghosts2.get(), - x.n_ghost_indices()); + Kokkos::View temp_array2( + "temp_array2", x.n_import_indices()); + ArrayView temp_array2_view( + temp_array2.data(), temp_array2.size()); + + Kokkos::View ghosts2( + "ghosts2", x.n_ghost_indices()); + ArrayView ghosts2_view(ghosts2.data(), + ghosts2.size()); std::vector requests2; - x.export_to_ghosted_array_start( + x.export_to_ghosted_array_start( 4, locally_owned_data_view, temp_array2_view, ghosts2_view, requests2); - w.export_to_ghosted_array_finish(ghosts_view, - requests); + w.export_to_ghosted_array_finish( + ghosts_view, requests); deallog << "Ghosts on reduced 1: "; - print_cuda_view(ghosts_view); + print_device_view(ghosts); - cuda_error = cudaMemset(ghosts_view.data(), - 0, - ghosts_view.size() * sizeof(unsigned int)); - AssertCuda(cuda_error); + Kokkos::deep_copy(ghosts, 0); Assert(temp_array_view.size() >= x.n_import_indices(), ExcInternalError()); - ArrayView temp_array_view_x( + ArrayView temp_array_view_x( temp_array_view.data(), x.n_import_indices()); - x.export_to_ghosted_array_start( + x.export_to_ghosted_array_start( 3, locally_owned_data_view, temp_array_view_x, ghosts_view, requests); - x.export_to_ghosted_array_finish(ghosts_view, - requests); + x.export_to_ghosted_array_finish( + ghosts_view, requests); deallog << "Ghosts on reduced 2: "; - print_cuda_view(ghosts_view); + print_device_view(ghosts); - x.export_to_ghosted_array_finish( + x.export_to_ghosted_array_finish( ghosts2_view, requests2); deallog << "Ghosts on reduced 2 without excess entries: "; - print_cuda_view(ghosts2_view); + print_device_view(ghosts2); - x.export_to_ghosted_array_start( + x.export_to_ghosted_array_start( 3, locally_owned_data_view, temp_array_view_x, ghosts_view, requests); - x.export_to_ghosted_array_finish(ghosts_view, - requests); + x.export_to_ghosted_array_finish( + ghosts_view, requests); deallog << "Ghosts on reduced 2: "; - print_cuda_view(ghosts_view); + print_device_view(ghosts); } @@ -212,7 +199,8 @@ int main(int argc, char **argv) { Utilities::MPI::MPI_InitFinalize mpi(argc, argv); - MPILogInitAll log; - init_cuda(true); + Kokkos::initialize(); + MPILogInitAll log; test(); + Kokkos::finalize(); } diff --git a/tests/cuda/parallel_partitioner_06.with_cuda_aware_mpi=on.mpirun=4.output b/tests/mpi/parallel_partitioner_device_06.with_mpi_with_device_support=on.mpirun=4.output 
similarity index 100% rename from tests/cuda/parallel_partitioner_06.with_cuda_aware_mpi=on.mpirun=4.output rename to tests/mpi/parallel_partitioner_device_06.with_mpi_with_device_support=on.mpirun=4.output diff --git a/tests/cuda/parallel_partitioner_07.cc b/tests/mpi/parallel_partitioner_device_07.cc similarity index 58% rename from tests/cuda/parallel_partitioner_07.cc rename to tests/mpi/parallel_partitioner_device_07.cc index b2df4ae436..d812e9dd34 100644 --- a/tests/cuda/parallel_partitioner_07.cc +++ b/tests/mpi/parallel_partitioner_device_07.cc @@ -28,10 +28,13 @@ template void -print_cuda_view(const ArrayView cuda_view) +print_device_view( + const Kokkos::View device_view) { - std::vector cpu_values(cuda_view.size()); - Utilities::CUDA::copy_to_host(cuda_view.data(), cpu_values); + std::vector cpu_values(device_view.size()); + Kokkos::deep_copy(Kokkos::View( + cpu_values.data(), cpu_values.size()), + device_view); for (Number value : cpu_values) deallog << value << " "; deallog << std::endl; @@ -100,47 +103,36 @@ test() x.set_ghost_indices(local_relevant_3, v.ghost_indices()); // set up a ghost array with some entries - std::vector cpu_ghost_array(v.n_ghost_indices(), 1); - std::unique_ptr ghost_array( - Utilities::CUDA::allocate_device_data(cpu_ghost_array.size()), - Utilities::CUDA::delete_device_data); - ArrayView ghost_array_view( - ghost_array.get(), cpu_ghost_array.size()); - Utilities::CUDA::copy_to_dev(cpu_ghost_array, ghost_array.get()); + Kokkos::View ghost_array( + "ghost_array", v.n_ghost_indices()); + ArrayView ghost_array_view( + ghost_array.data(), ghost_array.size()); + Kokkos::deep_copy(ghost_array, 1); // set up other arrays - std::unique_ptr locally_owned_array( - Utilities::CUDA::allocate_device_data(local_size), - Utilities::CUDA::delete_device_data); - ArrayView locally_owned_array_view( - locally_owned_array.get(), local_size); + Kokkos::View + locally_owned_array("locally_owned_array", local_size); + ArrayView locally_owned_array_view( + locally_owned_array.data(), locally_owned_array.size()); - std::unique_ptr temp_array( - Utilities::CUDA::allocate_device_data(v.n_import_indices()), - Utilities::CUDA::delete_device_data); - ArrayView temp_array_view( - temp_array.get(), v.n_import_indices()); + Kokkos::View temp_array( + "temp_array", v.n_import_indices()); + ArrayView temp_array_view( + temp_array.data(), temp_array.size()); std::vector requests; // send the full array { - std::unique_ptr ghosts( - Utilities::CUDA::allocate_device_data( - ghost_array_view.size()), - Utilities::CUDA::delete_device_data); - ArrayView ghosts_view( - ghosts.get(), ghost_array_view.size()); - const cudaError_t cuda_error = - cudaMemcpy(ghosts.get(), - ghost_array_view.data(), - ghost_array_view.size() * sizeof(unsigned int), - cudaMemcpyDeviceToDevice); - AssertCuda(cuda_error); + Kokkos::View ghosts( + "ghosts", ghost_array_view.size()); + ArrayView ghosts_view(ghosts.data(), + ghosts.size()); + Kokkos::deep_copy(ghosts, ghost_array); - v.import_from_ghosted_array_start( + v.import_from_ghosted_array_start( VectorOperation::add, 3, ghosts_view, temp_array_view, requests); - v.import_from_ghosted_array_finish( + v.import_from_ghosted_array_finish( VectorOperation::add, temp_array_view, locally_owned_array_view, @@ -149,37 +141,26 @@ test() // check that the ghost entries are zeroed out in these calls deallog << "v ghost entries (should be zero up to index " << v.n_ghost_indices() - 1 << "):" << std::endl; - print_cuda_view(ghosts_view); + print_device_view(ghosts); } deallog 
<< "From all ghosts: "; - print_cuda_view(locally_owned_array_view); + print_device_view(locally_owned_array); // send only the array in w - cudaError_t cuda_error = - cudaMemset(locally_owned_array_view.data(), - 0, - locally_owned_array_view.size() * sizeof(unsigned int)); - AssertCuda(cuda_error); + Kokkos::deep_copy(locally_owned_array, 0); Assert(temp_array_view.size() >= w.n_import_indices(), ExcInternalError()); - ArrayView temp_array_view_w( + ArrayView temp_array_view_w( temp_array_view.data(), w.n_import_indices()); { - std::unique_ptr ghosts( - Utilities::CUDA::allocate_device_data( - ghost_array_view.size()), - Utilities::CUDA::delete_device_data); - ArrayView ghosts_view( - ghosts.get(), ghost_array_view.size()); - const cudaError_t cuda_error = - cudaMemcpy(ghosts.get(), - ghost_array_view.data(), - ghost_array_view.size() * sizeof(unsigned int), - cudaMemcpyDeviceToDevice); - AssertCuda(cuda_error); + Kokkos::View ghosts( + "ghosts", ghost_array_view.size()); + ArrayView ghosts_view(ghosts.data(), + ghosts.size()); + Kokkos::deep_copy(ghosts, ghost_array); - w.import_from_ghosted_array_start( + w.import_from_ghosted_array_start( VectorOperation::add, 3, ghosts_view, temp_array_view_w, requests); - w.import_from_ghosted_array_finish( + w.import_from_ghosted_array_finish( VectorOperation::add, temp_array_view_w, locally_owned_array_view, @@ -189,37 +170,26 @@ test() // check that the ghost entries are zeroed out in these calls deallog << "w ghost entries (should be zero up to index " << w.n_ghost_indices() - 1 << "):" << std::endl; - print_cuda_view(ghosts_view); + print_device_view(ghosts); } deallog << "From reduced ghosts 1: "; - print_cuda_view(locally_owned_array_view); + print_device_view(locally_owned_array); // send only the array in x - cuda_error = - cudaMemset(locally_owned_array_view.data(), - 0, - locally_owned_array_view.size() * sizeof(unsigned int)); - AssertCuda(cuda_error); + Kokkos::deep_copy(locally_owned_array, 0); Assert(temp_array_view.size() >= x.n_import_indices(), ExcInternalError()); - ArrayView temp_array_view_x( + ArrayView temp_array_view_x( temp_array_view.data(), x.n_import_indices()); { - std::unique_ptr ghosts( - Utilities::CUDA::allocate_device_data( - ghost_array_view.size()), - Utilities::CUDA::delete_device_data); - ArrayView ghosts_view( - ghosts.get(), ghost_array_view.size()); - const cudaError_t cuda_error = - cudaMemcpy(ghosts.get(), - ghost_array_view.data(), - ghost_array_view.size() * sizeof(unsigned int), - cudaMemcpyDeviceToDevice); - AssertCuda(cuda_error); + Kokkos::View ghosts( + "ghosts", ghost_array_view.size()); + ArrayView ghosts_view(ghosts.data(), + ghosts.size()); + Kokkos::deep_copy(ghosts, ghost_array); - x.import_from_ghosted_array_start( + x.import_from_ghosted_array_start( VectorOperation::add, 3, ghosts_view, temp_array_view_x, requests); - x.import_from_ghosted_array_finish( + x.import_from_ghosted_array_finish( VectorOperation::add, temp_array_view_x, locally_owned_array_view, @@ -229,30 +199,28 @@ test() // check that the ghost entries are zeroed out in these calls deallog << "x ghost entries (should be zero up to index " << x.n_ghost_indices() << "):" << std::endl; - print_cuda_view(ghosts_view); + print_device_view(ghosts); } deallog << "From reduced ghosts 2: "; - print_cuda_view(locally_owned_array_view); + print_device_view(locally_owned_array); // now send a tight array from x and add into the existing entries - std::vector cpu_ghosts(x.n_ghost_indices(), 1); - std::unique_ptr ghosts( - 
Utilities::CUDA::allocate_device_data(cpu_ghosts.size()), - Utilities::CUDA::delete_device_data); - ArrayView ghosts_view(ghosts.get(), - cpu_ghosts.size()); - Utilities::CUDA::copy_to_dev(cpu_ghosts, ghosts.get()); + Kokkos::View ghosts( + "ghosts", x.n_ghost_indices()); + ArrayView ghosts_view(ghosts.data(), + ghosts.size()); + Kokkos::deep_copy(ghosts, 1); - x.import_from_ghosted_array_start( + x.import_from_ghosted_array_start( VectorOperation::add, 3, ghosts_view, temp_array_view_x, requests); - x.import_from_ghosted_array_finish( + x.import_from_ghosted_array_finish( VectorOperation::add, temp_array_view_x, locally_owned_array_view, ghosts_view, requests); deallog << "From tight reduced ghosts 2: "; - print_cuda_view(locally_owned_array_view); + print_device_view(locally_owned_array); } @@ -261,7 +229,8 @@ int main(int argc, char **argv) { Utilities::MPI::MPI_InitFinalize mpi(argc, argv); - MPILogInitAll log; - init_cuda(true); + Kokkos::initialize(); + MPILogInitAll log; test(); + Kokkos::finalize(); } diff --git a/tests/cuda/parallel_partitioner_07.with_cuda_aware_mpi=on.mpirun=4.output b/tests/mpi/parallel_partitioner_device_07.with_mpi_with_device_support=on.mpirun=4.output similarity index 100% rename from tests/cuda/parallel_partitioner_07.with_cuda_aware_mpi=on.mpirun=4.output rename to tests/mpi/parallel_partitioner_device_07.with_mpi_with_device_support=on.mpirun=4.output diff --git a/tests/cuda/parallel_partitioner_08.cc b/tests/mpi/parallel_partitioner_device_08.cc similarity index 62% rename from tests/cuda/parallel_partitioner_08.cc rename to tests/mpi/parallel_partitioner_device_08.cc index a6a84564ba..3429cd7f7f 100644 --- a/tests/cuda/parallel_partitioner_08.cc +++ b/tests/mpi/parallel_partitioner_device_08.cc @@ -25,22 +25,18 @@ template void -print_cuda_view(const ArrayView cuda_view) +print_device_view( + const Kokkos::View device_view) { - std::vector cpu_values(cuda_view.size()); - Utilities::CUDA::copy_to_host(cuda_view.data(), cpu_values); + std::vector cpu_values(device_view.size()); + Kokkos::deep_copy(Kokkos::View( + cpu_values.data(), cpu_values.size()), + device_view); for (Number value : cpu_values) deallog << value << " "; deallog << std::endl; } -__global__ void -set_value(double *values_dev, unsigned int index, double val) -{ - values_dev[index] = val; -} - - template void test() @@ -97,20 +93,20 @@ test() std::vector cpu_owned(rank == 0 ? 8 : 0); for (unsigned int i = 0; i < cpu_owned.size(); ++i) cpu_owned[i] = i; - std::unique_ptr owned( - Utilities::CUDA::allocate_device_data(cpu_owned.size()), - Utilities::CUDA::delete_device_data); - ArrayView owned_view(owned.get(), - cpu_owned.size()); - Utilities::CUDA::copy_to_dev(cpu_owned, owned.get()); - - std::vector cpu_ghost(4, 0); - std::unique_ptr ghost( - Utilities::CUDA::allocate_device_data(cpu_ghost.size()), - Utilities::CUDA::delete_device_data); - ArrayView ghost_view(ghost.get(), - cpu_ghost.size()); - Utilities::CUDA::copy_to_dev(cpu_ghost, ghost.get()); + Kokkos::View owned( + "owned", rank == 0 ? 
8 : 0); + ArrayView owned_view(owned.data(), + owned.size()); + MemorySpace::Default::kokkos_space::execution_space exec; + Kokkos::parallel_for( + Kokkos::RangePolicy(exec, 0, owned.size()), + KOKKOS_LAMBDA(int i) { owned(i) = i; }); + exec.fence(); + + Kokkos::View ghost("ghost", 4); + ArrayView ghost_view(ghost.data(), + ghost.size()); + Kokkos::deep_copy(ghost, 0); // update ghost values // vector of requests std::vector requests; std::vector compress_requests; // allocate temporary array - std::unique_ptr tmp_data( - Utilities::CUDA::allocate_device_data( - tight_partitioner->n_import_indices()), - Utilities::CUDA::delete_device_data); - ArrayView tmp_data_view( - tmp_data.get(), tight_partitioner->n_import_indices()); + Kokkos::View tmp_data( + "tmp_data", tight_partitioner->n_import_indices()); + ArrayView tmp_data_view(tmp_data.data(), + tmp_data.size()); // begin exchange, and ... - tight_partitioner->export_to_ghosted_array_start( - 0, owned_view, tmp_data_view, ghost_view, requests); + tight_partitioner + ->export_to_ghosted_array_start( + 0, owned_view, tmp_data_view, ghost_view, requests); // ... finish exchange - tight_partitioner->export_to_ghosted_array_finish( - ghost_view, requests); + tight_partitioner + ->export_to_ghosted_array_finish(ghost_view, + requests); auto print = [&]() { deallog << "owned:" << std::endl; - print_cuda_view(owned_view); + print_device_view(owned); deallog << "ghost:" << std::endl; - print_cuda_view(ghost_view); + print_device_view(ghost); }; deallog << "update ghosts()" << std::endl; print(); - std::unique_ptr import_data( - Utilities::CUDA::allocate_device_data( - tight_partitioner->n_import_indices()), - Utilities::CUDA::delete_device_data); - ArrayView import_data_view( - tmp_data.get(), tight_partitioner->n_import_indices()); + Kokkos::View import_data( + "import_data", tight_partitioner->n_import_indices()); + ArrayView import_data_view(tmp_data.data(), + import_data.size()); // now do insert: auto compress = [&](VectorOperation::values operation) { const unsigned int counter = 0; tight_partitioner - ->import_from_ghosted_array_start( + ->import_from_ghosted_array_start( operation, counter, ghost_view, import_data_view, compress_requests); tight_partitioner - ->import_from_ghosted_array_finish( + ->import_from_ghosted_array_finish( operation, import_data_view, owned_view, ghost_view, compress_requests); }; deallog << "compress(insert)" << std::endl; compress(VectorOperation::insert); print(); if (rank == 1) { - set_value<<<1, 1>>>(ghost.get(), 1, 10); - set_value<<<1, 1>>>(ghost.get(), 2, 20); + Kokkos::deep_copy(Kokkos::subview(ghost, 1), 10); + Kokkos::deep_copy(Kokkos::subview(ghost, 2), 20); } deallog << "compress(add)" << std::endl; @@ -183,12 +177,11 @@ main(int argc, char **argv) { using namespace dealii; Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); - + Kokkos::initialize(); MPILogInitAll log; - init_cuda(true); - test(); + Kokkos::finalize(); return 0; } diff --git a/tests/cuda/parallel_partitioner_08.with_cuda_aware_mpi=on.mpirun=2.output b/tests/mpi/parallel_partitioner_device_08.with_mpi_with_device_support=on.mpirun=2.output similarity index 100% rename from tests/cuda/parallel_partitioner_08.with_cuda_aware_mpi=on.mpirun=2.output rename to tests/mpi/parallel_partitioner_device_08.with_mpi_with_device_support=on.mpirun=2.output
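// Taken together, a short usage sketch of the converted code path. Hedged:
// the IndexSet setup is omitted, locally_owned and ghost_indices are
// placeholders the caller provides, and deal.II is assumed to be configured
// with Kokkos, MPI, and -DDEAL_II_MPI_WITH_DEVICE_SUPPORT=ON.
#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/lac/la_parallel_vector.h>

void
exchange_ghosts(const dealii::IndexSet &locally_owned,
                const dealii::IndexSet &ghost_indices,
                const MPI_Comm          comm)
{
  // Vector data lives in MemorySpace::Default (device memory when a GPU
  // backend is active); with device-aware MPI the ghost-exchange buffers
  // handed to MPI_Isend/MPI_Irecv stay on the device.
  dealii::LinearAlgebra::distributed::
    Vector<double, dealii::MemorySpace::Default>
      v(locally_owned, ghost_indices, comm);
  v = 1.;
  // Internally exercises the export_to_ghosted_array_start()/finish()
  // pair shown in this patch.
  v.update_ghost_values();
}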