From c52953cfd70cf585c4c3736e6fdfcd450902cb6d Mon Sep 17 00:00:00 2001
From: Bruno Turcksin
Date: Mon, 26 Nov 2018 13:51:38 +0000
Subject: [PATCH] Optimization of export_to_ghosted_array_start()

When using CUDA-aware MPI, the sets of local indices that are ghost
indices on other processors are expanded into plain arrays. This allows
for a sharp reduction in the number of kernel launches in
export_to_ghosted_array_start().
---
 include/deal.II/base/partitioner.h           |  14 +++
 include/deal.II/base/partitioner.templates.h | 112 ++++++++++++++-----
 2 files changed, 98 insertions(+), 28 deletions(-)

diff --git a/include/deal.II/base/partitioner.h b/include/deal.II/base/partitioner.h
index 440ada1941..2c0db0955f 100644
--- a/include/deal.II/base/partitioner.h
+++ b/include/deal.II/base/partitioner.h
@@ -614,6 +614,20 @@ namespace Utilities
        */
       std::vector<std::pair<unsigned int, unsigned int>> import_indices_data;

+      /**
+       * The set of (local) indices that we are importing during compress(),
+       * i.e., others' ghosts that belong to the local range. The data stored
+       * is the same as in import_indices_data, but expanded into plain
+       * arrays. This variable is only used when using CUDA-aware MPI.
+       */
+      // The variable is mutable to enable lazy initialization in
+      // export_to_ghosted_array_start(). This way the partitioner does not
+      // have to be templated on the MemorySpaceType.
+      mutable std::vector<
+        std::pair<std::unique_ptr<unsigned int[], void (*)(unsigned int *)>,
+                  unsigned int>>
+        import_indices_plain_dev;
+
       /**
        * A variable caching the number of ghost indices. It would be expensive
        * to compute it by iterating over the import indices and accumulate them.
diff --git a/include/deal.II/base/partitioner.templates.h b/include/deal.II/base/partitioner.templates.h
index f21e4e65c2..ddf531bd22 100644
--- a/include/deal.II/base/partitioner.templates.h
+++ b/include/deal.II/base/partitioner.templates.h
@@ -98,41 +98,97 @@ namespace Utilities
           }

         Number *temp_array_ptr = temporary_storage.data();
-        for (unsigned int i = 0; i < n_import_targets; i++)
-          {
-            // copy the data to be sent to the import_data field
-            std::vector<std::pair<unsigned int, unsigned int>>::const_iterator
-              my_imports = import_indices_data.begin() +
-                           import_indices_chunks_by_rank_data[i],
-              end_my_imports = import_indices_data.begin() +
-                               import_indices_chunks_by_rank_data[i + 1];
-            unsigned int index = 0;
-            for (; my_imports != end_my_imports; ++my_imports)
-              {
-                const unsigned int chunk_size =
-                  my_imports->second - my_imports->first;
 #    if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
       defined(DEAL_II_WITH_CUDA_AWARE_MPI)
-                if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+        // When using CUDA-aware MPI, the set of local indices that are ghost
+        // indices on other processors is expanded into plain arrays. This is
+        // done for performance reasons, as it can significantly decrease the
+        // number of kernels launched. The indices are expanded the first time
+        // the function is called.
+        if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+          {
+            if (import_indices_plain_dev.size() == 0)
+              {
+                import_indices_plain_dev.reserve(n_import_targets);
+                for (unsigned int i = 0; i < n_import_targets; i++)
                   {
-                    const cudaError_t cuda_error_code =
-                      cudaMemcpy(temp_array_ptr + index,
-                                 locally_owned_array.data() + my_imports->first,
-                                 chunk_size * sizeof(Number),
-                                 cudaMemcpyDeviceToDevice);
-                    AssertCuda(cuda_error_code);
+                    // Expand the indices on the host
+                    std::vector<std::pair<unsigned int, unsigned int>>::
+                      const_iterator my_imports =
+                        import_indices_data.begin() +
+                        import_indices_chunks_by_rank_data[i],
+                      end_my_imports =
+                        import_indices_data.begin() +
+                        import_indices_chunks_by_rank_data[i + 1];
+                    std::vector<unsigned int> import_indices_plain_host;
+                    for (; my_imports != end_my_imports; ++my_imports)
+                      {
+                        const unsigned int chunk_size =
+                          my_imports->second - my_imports->first;
+                        for (unsigned int j = 0; j < chunk_size; ++j)
+                          import_indices_plain_host.push_back(my_imports->first +
+                                                              j);
+                      }
+
+                    // Move the indices to the device
+                    import_indices_plain_dev.emplace_back(std::make_pair(
+                      std::unique_ptr<unsigned int[], void (*)(unsigned int *)>(
+                        nullptr,
+                        Utilities::CUDA::delete_device_data<unsigned int>),
+                      import_indices_plain_host.size()));
+
+                    import_indices_plain_dev[i].first.reset(
+                      Utilities::CUDA::allocate_device_data<unsigned int>(
+                        import_indices_plain_dev[i].second));
+                    Utilities::CUDA::copy_to_dev(
+                      import_indices_plain_host,
+                      import_indices_plain_dev[i].first.get());
                   }
-                else
+              }
+          }
+#    endif
+
+        for (unsigned int i = 0; i < n_import_targets; i++)
+          {
+#    if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+      defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+            if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+              {
+                const int n_blocks =
+                  1 + (import_indices_plain_dev[i].second - 1) /
+                        (::dealii::CUDAWrappers::chunk_size *
+                         ::dealii::CUDAWrappers::block_size);
+                ::dealii::LinearAlgebra::CUDAWrappers::kernel::
+                  gather<<<n_blocks, ::dealii::CUDAWrappers::block_size>>>(
+                    temp_array_ptr,
+                    locally_owned_array.data(),
+                    import_indices_plain_dev[i].first.get(),
+                    import_indices_plain_dev[i].second);
+              }
+            else
 #    endif
+              {
+                // copy the data to be sent to the import_data field
+                std::vector<std::pair<unsigned int, unsigned int>>::const_iterator
+                  my_imports = import_indices_data.begin() +
+                               import_indices_chunks_by_rank_data[i],
+                  end_my_imports = import_indices_data.begin() +
+                                   import_indices_chunks_by_rank_data[i + 1];
+                unsigned int index = 0;
+                for (; my_imports != end_my_imports; ++my_imports)
                   {
-                    std::memcpy(temp_array_ptr + index,
-                                locally_owned_array.data() + my_imports->first,
-                                chunk_size * sizeof(Number));
+                    const unsigned int chunk_size =
+                      my_imports->second - my_imports->first;
+                    {
+                      std::memcpy(temp_array_ptr + index,
+                                  locally_owned_array.data() + my_imports->first,
+                                  chunk_size * sizeof(Number));
+                    }
+                    index += chunk_size;
                   }
-                index += chunk_size;
-              }
-            AssertDimension(index, import_targets_data[i].second);
+                AssertDimension(index, import_targets_data[i].second);
+              }

             // start the send operations
             const int ierr =
@@ -702,7 +758,7 @@ namespace Utilities
   } // end of namespace MPI
-} // end of namespace Utilities
+} // namespace Utilities

 DEAL_II_NAMESPACE_CLOSE
-- 
2.39.5
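
To make the launch-count argument of this patch concrete, here is a small standalone sketch of the same idea, independent of deal.II: the (begin, end) chunks of locally owned indices destined for one neighboring rank are flattened into a plain index array once, copied to the device once, and then gathered into the contiguous send buffer with a single kernel launch instead of one cudaMemcpy per chunk. The kernel, the helper names (gather_kernel, flatten_chunks, gather_for_one_rank), and the one-element-per-thread launch configuration are illustrative assumptions, not the deal.II internals; deal.II's LinearAlgebra::CUDAWrappers::kernel::gather additionally lets each thread handle several elements (chunk_size).

// Standalone sketch (not deal.II code): flatten index chunks once, then gather
// all values for one target rank with a single kernel launch.
#include <cuda_runtime.h>
#include <utility>
#include <vector>

__global__ void gather_kernel(double *            dst,
                              const double *      src,
                              const unsigned int *indices,
                              unsigned int        n)
{
  // One thread per gathered element (simplified launch configuration).
  const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n)
    dst[i] = src[indices[i]];
}

// Expand half-open index ranges [first, second) into a plain array of indices.
std::vector<unsigned int>
flatten_chunks(const std::vector<std::pair<unsigned int, unsigned int>> &chunks)
{
  std::vector<unsigned int> flat;
  for (const auto &chunk : chunks)
    for (unsigned int j = chunk.first; j < chunk.second; ++j)
      flat.push_back(j);
  return flat;
}

// Gather the locally owned values one neighboring rank needs into the
// contiguous send buffer with exactly one kernel launch.
void gather_for_one_rank(double *            send_buffer_dev,
                         const double *      locally_owned_dev,
                         const unsigned int *flat_indices_dev,
                         unsigned int        n_indices)
{
  const unsigned int block_size = 256;
  const unsigned int n_blocks   = (n_indices + block_size - 1) / block_size;
  gather_kernel<<<n_blocks, block_size>>>(send_buffer_dev,
                                          locally_owned_dev,
                                          flat_indices_dev,
                                          n_indices);
}

int main()
{
  // Locally owned values and two index chunks a neighboring rank needs:
  // [2, 5) and [8, 10).
  std::vector<double> owned_host = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
  const std::vector<std::pair<unsigned int, unsigned int>> chunks = {{2, 5},
                                                                     {8, 10}};
  const std::vector<unsigned int> flat = flatten_chunks(chunks);

  double *      owned_dev   = nullptr;
  double *      send_dev    = nullptr;
  unsigned int *indices_dev = nullptr;
  cudaMalloc(&owned_dev, owned_host.size() * sizeof(double));
  cudaMalloc(&send_dev, flat.size() * sizeof(double));
  cudaMalloc(&indices_dev, flat.size() * sizeof(unsigned int));
  cudaMemcpy(owned_dev, owned_host.data(),
             owned_host.size() * sizeof(double), cudaMemcpyHostToDevice);
  // The flattened indices are copied once and reused for every exchange.
  cudaMemcpy(indices_dev, flat.data(),
             flat.size() * sizeof(unsigned int), cudaMemcpyHostToDevice);

  gather_for_one_rank(send_dev, owned_dev, indices_dev,
                      static_cast<unsigned int>(flat.size()));
  cudaDeviceSynchronize();

  cudaFree(owned_dev);
  cudaFree(send_dev);
  cudaFree(indices_dev);
  return 0;
}

With n chunks per target rank, this replaces n per-chunk device-to-device copies by a single launch per rank, which is where the reduction in kernel/API calls described in the commit message comes from.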