From a2fe02d2585e5f98c3208306d1ac0b56e76a8392 Mon Sep 17 00:00:00 2001
From: Daniel Arndt
Date: Wed, 31 Oct 2018 19:38:05 +0100
Subject: [PATCH] Let Partitioner have a MemorySpace template parameter

---
 include/deal.II/base/partitioner.h           |   5 +-
 include/deal.II/base/partitioner.templates.h | 132 +++++++++++++++----
 source/base/CMakeLists.txt                   |   2 +
 source/base/partitioner.cc                   |  19 +--
 source/base/partitioner.cu                   |  24 ++++
 source/base/partitioner.cuda.inst.in         |  36 +++++
 source/base/partitioner.inst.in              |  22 ++--
 7 files changed, 193 insertions(+), 47 deletions(-)
 create mode 100644 source/base/partitioner.cu
 create mode 100644 source/base/partitioner.cuda.inst.in

diff --git a/include/deal.II/base/partitioner.h b/include/deal.II/base/partitioner.h
index 1c2f6efeac..e44238ebb0 100644
--- a/include/deal.II/base/partitioner.h
+++ b/include/deal.II/base/partitioner.h
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include <deal.II/base/memory_space.h>
 #include
 #include
 #include
@@ -419,7 +420,7 @@ namespace Utilities
        * This functionality is used in
        * LinearAlgebra::distributed::Vector::update_ghost_values().
        */
-      template <typename Number>
+      template <typename Number, typename MemorySpaceType = MemorySpace::Host>
       void
       export_to_ghosted_array_start(
        const unsigned int communication_channel,
@@ -529,7 +530,7 @@ namespace Utilities
        * This functionality is used in
        * LinearAlgebra::distributed::Vector::compress().
        */
-      template <typename Number>
+      template <typename Number, typename MemorySpaceType = MemorySpace::Host>
       void
       import_from_ghosted_array_finish(
        const VectorOperation::values vector_operation,
diff --git a/include/deal.II/base/partitioner.templates.h b/include/deal.II/base/partitioner.templates.h
index 09e9c73175..62d1cd30ae 100644
--- a/include/deal.II/base/partitioner.templates.h
+++ b/include/deal.II/base/partitioner.templates.h
@@ -18,8 +18,10 @@

 #include
+#include <deal.II/base/cuda_size.h>
 #include

+#include <deal.II/lac/cuda_kernels.h>
 #include
 #include

@@ -35,7 +37,7 @@ namespace Utilities
 #  ifdef DEAL_II_WITH_MPI

-    template <typename Number>
+    template <typename Number, typename MemorySpaceType>
     void
     Partitioner::export_to_ghosted_array_start(
       const unsigned int communication_channel,
@@ -92,7 +94,7 @@ namespace Utilities
              communicator,
              &requests[i]);
            AssertThrowMPI(ierr);
-           ghost_array_ptr += ghost_targets()[i].second;
+           ghost_array_ptr += ghost_targets_data[i].second;
          }

        Number *temp_array_ptr = temporary_storage.data();
@@ -106,9 +108,30 @@ namespace Utilities
              import_indices_chunks_by_rank_data[i + 1];
            unsigned int index = 0;
            for (; my_imports != end_my_imports; ++my_imports)
-             for (unsigned int j = my_imports->first; j < my_imports->second;
-                  j++)
-               temp_array_ptr[index++] = locally_owned_array[j];
+             {
+               const unsigned int chunk_size =
+                 my_imports->second - my_imports->first;
+#    if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+      defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+               if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+                 {
+                   const cudaError_t cuda_error_code =
+                     cudaMemcpy(temp_array_ptr + index,
+                                locally_owned_array.data() + my_imports->first,
+                                chunk_size * sizeof(Number),
+                                cudaMemcpyDeviceToDevice);
+                   AssertCuda(cuda_error_code);
+                 }
+               else
+#    endif
+                 {
+                   std::memcpy(temp_array_ptr + index,
+                               locally_owned_array.data() + my_imports->first,
+                               chunk_size * sizeof(Number));
+                 }
+               index += chunk_size;
+             }
+
            AssertDimension(index, import_targets_data[i].second);

            // start the send operations
@@ -370,7 +393,7 @@ namespace Utilities



-    template <typename Number>
+    template <typename Number, typename MemorySpaceType>
     void
     Partitioner::import_from_ghosted_array_finish(
       const VectorOperation::values vector_operation,
@@ -423,7 +446,6 @@ namespace Utilities

      if (vector_operation != dealii::VectorOperation::insert)
        AssertDimension(n_ghost_targets + n_import_targets, requests.size());
-
      // first wait for the receive to complete
      if (requests.size() > 0 && n_import_targets > 0)
        {
@@ -433,21 +455,20 @@ namespace Utilities
          AssertThrowMPI(ierr);

          const Number *read_position = temporary_storage.data();
-         std::vector<std::pair<unsigned int, unsigned int>>::const_iterator
-           my_imports = import_indices_data.begin();
-
+#    if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+          defined(DEAL_II_WITH_CUDA_AWARE_MPI))
          // If the operation is no insertion, add the imported data to the
          // local values. For insert, nothing is done here (but in debug mode
          // we assert that the specified value is either zero or matches with
          // the ones already present
          if (vector_operation == dealii::VectorOperation::add)
-           for (; my_imports != import_indices_data.end(); ++my_imports)
-             for (unsigned int j = my_imports->first; j < my_imports->second;
+           for (const auto &import_range : import_indices_data)
+             for (unsigned int j = import_range.first; j < import_range.second;
                   j++)
                locally_owned_array[j] += *read_position++;
          else if (vector_operation == dealii::VectorOperation::min)
-           for (; my_imports != import_indices_data.end(); ++my_imports)
-             for (unsigned int j = my_imports->first; j < my_imports->second;
+           for (const auto &import_range : import_indices_data)
+             for (unsigned int j = import_range.first; j < import_range.second;
                   j++)
                {
                  locally_owned_array[j] =
@@ -455,8 +476,8 @@ namespace Utilities
                  read_position++;
                }
          else if (vector_operation == dealii::VectorOperation::max)
-           for (; my_imports != import_indices_data.end(); ++my_imports)
-             for (unsigned int j = my_imports->first; j < my_imports->second;
+           for (const auto &import_range : import_indices_data)
+             for (unsigned int j = import_range.first; j < import_range.second;
                   j++)
                {
                  locally_owned_array[j] =
@@ -464,8 +485,8 @@ namespace Utilities
                  read_position++;
                }
          else
-           for (; my_imports != import_indices_data.end(); ++my_imports)
-             for (unsigned int j = my_imports->first; j < my_imports->second;
+           for (const auto &import_range : import_indices_data)
+             for (unsigned int j = import_range.first; j < import_range.second;
                   j++, read_position++)
                // Below we use relatively large precision in units in the last
                // place (ULP) as this Assert can be easily triggered in
@@ -485,6 +506,47 @@ namespace Utilities
                               Number>::ExcNonMatchingElements(*read_position,
                                                               locally_owned_array[j],
                                                               my_pid));
+#    else
+         if (vector_operation == dealii::VectorOperation::add)
+           {
+             for (const auto &import_range : import_indices_data)
+               {
+                 const auto chunk_size =
+                   import_range.second - import_range.first;
+                 const int n_blocks =
+                   1 + (chunk_size - 1) / (::dealii::CUDAWrappers::chunk_size *
+                                           ::dealii::CUDAWrappers::block_size);
+                 dealii::LinearAlgebra::CUDAWrappers::kernel::vector_bin_op<
+                   Number,
+                   dealii::LinearAlgebra::CUDAWrappers::kernel::Binop_Addition>
+                   <<<n_blocks, ::dealii::CUDAWrappers::block_size>>>(
+                     locally_owned_array.data() + import_range.first,
+                     read_position,
+                     chunk_size);
+                 read_position += chunk_size;
+               }
+           }
+         else
+           for (const auto &import_range : import_indices_data)
+             {
+               const auto chunk_size =
+                 import_range.second - import_range.first;
+               const cudaError_t cuda_error_code =
+                 cudaMemcpy(locally_owned_array.data() + import_range.first,
+                            read_position,
+                            chunk_size * sizeof(Number),
+                            cudaMemcpyDeviceToDevice);
+               AssertCuda(cuda_error_code);
+               read_position += chunk_size;
+             }
+
+         static_assert(
+           std::is_same<MemorySpaceType, MemorySpace::CUDA>::value,
+           "If we are using the CPU implementation, we should not trigger the restriction");
+         Assert(vector_operation == dealii::VectorOperation::insert ||
+                  vector_operation == dealii::VectorOperation::add,
+                ExcNotImplemented());
+#    endif
          AssertDimension(read_position - temporary_storage.data(),
                          n_import_indices());
        }
@@ -505,18 +567,34 @@ namespace Utilities
      if (ghost_array.size() > 0)
        {
          Assert(ghost_array.begin() != nullptr, ExcInternalError());
+
+#    if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+      defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+         if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+           {
+             Assert(std::is_trivial<Number>::value, ExcNotImplemented());
+             cudaMemset(ghost_array.data(),
+                        0,
+                        sizeof(Number) * n_ghost_indices());
+           }
+         else
+#    endif
+           {
 #  ifdef DEAL_II_WITH_CXX17
-         if constexpr (std::is_trivial<Number>::value)
+             if constexpr (std::is_trivial<Number>::value)
 #  else
-         if (std::is_trivial<Number>::value)
+             if (std::is_trivial<Number>::value)
 #  endif
-           std::memset(ghost_array.data(),
-                       0,
-                       sizeof(Number) * n_ghost_indices());
-         else
-           std::fill(ghost_array.data(),
-                     ghost_array.data() + n_ghost_indices(),
-                     0);
+               {
+                 std::memset(ghost_array.data(),
+                             0,
+                             sizeof(Number) * n_ghost_indices());
+               }
+             else
+               std::fill(ghost_array.data(),
+                         ghost_array.data() + n_ghost_indices(),
+                         0);
+           }
        }

      // clear the compress requests
diff --git a/source/base/CMakeLists.txt b/source/base/CMakeLists.txt
index 8e03e013c5..1379d6b6e2 100644
--- a/source/base/CMakeLists.txt
+++ b/source/base/CMakeLists.txt
@@ -96,6 +96,7 @@ IF(DEAL_II_WITH_CUDA)
   SET(_separate_src
     ${_separate_src}
     cuda.cu
+    partitioner.cu
   )
 ENDIF()

@@ -117,6 +118,7 @@ SET(_inst
   geometric_utilities.inst.in
   mpi.inst.in
   partitioner.inst.in
+  partitioner.cuda.inst.in
   polynomials_rannacher_turek.inst.in
   symmetric_tensor.inst.in
   tensor.inst.in
diff --git a/source/base/partitioner.cc b/source/base/partitioner.cc
index 75a45a9df7..40a1e9cd6c 100644
--- a/source/base/partitioner.cc
+++ b/source/base/partitioner.cc
@@ -320,7 +320,8 @@ namespace Utilities
                            n_import_indices_data);
      {
        unsigned int current_index_start = 0;
-       std::vector<MPI_Request> import_requests(import_targets_data.size());
+       std::vector<MPI_Request> import_requests(import_targets_data.size() +
+                                                n_ghost_targets);
        for (unsigned int i = 0; i < import_targets_data.size(); i++)
          {
            const int ierr =
@@ -336,17 +337,19 @@ namespace Utilities
          }
        AssertDimension(current_index_start, n_import_indices_data);

-       // use blocking send for ghost indices stored in expanded_ghost_indices
+       // use non-blocking send for ghost indices stored in
+       // expanded_ghost_indices
        current_index_start = 0;
        for (unsigned int i = 0; i < n_ghost_targets; i++)
          {
            const int ierr =
-             MPI_Send(&expanded_ghost_indices[current_index_start],
-                      ghost_targets_data[i].second,
-                      DEAL_II_DOF_INDEX_MPI_TYPE,
-                      ghost_targets_data[i].first,
-                      my_pid,
-                      communicator);
+             MPI_Isend(&expanded_ghost_indices[current_index_start],
+                       ghost_targets_data[i].second,
+                       DEAL_II_DOF_INDEX_MPI_TYPE,
+                       ghost_targets_data[i].first,
+                       my_pid,
+                       communicator,
+                       &import_requests[import_targets_data.size() + i]);
            AssertThrowMPI(ierr);
            current_index_start += ghost_targets_data[i].second;
          }
diff --git a/source/base/partitioner.cu b/source/base/partitioner.cu
new file mode 100644
index 0000000000..ed02193e89
--- /dev/null
+++ b/source/base/partitioner.cu
@@ -0,0 +1,24 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 1999 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#include <deal.II/base/partitioner.h>
+#include <deal.II/base/partitioner.templates.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+// explicit instantiations from .templates.h file
+#include "partitioner.cuda.inst"
+
+DEAL_II_NAMESPACE_CLOSE
diff --git a/source/base/partitioner.cuda.inst.in b/source/base/partitioner.cuda.inst.in
new file mode 100644
index 0000000000..c590984dae
--- /dev/null
+++ b/source/base/partitioner.cuda.inst.in
@@ -0,0 +1,36 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+for (SCALAR : MPI_SCALARS)
+  {
+#ifdef DEAL_II_WITH_MPI
+    template void Utilities::MPI::Partitioner::export_to_ghosted_array_start<
+      SCALAR,
+      dealii::MemorySpace::CUDA>(const unsigned int,
+                                 const ArrayView<const SCALAR> &,
+                                 const ArrayView<SCALAR> &,
+                                 const ArrayView<SCALAR> &,
+                                 std::vector<MPI_Request> &) const;
+    template void Utilities::MPI::Partitioner::import_from_ghosted_array_finish<
+      SCALAR,
+      MemorySpace::CUDA>(const VectorOperation::values,
+                         const ArrayView<const SCALAR> &,
+                         const ArrayView<SCALAR> &,
+                         const ArrayView<SCALAR> &,
+                         std::vector<MPI_Request> &) const;
+#endif
+  }
diff --git a/source/base/partitioner.inst.in b/source/base/partitioner.inst.in
index 5e74ab99c4..f20ad8a6a5 100644
--- a/source/base/partitioner.inst.in
+++ b/source/base/partitioner.inst.in
@@ -19,11 +19,12 @@ for (SCALAR : MPI_SCALARS)
   {
 #ifdef DEAL_II_WITH_MPI
     template void Utilities::MPI::Partitioner::export_to_ghosted_array_start<
-      SCALAR>(const unsigned int,
-              const ArrayView<const SCALAR> &,
-              const ArrayView<SCALAR> &,
-              const ArrayView<SCALAR> &,
-              std::vector<MPI_Request> &) const;
+      SCALAR,
+      MemorySpace::Host>(const unsigned int,
+                         const ArrayView<const SCALAR> &,
+                         const ArrayView<SCALAR> &,
+                         const ArrayView<SCALAR> &,
+                         std::vector<MPI_Request> &) const;
     template void Utilities::MPI::Partitioner::export_to_ghosted_array_finish<
       SCALAR>(const ArrayView<SCALAR> &, std::vector<MPI_Request> &) const;
     template void Utilities::MPI::Partitioner::import_from_ghosted_array_start<
@@ -33,10 +34,11 @@ for (SCALAR : MPI_SCALARS)
       const ArrayView<SCALAR> &,
       std::vector<MPI_Request> &) const;
     template void Utilities::MPI::Partitioner::import_from_ghosted_array_finish<
-      SCALAR>(const VectorOperation::values,
-              const ArrayView<const SCALAR> &,
-              const ArrayView<SCALAR> &,
-              const ArrayView<SCALAR> &,
-              std::vector<MPI_Request> &) const;
+      SCALAR,
+      MemorySpace::Host>(const VectorOperation::values,
+                         const ArrayView<const SCALAR> &,
+                         const ArrayView<SCALAR> &,
+                         const ArrayView<SCALAR> &,
+                         std::vector<MPI_Request> &) const;
 #endif
   }
-- 
2.39.5
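
The following sketch is not part of the patch. It is a minimal, hypothetical example of how a caller might use the new memory-space template parameter of export_to_ghosted_array_start(); the index layout, vector sizes, and the choice of MemorySpace::Host are made up for illustration, and the surrounding calls (the two-IndexSet Partitioner constructor, make_array_view(), local_size()) are pre-existing deal.II API, not introduced here. Passing MemorySpace::CUDA instead, together with CUDA-aware MPI, would exercise the device code paths added above.

// sketch_partitioner_memory_space.cc -- hypothetical usage sketch
#include <deal.II/base/array_view.h>
#include <deal.II/base/index_set.h>
#include <deal.II/base/memory_space.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/partitioner.h>

#include <mpi.h>

#include <vector>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  {
    using namespace dealii;

    const unsigned int my_rank =
      Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
    const unsigned int n_ranks =
      Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);

    // Made-up layout: each rank owns 4 contiguous indices and ghosts the
    // first index owned by its right neighbor.
    const unsigned int n_global = 4 * n_ranks;
    IndexSet           owned(n_global);
    owned.add_range(4 * my_rank, 4 * my_rank + 4);
    IndexSet ghosted(n_global);
    if (n_ranks > 1)
      ghosted.add_index((4 * my_rank + 4) % n_global);
    owned.compress();
    ghosted.compress();

    Utilities::MPI::Partitioner partitioner(owned, ghosted, MPI_COMM_WORLD);

    // Host-side buffers sized according to the partitioner.
    std::vector<double> owned_values(partitioner.local_size(), my_rank + 1.0);
    std::vector<double> ghost_values(partitioner.n_ghost_indices(), 0.0);
    std::vector<double> temporary(partitioner.n_import_indices(), 0.0);
    std::vector<MPI_Request> requests;

    // The memory space is now an explicit template argument; MemorySpace::CUDA
    // would select the device branches introduced by this patch.
    partitioner.export_to_ghosted_array_start<double, MemorySpace::Host>(
      /*communication_channel=*/0,
      make_array_view(owned_values),
      make_array_view(temporary),
      make_array_view(ghost_values),
      requests);
    partitioner.export_to_ghosted_array_finish(make_array_view(ghost_values),
                                               requests);

    // ghost_values now holds the neighbor's first owned entry on each rank.
  }
  MPI_Finalize();
}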